GNU Linux-libre 5.4.257-gnu1
[releases.git] / net / bridge / br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36
/* rhashtable configuration for the bridge multicast database (mdb).
 * Entries are keyed on the full struct br_ip (proto + address + vid),
 * hashed as sizeof(struct br_ip) raw bytes — which is why every lookup
 * key in this file is memset() to zero first (padding is hashed too).
 */
static const struct rhashtable_params br_mdb_rht_params = {
        .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
        .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
        .key_len = sizeof(struct br_ip),
        .automatic_shrinking = true,
};
43
44 static void br_multicast_start_querier(struct net_bridge *br,
45                                        struct bridge_mcast_own_query *query);
46 static void br_multicast_add_router(struct net_bridge *br,
47                                     struct net_bridge_port *port);
48 static void br_ip4_multicast_leave_group(struct net_bridge *br,
49                                          struct net_bridge_port *port,
50                                          __be32 group,
51                                          __u16 vid,
52                                          const unsigned char *src);
53
54 static void __del_port_router(struct net_bridge_port *p);
55 #if IS_ENABLED(CONFIG_IPV6)
56 static void br_ip6_multicast_leave_group(struct net_bridge *br,
57                                          struct net_bridge_port *port,
58                                          const struct in6_addr *group,
59                                          __u16 vid, const unsigned char *src);
60 #endif
61
/* Look up an mdb entry by full br_ip key.  Caller must be inside an
 * RCU read-side critical section.  Returns NULL if no entry exists.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
                                                      struct br_ip *dst)
{
        return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
67
/* Look up an mdb entry by full br_ip key.  Caller must hold
 * br->multicast_lock (asserted below); the short RCU section only
 * satisfies the rhashtable API, the lock is what keeps @ent alive
 * after we return.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
                                           struct br_ip *dst)
{
        struct net_bridge_mdb_entry *ent;

        lockdep_assert_held_once(&br->multicast_lock);

        rcu_read_lock();
        ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
        rcu_read_unlock();

        return ent;
}
81
82 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
83                                                    __be32 dst, __u16 vid)
84 {
85         struct br_ip br_dst;
86
87         memset(&br_dst, 0, sizeof(br_dst));
88         br_dst.u.ip4 = dst;
89         br_dst.proto = htons(ETH_P_IP);
90         br_dst.vid = vid;
91
92         return br_mdb_ip_get(br, &br_dst);
93 }
94
95 #if IS_ENABLED(CONFIG_IPV6)
96 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
97                                                    const struct in6_addr *dst,
98                                                    __u16 vid)
99 {
100         struct br_ip br_dst;
101
102         memset(&br_dst, 0, sizeof(br_dst));
103         br_dst.u.ip6 = *dst;
104         br_dst.proto = htons(ETH_P_IPV6);
105         br_dst.vid = vid;
106
107         return br_mdb_ip_get(br, &br_dst);
108 }
109 #endif
110
111 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
112                                         struct sk_buff *skb, u16 vid)
113 {
114         struct br_ip ip;
115
116         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
117                 return NULL;
118
119         if (BR_INPUT_SKB_CB(skb)->igmp)
120                 return NULL;
121
122         memset(&ip, 0, sizeof(ip));
123         ip.proto = skb->protocol;
124         ip.vid = vid;
125
126         switch (skb->protocol) {
127         case htons(ETH_P_IP):
128                 ip.u.ip4 = ip_hdr(skb)->daddr;
129                 break;
130 #if IS_ENABLED(CONFIG_IPV6)
131         case htons(ETH_P_IPV6):
132                 ip.u.ip6 = ipv6_hdr(skb)->daddr;
133                 break;
134 #endif
135         default:
136                 return NULL;
137         }
138
139         return br_mdb_ip_get_rcu(br, &ip);
140 }
141
/* Membership timer callback for an mdb entry: no report refreshed the
 * group in time.  Drops the host-join state and, if no per-port groups
 * remain, unlinks and frees the entry (RCU-deferred).
 */
static void br_multicast_group_expired(struct timer_list *t)
{
        struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
        struct net_bridge *br = mp->br;

        spin_lock(&br->multicast_lock);
        /* timer_pending() catches a re-arm that raced with this expiry */
        if (!netif_running(br->dev) || timer_pending(&mp->timer))
                goto out;

        br_multicast_host_leave(mp, true);

        /* ports still joined — keep the entry alive */
        if (mp->ports)
                goto out;

        rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
                               br_mdb_rht_params);
        hlist_del_rcu(&mp->mdb_node);

        kfree_rcu(mp, rcu);

out:
        spin_unlock(&br->multicast_lock);
}
165
/* Unlink port group @pg from its mdb entry and free it (RCU-deferred),
 * notifying userspace via RTM_DELMDB.  Called with multicast_lock held.
 * If this was the last port and no host joined, the group timer is
 * armed to fire immediately so br_multicast_group_expired() destroys
 * the now-empty entry.
 */
static void br_multicast_del_pg(struct net_bridge *br,
                                struct net_bridge_port_group *pg)
{
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;

        mp = br_mdb_ip_get(br, &pg->addr);
        if (WARN_ON(!mp))
                return;

        /* walk the singly-linked port list looking for @pg */
        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p != pg)
                        continue;

                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
                br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
                              p->flags);
                kfree_rcu(p, rcu);

                if (!mp->ports && !mp->host_joined &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);

                return;
        }

        /* @pg was not on its own mdb entry's list — inconsistent state */
        WARN_ON(1);
}
199
/* Per-port group membership timer: the port stopped reporting for this
 * group.  Skips permanent (static) entries and re-armed timers; else
 * removes the port from the group.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
        struct net_bridge_port_group *pg = from_timer(pg, t, timer);
        struct net_bridge *br = pg->port->br;

        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
            hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
                goto out;

        br_multicast_del_pg(br, pg);

out:
        spin_unlock(&br->multicast_lock);
}
215
/* Build an IGMPv2/v3 query skb for the bridge's own querier.
 * @group == 0 produces a general query, otherwise a group-specific one.
 * *@igmp_type is set for the per-port statistics counters.  The
 * returned skb has mac/network/transport headers initialized and data
 * pulled past the ethernet header; returns NULL on allocation failure.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
                                                    __be32 group,
                                                    u8 *igmp_type)
{
        struct igmpv3_query *ihv3;
        size_t igmp_hdr_size;
        struct sk_buff *skb;
        struct igmphdr *ih;
        struct ethhdr *eth;
        struct iphdr *iph;

        igmp_hdr_size = sizeof(*ih);
        if (br->multicast_igmp_version == 3)
                igmp_hdr_size = sizeof(*ihv3);
        /* the extra 4 bytes are the IP Router Alert option */
        skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
                                                 igmp_hdr_size + 4);
        if (!skb)
                goto out;

        skb->protocol = htons(ETH_P_IP);

        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);

        /* destination 01:00:5e:00:00:01 — the 224.0.0.1 all-hosts MAC */
        ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_dest[0] = 1;
        eth->h_dest[1] = 0;
        eth->h_dest[2] = 0x5e;
        eth->h_dest[3] = 0;
        eth->h_dest[4] = 0;
        eth->h_dest[5] = 1;
        eth->h_proto = htons(ETH_P_IP);
        skb_put(skb, sizeof(*eth));

        skb_set_network_header(skb, skb->len);
        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = 6;   /* 24-byte header: base 20 + 4-byte RA option */
        iph->tos = 0xc0;
        iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
        iph->id = 0;
        iph->frag_off = htons(IP_DF);
        iph->ttl = 1;
        iph->protocol = IPPROTO_IGMP;
        iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
                     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
        iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
        /* Router Alert option immediately after the fixed header */
        ((u8 *)&iph[1])[0] = IPOPT_RA;
        ((u8 *)&iph[1])[1] = 4;
        ((u8 *)&iph[1])[2] = 0;
        ((u8 *)&iph[1])[3] = 0;
        ip_send_check(iph);
        skb_put(skb, 24);       /* sizeof(*iph) + 4-byte RA option */

        skb_set_transport_header(skb, skb->len);
        *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

        switch (br->multicast_igmp_version) {
        case 2:
                ih = igmp_hdr(skb);
                ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
                /* max response time: shorter for group-specific (leave)
                 * queries than for periodic general queries
                 */
                ih->code = (group ? br->multicast_last_member_interval :
                                    br->multicast_query_response_interval) /
                           (HZ / IGMP_TIMER_SCALE);
                ih->group = group;
                ih->csum = 0;
                ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
                break;
        case 3:
                ihv3 = igmpv3_query_hdr(skb);
                ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
                ihv3->code = (group ? br->multicast_last_member_interval :
                                      br->multicast_query_response_interval) /
                             (HZ / IGMP_TIMER_SCALE);
                ihv3->group = group;
                ihv3->qqic = br->multicast_query_interval / HZ;
                ihv3->nsrcs = 0;
                ihv3->resv = 0;
                ihv3->suppress = 0;
                ihv3->qrv = 2;
                ihv3->csum = 0;
                ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
                break;
        }

        skb_put(skb, igmp_hdr_size);
        __skb_pull(skb, sizeof(*eth));

out:
        return skb;
}
308
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 query skb for the bridge's own querier.
 * An unspecified @grp produces a general query, otherwise a
 * group-specific one.  *@igmp_type is set for statistics.  Returns
 * NULL on allocation failure or when no usable IPv6 source address is
 * configured on the bridge device.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    const struct in6_addr *grp,
                                                    u8 *igmp_type)
{
        struct mld2_query *mld2q;
        unsigned long interval;
        struct ipv6hdr *ip6h;
        struct mld_msg *mldq;
        size_t mld_hdr_size;
        struct sk_buff *skb;
        struct ethhdr *eth;
        u8 *hopopt;

        mld_hdr_size = sizeof(*mldq);
        if (br->multicast_mld_version == 2)
                mld_hdr_size = sizeof(*mld2q);
        /* the extra 8 bytes hold the hop-by-hop Router Alert option */
        skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
                                                 8 + mld_hdr_size);
        if (!skb)
                goto out;

        skb->protocol = htons(ETH_P_IPV6);

        /* Ethernet header */
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);

        ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));

        /* IPv6 header + HbH option */
        skb_set_network_header(skb, skb->len);
        ip6h = ipv6_hdr(skb);

        *(__force __be32 *)ip6h = htonl(0x60000000);    /* version 6 */
        ip6h->payload_len = htons(8 + mld_hdr_size);
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        /* destination ff02::1 — all-nodes link-local multicast */
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
                               &ip6h->saddr)) {
                /* no link-local source: remember so the querier logic
                 * knows the bridge currently has no usable IPv6 address
                 */
                kfree_skb(skb);
                br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
                return NULL;
        }

        br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

        hopopt = (u8 *)(ip6h + 1);
        hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
        hopopt[1] = 0;                          /* length of HbH */
        hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
        hopopt[3] = 2;                          /* Length of RA Option */
        hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
        hopopt[5] = 0;
        hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
        hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */

        skb_put(skb, sizeof(*ip6h) + 8);

        /* ICMPv6 */
        skb_set_transport_header(skb, skb->len);
        /* shorter response interval for group-specific (leave) queries */
        interval = ipv6_addr_any(grp) ?
                        br->multicast_query_response_interval :
                        br->multicast_last_member_interval;
        *igmp_type = ICMPV6_MGM_QUERY;
        switch (br->multicast_mld_version) {
        case 1:
                mldq = (struct mld_msg *)icmp6_hdr(skb);
                mldq->mld_type = ICMPV6_MGM_QUERY;
                mldq->mld_code = 0;
                mldq->mld_cksum = 0;
                mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
                mldq->mld_reserved = 0;
                mldq->mld_mca = *grp;
                mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                  sizeof(*mldq), IPPROTO_ICMPV6,
                                                  csum_partial(mldq,
                                                               sizeof(*mldq),
                                                               0));
                break;
        case 2:
                mld2q = (struct mld2_query *)icmp6_hdr(skb);
                mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
                mld2q->mld2q_type = ICMPV6_MGM_QUERY;
                mld2q->mld2q_code = 0;
                mld2q->mld2q_cksum = 0;
                mld2q->mld2q_resv1 = 0;
                mld2q->mld2q_resv2 = 0;
                mld2q->mld2q_suppress = 0;
                mld2q->mld2q_qrv = 2;
                mld2q->mld2q_nsrcs = 0;
                mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
                mld2q->mld2q_mca = *grp;
                mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                     sizeof(*mld2q),
                                                     IPPROTO_ICMPV6,
                                                     csum_partial(mld2q,
                                                                  sizeof(*mld2q),
                                                                  0));
                break;
        }
        skb_put(skb, mld_hdr_size);

        __skb_pull(skb, sizeof(*eth));

out:
        return skb;
}
#endif
422
423 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
424                                                 struct br_ip *addr,
425                                                 u8 *igmp_type)
426 {
427         switch (addr->proto) {
428         case htons(ETH_P_IP):
429                 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
430 #if IS_ENABLED(CONFIG_IPV6)
431         case htons(ETH_P_IPV6):
432                 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
433                                                     igmp_type);
434 #endif
435         }
436         return NULL;
437 }
438
/* Find or create the mdb entry for @group.  Called with multicast_lock
 * held.  If the table has reached hash_max, multicast snooping is
 * disabled for the whole bridge and ERR_PTR(-E2BIG) is returned; other
 * failures yield an ERR_PTR as well.  Never returns NULL.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
                                                    struct br_ip *group)
{
        struct net_bridge_mdb_entry *mp;
        int err;

        mp = br_mdb_ip_get(br, group);
        if (mp)
                return mp;

        if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
                br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
                return ERR_PTR(-E2BIG);
        }

        /* GFP_ATOMIC: we are under a spinlock */
        mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
        if (unlikely(!mp))
                return ERR_PTR(-ENOMEM);

        mp->br = br;
        mp->addr = *group;
        timer_setup(&mp->timer, br_multicast_group_expired, 0);
        err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
                                            br_mdb_rht_params);
        if (err) {
                kfree(mp);
                mp = ERR_PTR(err);
        } else {
                /* also keep entries on a plain list for mdb dumps */
                hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
        }

        return mp;
}
472
473 struct net_bridge_port_group *br_multicast_new_port_group(
474                         struct net_bridge_port *port,
475                         struct br_ip *group,
476                         struct net_bridge_port_group __rcu *next,
477                         unsigned char flags,
478                         const unsigned char *src)
479 {
480         struct net_bridge_port_group *p;
481
482         p = kzalloc(sizeof(*p), GFP_ATOMIC);
483         if (unlikely(!p))
484                 return NULL;
485
486         p->addr = *group;
487         p->port = port;
488         p->flags = flags;
489         rcu_assign_pointer(p->next, next);
490         hlist_add_head(&p->mglist, &port->mglist);
491         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
492
493         if (src)
494                 memcpy(p->eth_addr, src, ETH_ALEN);
495         else
496                 eth_broadcast_addr(p->eth_addr);
497
498         return p;
499 }
500
501 static bool br_port_group_equal(struct net_bridge_port_group *p,
502                                 struct net_bridge_port *port,
503                                 const unsigned char *src)
504 {
505         if (p->port != port)
506                 return false;
507
508         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
509                 return true;
510
511         return ether_addr_equal(src, p->eth_addr);
512 }
513
514 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
515 {
516         if (!mp->host_joined) {
517                 mp->host_joined = true;
518                 if (notify)
519                         br_mdb_notify(mp->br->dev, NULL, &mp->addr,
520                                       RTM_NEWMDB, 0);
521         }
522         mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
523 }
524
525 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
526 {
527         if (!mp->host_joined)
528                 return;
529
530         mp->host_joined = false;
531         if (notify)
532                 br_mdb_notify(mp->br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
533 }
534
535 static int br_multicast_add_group(struct net_bridge *br,
536                                   struct net_bridge_port *port,
537                                   struct br_ip *group,
538                                   const unsigned char *src)
539 {
540         struct net_bridge_port_group __rcu **pp;
541         struct net_bridge_port_group *p;
542         struct net_bridge_mdb_entry *mp;
543         unsigned long now = jiffies;
544         int err;
545
546         spin_lock(&br->multicast_lock);
547         if (!netif_running(br->dev) ||
548             (port && port->state == BR_STATE_DISABLED))
549                 goto out;
550
551         mp = br_multicast_new_group(br, group);
552         err = PTR_ERR(mp);
553         if (IS_ERR(mp))
554                 goto err;
555
556         if (!port) {
557                 br_multicast_host_join(mp, true);
558                 goto out;
559         }
560
561         for (pp = &mp->ports;
562              (p = mlock_dereference(*pp, br)) != NULL;
563              pp = &p->next) {
564                 if (br_port_group_equal(p, port, src))
565                         goto found;
566                 if ((unsigned long)p->port < (unsigned long)port)
567                         break;
568         }
569
570         p = br_multicast_new_port_group(port, group, *pp, 0, src);
571         if (unlikely(!p))
572                 goto err;
573         rcu_assign_pointer(*pp, p);
574         br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
575
576 found:
577         mod_timer(&p->timer, now + br->multicast_membership_interval);
578 out:
579         err = 0;
580
581 err:
582         spin_unlock(&br->multicast_lock);
583         return err;
584 }
585
586 static int br_ip4_multicast_add_group(struct net_bridge *br,
587                                       struct net_bridge_port *port,
588                                       __be32 group,
589                                       __u16 vid,
590                                       const unsigned char *src)
591 {
592         struct br_ip br_group;
593
594         if (ipv4_is_local_multicast(group))
595                 return 0;
596
597         memset(&br_group, 0, sizeof(br_group));
598         br_group.u.ip4 = group;
599         br_group.proto = htons(ETH_P_IP);
600         br_group.vid = vid;
601
602         return br_multicast_add_group(br, port, &br_group, src);
603 }
604
605 #if IS_ENABLED(CONFIG_IPV6)
606 static int br_ip6_multicast_add_group(struct net_bridge *br,
607                                       struct net_bridge_port *port,
608                                       const struct in6_addr *group,
609                                       __u16 vid,
610                                       const unsigned char *src)
611 {
612         struct br_ip br_group;
613
614         if (ipv6_addr_is_ll_all_nodes(group))
615                 return 0;
616
617         memset(&br_group, 0, sizeof(br_group));
618         br_group.u.ip6 = *group;
619         br_group.proto = htons(ETH_P_IPV6);
620         br_group.vid = vid;
621
622         return br_multicast_add_group(br, port, &br_group, src);
623 }
624 #endif
625
/* Per-port router-presence timer: no router advertisement/query was
 * seen on this port in time.  Ports pinned to disabled or permanent
 * router mode are left alone; otherwise the port is removed from the
 * router port list.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
        struct net_bridge_port *port =
                        from_timer(port, t, multicast_router_timer);
        struct net_bridge *br = port->br;

        spin_lock(&br->multicast_lock);
        if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
            port->multicast_router == MDB_RTR_TYPE_PERM ||
            timer_pending(&port->multicast_router_timer))
                goto out;

        __del_port_router(port);
out:
        spin_unlock(&br->multicast_lock);
}
642
643 static void br_mc_router_state_change(struct net_bridge *p,
644                                       bool is_mc_router)
645 {
646         struct switchdev_attr attr = {
647                 .orig_dev = p->dev,
648                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
649                 .flags = SWITCHDEV_F_DEFER,
650                 .u.mrouter = is_mc_router,
651         };
652
653         switchdev_port_attr_set(p->dev, &attr);
654 }
655
/* Bridge-level router timer: the bridge itself stops acting as a
 * multicast router when the timer lapses, unless pinned to disabled
 * or permanent mode.  Only the switchdev state is updated here.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
        struct net_bridge *br = from_timer(br, t, multicast_router_timer);

        spin_lock(&br->multicast_lock);
        if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
            br->multicast_router == MDB_RTR_TYPE_PERM ||
            timer_pending(&br->multicast_router_timer))
                goto out;

        br_mc_router_state_change(br, false);
out:
        spin_unlock(&br->multicast_lock);
}
670
/* The foreign querier we were deferring to went silent: resume sending
 * our own queries (if the bridge is up and snooping enabled).
 */
static void br_multicast_querier_expired(struct net_bridge *br,
                                         struct bridge_mcast_own_query *query)
{
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
                goto out;

        br_multicast_start_querier(br, query);

out:
        spin_unlock(&br->multicast_lock);
}
683
684 static void br_ip4_multicast_querier_expired(struct timer_list *t)
685 {
686         struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
687
688         br_multicast_querier_expired(br, &br->ip4_own_query);
689 }
690
691 #if IS_ENABLED(CONFIG_IPV6)
692 static void br_ip6_multicast_querier_expired(struct timer_list *t)
693 {
694         struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
695
696         br_multicast_querier_expired(br, &br->ip6_own_query);
697 }
698 #endif
699
700 static void br_multicast_select_own_querier(struct net_bridge *br,
701                                             struct br_ip *ip,
702                                             struct sk_buff *skb)
703 {
704         if (ip->proto == htons(ETH_P_IP))
705                 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
706 #if IS_ENABLED(CONFIG_IPV6)
707         else
708                 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
709 #endif
710 }
711
/* Build and emit one query for @ip.  With @port set it is transmitted
 * out that port through the bridge LOCAL_OUT netfilter hook; without a
 * port the query is looped into the local stack (netif_rx) so the
 * bridge device itself sees it, and we record ourselves as querier.
 */
static void __br_multicast_send_query(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      struct br_ip *ip)
{
        struct sk_buff *skb;
        u8 igmp_type;

        skb = br_multicast_alloc_query(br, ip, &igmp_type);
        if (!skb)
                return;

        if (port) {
                skb->dev = port->dev;
                br_multicast_count(br, port, skb, igmp_type,
                                   BR_MCAST_DIR_TX);
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                        dev_net(port->dev), NULL, skb, NULL, skb->dev,
                        br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
                br_multicast_count(br, port, skb, igmp_type,
                                   BR_MCAST_DIR_RX);
                netif_rx(skb);
        }
}
737
/* Send a general query for the address family that @own_query belongs
 * to, then re-arm the own-query timer (startup interval for the first
 * multicast_startup_query_count queries, regular interval after).
 * Suppressed entirely while another querier is active on the segment.
 */
static void br_multicast_send_query(struct net_bridge *br,
                                    struct net_bridge_port *port,
                                    struct bridge_mcast_own_query *own_query)
{
        struct bridge_mcast_other_query *other_query = NULL;
        struct br_ip br_group;
        unsigned long time;

        if (!netif_running(br->dev) ||
            !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
            !br_opt_get(br, BROPT_MULTICAST_QUERIER))
                return;

        /* zero address => general query */
        memset(&br_group.u, 0, sizeof(br_group.u));

        /* identify the family by which own_query struct we were given;
         * note the #if placement: without IPv6 there is no else branch
         * and other_query stays NULL for non-IPv4 queries
         */
        if (port ? (own_query == &port->ip4_own_query) :
                   (own_query == &br->ip4_own_query)) {
                other_query = &br->ip4_other_query;
                br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                other_query = &br->ip6_other_query;
                br_group.proto = htons(ETH_P_IPV6);
#endif
        }

        /* defer to a live foreign querier */
        if (!other_query || timer_pending(&other_query->timer))
                return;

        __br_multicast_send_query(br, port, &br_group);

        time = jiffies;
        time += own_query->startup_sent < br->multicast_startup_query_count ?
                br->multicast_startup_query_interval :
                br->multicast_query_interval;
        mod_timer(&own_query->timer, time);
}
775
/* Own-query timer handler for a port: send the next periodic query
 * unless the port cannot forward (disabled/blocking).  Counts startup
 * queries so br_multicast_send_query() can pick the right interval.
 */
static void
br_multicast_port_query_expired(struct net_bridge_port *port,
                                struct bridge_mcast_own_query *query)
{
        struct net_bridge *br = port->br;

        spin_lock(&br->multicast_lock);
        if (port->state == BR_STATE_DISABLED ||
            port->state == BR_STATE_BLOCKING)
                goto out;

        if (query->startup_sent < br->multicast_startup_query_count)
                query->startup_sent++;

        br_multicast_send_query(port->br, port, query);

out:
        spin_unlock(&br->multicast_lock);
}
795
796 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
797 {
798         struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
799
800         br_multicast_port_query_expired(port, &port->ip4_own_query);
801 }
802
803 #if IS_ENABLED(CONFIG_IPV6)
804 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
805 {
806         struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
807
808         br_multicast_port_query_expired(port, &port->ip6_own_query);
809 }
810 #endif
811
812 static void br_mc_disabled_update(struct net_device *dev, bool value)
813 {
814         struct switchdev_attr attr = {
815                 .orig_dev = dev,
816                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
817                 .flags = SWITCHDEV_F_DEFER,
818                 .u.mc_disabled = !value,
819         };
820
821         switchdev_port_attr_set(dev, &attr);
822 }
823
/* Initialize per-port multicast state when a port joins the bridge:
 * default router mode, the three timers (router presence, IPv4/IPv6
 * own queries), switchdev snooping state, and per-cpu statistics.
 * Returns 0 or -ENOMEM.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
        port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

        timer_setup(&port->multicast_router_timer,
                    br_multicast_router_expired, 0);
        timer_setup(&port->ip4_own_query.timer,
                    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
        timer_setup(&port->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, 0);
#endif
        br_mc_disabled_update(port->dev,
                              br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

        port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!port->mcast_stats)
                return -ENOMEM;

        return 0;
}
845
/* Tear down per-port multicast state when a port leaves the bridge:
 * delete any remaining port groups, stop the router timer and free the
 * per-cpu statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;
        struct net_bridge_port_group *pg;
        struct hlist_node *n;

        /* Take care of the remaining groups, only perm ones should be left */
        spin_lock_bh(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
                br_multicast_del_pg(br, pg);
        spin_unlock_bh(&br->multicast_lock);
        /* _sync: wait out a concurrently running timer callback */
        del_timer_sync(&port->multicast_router_timer);
        free_percpu(port->mcast_stats);
}
860
/* Restart an own-query cycle from its startup phase.  The timer is
 * rearmed to fire immediately if it could be stopped first:
 * try_to_del_timer_sync() returns >= 0 when the handler is not
 * currently running; otherwise fall back to del_timer(), which only
 * succeeds if the timer was still pending.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
        query->startup_sent = 0;

        if (try_to_del_timer_sync(&query->timer) >= 0 ||
            del_timer(&query->timer))
                mod_timer(&query->timer, jiffies);
}
869
/* Enable multicast processing on a port (lock held by caller): kick
 * the own-query timers and, if the port is configured as a permanent
 * router port but not yet on the router list, add it.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
                return;

        br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
        br_multicast_enable(&port->ip6_own_query);
#endif
        if (port->multicast_router == MDB_RTR_TYPE_PERM &&
            hlist_unhashed(&port->rlist))
                br_multicast_add_router(br, port);
}
885
/* Locked wrapper around __br_multicast_enable_port(). */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	__br_multicast_enable_port(port);
	spin_unlock(&br->multicast_lock);
}
894
/* Stop multicast processing on @port: delete all non-permanent port
 * groups, remove the port from the router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
915
/* Parse an IGMPv3 membership report and apply each group record as a
 * v2-style join or leave.  Returns 0 on success or -EINVAL if the
 * packet is shorter than its record headers claim.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* copy the record fields into locals before the next pull,
		 * which may relocate skb->data
		 */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* each source entry is a 4-byte IPv4 address */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* an INCLUDE record with no sources means "leave group" */
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
979
980 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 listener report and apply each multicast address
 * record as an MLDv1-style join or leave.  Returns 0 on success or
 * -EINVAL if the packet is shorter than its record headers claim.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	int i;
	int len;
	int num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	/* number of multicast address records in the report */
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* the source-count field must lie within the payload */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* record header plus nsrcs IPv6 source addresses */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		/* safe to point into skb->data now: the full record is
		 * guaranteed linear after the pull above
		 */
		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* an INCLUDE record with no sources means "leave group" */
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
1058 #endif
1059
/* IGMP querier election: a query source with a lower (or equal) address
 * than the currently known querier wins.  Returns true if @saddr is
 * accepted as the selected querier, recording its address and port.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* no querier activity at all yet - accept this one */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	/* no querier address recorded yet */
	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	/* lower (or equal) source address wins the election */
	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1084
1085 #if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IPv4 version: the
 * lower (or equal) source address wins and becomes the known querier.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	/* no querier activity at all yet - accept this one */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	/* lower (or equal) source address wins the election */
	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
1107 #endif
1108
1109 static bool br_multicast_select_querier(struct net_bridge *br,
1110                                         struct net_bridge_port *port,
1111                                         struct br_ip *saddr)
1112 {
1113         switch (saddr->proto) {
1114         case htons(ETH_P_IP):
1115                 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1116 #if IS_ENABLED(CONFIG_IPV6)
1117         case htons(ETH_P_IPV6):
1118                 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1119 #endif
1120         }
1121
1122         return false;
1123 }
1124
/* Note that a query from another querier was seen: extend the
 * other-querier present timer by a full querier interval.  delay_time
 * is only (re)set when the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1135
1136 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1137                                            bool is_mc_router)
1138 {
1139         struct switchdev_attr attr = {
1140                 .orig_dev = p->dev,
1141                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1142                 .flags = SWITCHDEV_F_DEFER,
1143                 .u.mrouter = is_mc_router,
1144         };
1145
1146         switchdev_port_attr_set(p->dev, &attr);
1147 }
1148
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* already on the router list - nothing to do */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* find the insertion point: list is sorted by descending pointer */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}
1176
/* Note multicast router presence.  For the bridge itself (@port == NULL)
 * or a port in temporary-query mode, (re)arm the corresponding router
 * timer; ports configured disabled or permanent are left untouched.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* only signal the state change when the timer was
			 * not already running
			 */
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1201
/* Handle a general query: if its source wins querier election, refresh
 * the other-querier timer and mark the ingress port as a router port.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (br_multicast_select_querier(br, port, saddr)) {
		br_multicast_update_query_timer(br, query, max_delay);
		br_multicast_mark_router(br, port);
	}
}
1214
/* Process an IGMP membership query.  General queries feed querier
 * election; group-specific queries shorten the matching group and
 * port-group timers to last-member-query timing.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query; a v1 query has code == 0 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* IGMPv1: fixed 10s response time, treated as a
			 * general query
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* source-specific queries are not handled */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query: run querier election on the source addr */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* shorten the group timer to the response window, never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* same for every port group of this entry */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1290
1291 #if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD listener query, the IPv6 counterpart of
 * br_ip4_multicast_query().  Returns 0 or -EINVAL on a truncated query.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* source-specific queries are not handled */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query: run querier election on the source addr */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* shorten the group timer to the response window, never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* same for every port group of this entry */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
1374 #endif
1375
/* Core leave processing shared by IGMP and MLD.  With fast-leave set on
 * the port, the matching non-permanent port group is deleted right away;
 * otherwise the relevant group/port timers are shortened to
 * last-member-query timing, and when we are the querier a group-specific
 * query is sent first.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: remove the port group immediately */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			kfree_rcu(p, rcu);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags | MDB_PG_FLAGS_FAST_LEAVE);

			/* last member gone - let the entry expire now */
			if (!mp->ports && !mp->host_joined &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* another querier is active - leave the timers to it */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		/* we are the querier: send a group-specific query and give
		 * remaining members the last-member window to respond
		 */
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* host leave: shorten the entry timer, never extend it */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* port leave: shorten the matching port-group timer */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1485
1486 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1487                                          struct net_bridge_port *port,
1488                                          __be32 group,
1489                                          __u16 vid,
1490                                          const unsigned char *src)
1491 {
1492         struct br_ip br_group;
1493         struct bridge_mcast_own_query *own_query;
1494
1495         if (ipv4_is_local_multicast(group))
1496                 return;
1497
1498         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1499
1500         memset(&br_group, 0, sizeof(br_group));
1501         br_group.u.ip4 = group;
1502         br_group.proto = htons(ETH_P_IP);
1503         br_group.vid = vid;
1504
1505         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1506                                  own_query, src);
1507 }
1508
1509 #if IS_ENABLED(CONFIG_IPV6)
1510 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1511                                          struct net_bridge_port *port,
1512                                          const struct in6_addr *group,
1513                                          __u16 vid,
1514                                          const unsigned char *src)
1515 {
1516         struct br_ip br_group;
1517         struct bridge_mcast_own_query *own_query;
1518
1519         if (ipv6_addr_is_ll_all_nodes(group))
1520                 return;
1521
1522         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1523
1524         memset(&br_group, 0, sizeof(br_group));
1525         br_group.u.ip6 = *group;
1526         br_group.proto = htons(ETH_P_IPV6);
1527         br_group.vid = vid;
1528
1529         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1530                                  own_query, src);
1531 }
1532 #endif
1533
/* Bump the IGMP/MLD parse-error counter in the per-cpu multicast stats
 * of port @p, or of the bridge itself when @p is NULL.  No-op unless
 * multicast statistics are enabled.
 */
static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}
1566
1567 static void br_multicast_pim(struct net_bridge *br,
1568                              struct net_bridge_port *port,
1569                              const struct sk_buff *skb)
1570 {
1571         unsigned int offset = skb_transport_offset(skb);
1572         struct pimhdr *pimhdr, _pimhdr;
1573
1574         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1575         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1576             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1577                 return;
1578
1579         br_multicast_mark_router(br, port);
1580 }
1581
1582 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1583                                     struct net_bridge_port *port,
1584                                     struct sk_buff *skb)
1585 {
1586         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1587             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1588                 return -ENOMSG;
1589
1590         br_multicast_mark_router(br, port);
1591
1592         return 0;
1593 }
1594
/* Snoop an IPv4 packet for IGMP.  -ENOMSG from ip_mc_check_igmp means
 * the packet carries no IGMP message: non-link-local groups are then
 * restricted to router ports, while PIM hellos and multicast router
 * discovery advertisements mark the ingress port as a router port.
 * Real parse errors bump the error counters and are returned.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* reports are only forwarded to router ports */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1648
1649 #if IS_ENABLED(CONFIG_IPV6)
1650 static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1651                                      struct net_bridge_port *port,
1652                                      struct sk_buff *skb)
1653 {
1654         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1655                 return;
1656
1657         br_multicast_mark_router(br, port);
1658 }
1659
/* Snoop an IPv6 packet for MLD.  -ENOMSG and -ENODATA from
 * ipv6_mc_check_mld both mean "no MLD message here": traffic not to the
 * all-nodes group is then restricted to router ports, and for -ENODATA
 * an MRD advertisement to the all-snoopers group marks the ingress port
 * as a router port.  Real parse errors bump the error counters and are
 * returned.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(br, port, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* reports are only forwarded to router ports */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1711 #endif
1712
1713 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1714                      struct sk_buff *skb, u16 vid)
1715 {
1716         int ret = 0;
1717
1718         BR_INPUT_SKB_CB(skb)->igmp = 0;
1719         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1720
1721         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1722                 return 0;
1723
1724         switch (skb->protocol) {
1725         case htons(ETH_P_IP):
1726                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1727                 break;
1728 #if IS_ENABLED(CONFIG_IPV6)
1729         case htons(ETH_P_IPV6):
1730                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1731                 break;
1732 #endif
1733         }
1734
1735         return ret;
1736 }
1737
/* Own-query timer handler, shared by the IPv4 and IPv6 timers: count the
 * startup query (bounded by multicast_startup_query_count), clear the
 * cached querier port under the multicast lock, and emit the next query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	/* we are the querier now, so no selected port remains */
	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
1750
/* Timer trampoline: forward the IPv4 own-query timer to the common
 * expiry handler with the IPv4 query/querier state.
 */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);

	br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
}
1757
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline: forward the IPv6 own-query timer to the common
 * expiry handler with the IPv6 query/querier state.
 */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);

	br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
}
#endif
1766
/* Initialize the per-bridge multicast snooping state: default intervals
 * and counts, option flags, the multicast lock, the query/querier timers
 * and the MDB list head.  Called when the bridge device is created.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* interval defaults; e.g. 125s query interval with a startup
	 * interval of a quarter of that (125 * HZ / 4)
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* snooping is on by default; assume IPv6 addresses until proven
	 * otherwise
	 */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
}
1808
1809 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
1810 {
1811         struct in_device *in_dev = in_dev_get(br->dev);
1812
1813         if (!in_dev)
1814                 return;
1815
1816         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1817         in_dev_put(in_dev);
1818 }
1819
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device so
 * multicast router discovery messages are received.
 */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* Stub when IPv6 is not built in */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
1833
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device.
 * Must be called without multicast_lock held (see br_multicast_toggle).
 */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
1839
/* Leave the IPv4 all-snoopers group joined by
 * br_ip4_multicast_join_snoopers().  The in_device is expected to exist
 * since we joined through it; warn if it vanished.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
1850
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) joined by
 * br_ip6_multicast_join_snoopers().
 */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a - link-local all-snoopers multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* Stub when IPv6 is not built in */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
1864
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
1870
1871 static void __br_multicast_open(struct net_bridge *br,
1872                                 struct bridge_mcast_own_query *query)
1873 {
1874         query->startup_sent = 0;
1875
1876         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1877                 return;
1878
1879         mod_timer(&query->timer, jiffies);
1880 }
1881
/* Start the IPv4 (and, when built, IPv6) own-query machinery; called
 * when the bridge device is brought up.
 */
void br_multicast_open(struct net_bridge *br)
{
	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
1889
/* Stop all per-bridge multicast timers, waiting for running handlers to
 * finish; called when the bridge device goes down.
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif
}
1900
/* Tear down all MDB entries on bridge device removal: under the
 * multicast lock, stop each entry's timer, unlink it from the hash table
 * and list, and free it after a grace period.  The final rcu_barrier()
 * waits for all pending kfree_rcu callbacks before the caller proceeds.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		kfree_rcu(mp, rcu);
	}
	spin_unlock_bh(&br->multicast_lock);

	rcu_barrier();
}
1918
/* Set the bridge-level multicast router mode.  DISABLED and PERM are
 * fixed states, so the router timer is stopped; TEMP_QUERY reverts to
 * timer-driven detection.  Returns 0 on success, -EINVAL for any other
 * value (MDB_RTR_TYPE_TEMP is not accepted at bridge level).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* only notify a state change if we actually change state */
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
1945
/* Remove a port from the bridge's router-port list (if present), notify
 * userspace and switchdev, and demote a TEMP router setting so the
 * timer cannot re-add the port.  Caller holds the multicast lock.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
1958
/* Set the per-port multicast router mode.  Re-setting the current mode
 * only refreshes the TEMP router timer.  Returns 0 on success, -EINVAL
 * for unknown values.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* back to query-based detection; timer restarts on demand */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port: no timer, always listed */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* temporary router port, kept alive by the router timer */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2002
/* Start acting as querier for the given own-query state: restart the
 * bridge-level query machinery and enable the matching per-port query
 * state on every active (non-disabled, non-blocking) port.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		/* pick the per-port query state matching the bridge one */
		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
2025
/* Enable or disable multicast snooping on the bridge.  When enabling on
 * a running device, restarts the query machinery and re-enables each
 * port.  Joining/leaving the all-snoopers groups is deferred until the
 * multicast lock is dropped (see comment below).  Always returns 0.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return 0;
}
2075
2076 bool br_multicast_enabled(const struct net_device *dev)
2077 {
2078         struct net_bridge *br = netdev_priv(dev);
2079
2080         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2081 }
2082 EXPORT_SYMBOL_GPL(br_multicast_enabled);
2083
/* Report whether the given bridge device currently acts as a multicast
 * router, sampled under the multicast lock.  Exported helper.
 */
bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(br);
	spin_unlock_bh(&br->multicast_lock);
	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);
2095
/* Enable or disable the bridge acting as an IGMP/MLD querier.  When
 * enabling, prime the other-querier delay time (unless a foreign querier
 * timer is already pending) and start sending our own queries.  Always
 * returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	/* don't override state for an already-detected foreign querier */
	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2129
2130 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2131 {
2132         /* Currently we support only version 2 and 3 */
2133         switch (val) {
2134         case 2:
2135         case 3:
2136                 break;
2137         default:
2138                 return -EINVAL;
2139         }
2140
2141         spin_lock_bh(&br->multicast_lock);
2142         br->multicast_igmp_version = val;
2143         spin_unlock_bh(&br->multicast_lock);
2144
2145         return 0;
2146 }
2147
2148 #if IS_ENABLED(CONFIG_IPV6)
2149 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2150 {
2151         /* Currently we support version 1 and 2 */
2152         switch (val) {
2153         case 1:
2154         case 2:
2155                 break;
2156         default:
2157                 return -EINVAL;
2158         }
2159
2160         spin_lock_bh(&br->multicast_lock);
2161         br->multicast_mld_version = val;
2162         spin_unlock_bh(&br->multicast_lock);
2163
2164         return 0;
2165 }
2166 #endif
2167
2168 /**
2169  * br_multicast_list_adjacent - Returns snooped multicast addresses
2170  * @dev:        The bridge port adjacent to which to retrieve addresses
2171  * @br_ip_list: The list to store found, snooped multicast IP addresses in
2172  *
2173  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2174  * snooping feature on all bridge ports of dev's bridge device, excluding
2175  * the addresses from dev itself.
2176  *
2177  * Returns the number of items added to br_ip_list.
2178  *
2179  * Notes:
2180  * - br_ip_list needs to be initialized by caller
2181  * - br_ip_list might contain duplicates in the end
2182  *   (needs to be taken care of by caller)
2183  * - br_ip_list needs to be freed by caller
2184  */
2185 int br_multicast_list_adjacent(struct net_device *dev,
2186                                struct list_head *br_ip_list)
2187 {
2188         struct net_bridge *br;
2189         struct net_bridge_port *port;
2190         struct net_bridge_port_group *group;
2191         struct br_ip_list *entry;
2192         int count = 0;
2193
2194         rcu_read_lock();
2195         if (!br_ip_list || !netif_is_bridge_port(dev))
2196                 goto unlock;
2197
2198         port = br_port_get_rcu(dev);
2199         if (!port || !port->br)
2200                 goto unlock;
2201
2202         br = port->br;
2203
2204         list_for_each_entry_rcu(port, &br->port_list, list) {
2205                 if (!port->dev || port->dev == dev)
2206                         continue;
2207
2208                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2209                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2210                         if (!entry)
2211                                 goto unlock;
2212
2213                         entry->addr = group->addr;
2214                         list_add(&entry->list, br_ip_list);
2215                         count++;
2216                 }
2217         }
2218
2219 unlock:
2220         rcu_read_unlock();
2221         return count;
2222 }
2223 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2224
2225 /**
2226  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2227  * @dev: The bridge port providing the bridge on which to check for a querier
2228  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2229  *
2230  * Checks whether the given interface has a bridge on top and if so returns
2231  * true if a valid querier exists anywhere on the bridged link layer.
2232  * Otherwise returns false.
2233  */
2234 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2235 {
2236         struct net_bridge *br;
2237         struct net_bridge_port *port;
2238         struct ethhdr eth;
2239         bool ret = false;
2240
2241         rcu_read_lock();
2242         if (!netif_is_bridge_port(dev))
2243                 goto unlock;
2244
2245         port = br_port_get_rcu(dev);
2246         if (!port || !port->br)
2247                 goto unlock;
2248
2249         br = port->br;
2250
2251         memset(&eth, 0, sizeof(eth));
2252         eth.h_proto = htons(proto);
2253
2254         ret = br_multicast_querier_exists(br, &eth);
2255
2256 unlock:
2257         rcu_read_unlock();
2258         return ret;
2259 }
2260 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2261
2262 /**
2263  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2264  * @dev: The bridge port adjacent to which to check for a querier
2265  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2266  *
2267  * Checks whether the given interface has a bridge on top and if so returns
2268  * true if a selected querier is behind one of the other ports of this
2269  * bridge. Otherwise returns false.
2270  */
2271 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2272 {
2273         struct net_bridge *br;
2274         struct net_bridge_port *port;
2275         bool ret = false;
2276
2277         rcu_read_lock();
2278         if (!netif_is_bridge_port(dev))
2279                 goto unlock;
2280
2281         port = br_port_get_rcu(dev);
2282         if (!port || !port->br)
2283                 goto unlock;
2284
2285         br = port->br;
2286
2287         switch (proto) {
2288         case ETH_P_IP:
2289                 if (!timer_pending(&br->ip4_other_query.timer) ||
2290                     rcu_dereference(br->ip4_querier.port) == port)
2291                         goto unlock;
2292                 break;
2293 #if IS_ENABLED(CONFIG_IPV6)
2294         case ETH_P_IPV6:
2295                 if (!timer_pending(&br->ip6_other_query.timer) ||
2296                     rcu_dereference(br->ip6_querier.port) == port)
2297                         goto unlock;
2298                 break;
2299 #endif
2300         default:
2301                 goto unlock;
2302         }
2303
2304         ret = true;
2305 unlock:
2306         rcu_read_unlock();
2307         return ret;
2308 }
2309 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2310
/* Update this CPU's IGMP/MLD statistics for one packet of the given
 * message @type in direction @dir (RX/TX).  For query messages the
 * version is inferred from the transport payload length and, for IGMP,
 * from the "code" field (v1 queries carry code == 0).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than a plain igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* code == 0 distinguishes v1 from v2 */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* transport payload length, minus any extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* v2 queries are longer than a plain mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
2381
2382 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2383                         const struct sk_buff *skb, u8 type, u8 dir)
2384 {
2385         struct bridge_mcast_stats __percpu *stats;
2386
2387         /* if multicast_disabled is true then igmp type can't be set */
2388         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2389                 return;
2390
2391         if (p)
2392                 stats = p->mcast_stats;
2393         else
2394                 stats = br->mcast_stats;
2395         if (WARN_ON(!stats))
2396                 return;
2397
2398         br_mcast_stats_add(stats, skb, type, dir);
2399 }
2400
2401 int br_multicast_init_stats(struct net_bridge *br)
2402 {
2403         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2404         if (!br->mcast_stats)
2405                 return -ENOMEM;
2406
2407         return 0;
2408 }
2409
/* Free the per-cpu multicast statistics allocated by
 * br_multicast_init_stats().
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
2414
/* Fold one RX/TX counter pair from a per-cpu snapshot (src) into the
 * running aggregate (dst).
 */
static void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
2420
/* Aggregate multicast statistics into @dest, taken from the port's stats
 * when @p is given, otherwise from the bridge's own stats.  Each CPU's
 * counters are snapshotted with the u64_stats retry loop to get a
 * consistent copy, then summed into a temporary before the final copy.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* retry until we read an unmodified snapshot */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
2466
/* Initialize the bridge's MDB rhashtable.  Returns rhashtable_init()'s
 * result (0 on success, negative errno on failure).
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
}
2471
/* Destroy the bridge's MDB rhashtable set up by br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}