net: bridge: multicast: add EHT allow/block handling
net/bridge/br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
38 static const struct rhashtable_params br_mdb_rht_params = {
39         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
40         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
41         .key_len = sizeof(struct br_ip),
42         .automatic_shrinking = true,
43 };
44
45 static const struct rhashtable_params br_sg_port_rht_params = {
46         .head_offset = offsetof(struct net_bridge_port_group, rhnode),
47         .key_offset = offsetof(struct net_bridge_port_group, key),
48         .key_len = sizeof(struct net_bridge_port_group_sg_key),
49         .automatic_shrinking = true,
50 };
51
52 static void br_multicast_start_querier(struct net_bridge *br,
53                                        struct bridge_mcast_own_query *query);
54 static void br_multicast_add_router(struct net_bridge *br,
55                                     struct net_bridge_port *port);
56 static void br_ip4_multicast_leave_group(struct net_bridge *br,
57                                          struct net_bridge_port *port,
58                                          __be32 group,
59                                          __u16 vid,
60                                          const unsigned char *src);
61 static void br_multicast_port_group_rexmit(struct timer_list *t);
62
63 static void __del_port_router(struct net_bridge_port *p);
64 #if IS_ENABLED(CONFIG_IPV6)
65 static void br_ip6_multicast_leave_group(struct net_bridge *br,
66                                          struct net_bridge_port *port,
67                                          const struct in6_addr *group,
68                                          __u16 vid, const unsigned char *src);
69 #endif
70 static struct net_bridge_port_group *
71 __br_multicast_add_group(struct net_bridge *br,
72                          struct net_bridge_port *port,
73                          struct br_ip *group,
74                          const unsigned char *src,
75                          u8 filter_mode,
76                          bool igmpv2_mldv1,
77                          bool blocked);
78 static void br_multicast_find_del_pg(struct net_bridge *br,
79                                      struct net_bridge_port_group *pg);
80
81 static struct net_bridge_port_group *
82 br_sg_port_find(struct net_bridge *br,
83                 struct net_bridge_port_group_sg_key *sg_p)
84 {
85         lockdep_assert_held_once(&br->multicast_lock);
86
87         return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
88                                       br_sg_port_rht_params);
89 }
90
91 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
92                                                       struct br_ip *dst)
93 {
94         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
95 }
96
97 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
98                                            struct br_ip *dst)
99 {
100         struct net_bridge_mdb_entry *ent;
101
102         lockdep_assert_held_once(&br->multicast_lock);
103
104         rcu_read_lock();
105         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
106         rcu_read_unlock();
107
108         return ent;
109 }
110
111 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
112                                                    __be32 dst, __u16 vid)
113 {
114         struct br_ip br_dst;
115
116         memset(&br_dst, 0, sizeof(br_dst));
117         br_dst.dst.ip4 = dst;
118         br_dst.proto = htons(ETH_P_IP);
119         br_dst.vid = vid;
120
121         return br_mdb_ip_get(br, &br_dst);
122 }
123
124 #if IS_ENABLED(CONFIG_IPV6)
125 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
126                                                    const struct in6_addr *dst,
127                                                    __u16 vid)
128 {
129         struct br_ip br_dst;
130
131         memset(&br_dst, 0, sizeof(br_dst));
132         br_dst.dst.ip6 = *dst;
133         br_dst.proto = htons(ETH_P_IPV6);
134         br_dst.vid = vid;
135
136         return br_mdb_ip_get(br, &br_dst);
137 }
138 #endif
139
140 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
141                                         struct sk_buff *skb, u16 vid)
142 {
143         struct br_ip ip;
144
145         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
146                 return NULL;
147
148         if (BR_INPUT_SKB_CB(skb)->igmp)
149                 return NULL;
150
151         memset(&ip, 0, sizeof(ip));
152         ip.proto = skb->protocol;
153         ip.vid = vid;
154
155         switch (skb->protocol) {
156         case htons(ETH_P_IP):
157                 ip.dst.ip4 = ip_hdr(skb)->daddr;
158                 if (br->multicast_igmp_version == 3) {
159                         struct net_bridge_mdb_entry *mdb;
160
161                         ip.src.ip4 = ip_hdr(skb)->saddr;
162                         mdb = br_mdb_ip_get_rcu(br, &ip);
163                         if (mdb)
164                                 return mdb;
165                         ip.src.ip4 = 0;
166                 }
167                 break;
168 #if IS_ENABLED(CONFIG_IPV6)
169         case htons(ETH_P_IPV6):
170                 ip.dst.ip6 = ipv6_hdr(skb)->daddr;
171                 if (br->multicast_mld_version == 2) {
172                         struct net_bridge_mdb_entry *mdb;
173
174                         ip.src.ip6 = ipv6_hdr(skb)->saddr;
175                         mdb = br_mdb_ip_get_rcu(br, &ip);
176                         if (mdb)
177                                 return mdb;
178                         memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
179                 }
180                 break;
181 #endif
182         default:
183                 ip.proto = 0;
184                 ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
185         }
186
187         return br_mdb_ip_get_rcu(br, &ip);
188 }
189
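As a worked illustration of the lookup order above (addresses and VLAN are made-up examples, not from this file): with IGMPv3 snooping active, a data packet to 239.1.1.1 from 10.0.0.1 on VLAN 10 is first looked up as the S,G entry (10.0.0.1, 239.1.1.1, vid 10); only if that misses is the source zeroed and the *,G entry (*, 239.1.1.1, vid 10) tried instead.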
190 static bool br_port_group_equal(struct net_bridge_port_group *p,
191                                 struct net_bridge_port *port,
192                                 const unsigned char *src)
193 {
194         if (p->key.port != port)
195                 return false;
196
197         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
198                 return true;
199
200         return ether_addr_equal(src, p->eth_addr);
201 }
202
203 static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
204                                 struct br_ip *sg_ip)
205 {
206         struct net_bridge_port_group_sg_key sg_key;
207         struct net_bridge *br = pg->key.port->br;
208         struct net_bridge_port_group *src_pg;
209
210         memset(&sg_key, 0, sizeof(sg_key));
211         sg_key.port = pg->key.port;
212         sg_key.addr = *sg_ip;
213         if (br_sg_port_find(br, &sg_key))
214                 return;
215
216         src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
217                                           MCAST_INCLUDE, false, false);
218         if (IS_ERR_OR_NULL(src_pg) ||
219             src_pg->rt_protocol != RTPROT_KERNEL)
220                 return;
221
222         src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
223 }
224
225 static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
226                                 struct br_ip *sg_ip)
227 {
228         struct net_bridge_port_group_sg_key sg_key;
229         struct net_bridge *br = pg->key.port->br;
230         struct net_bridge_port_group *src_pg;
231
232         memset(&sg_key, 0, sizeof(sg_key));
233         sg_key.port = pg->key.port;
234         sg_key.addr = *sg_ip;
235         src_pg = br_sg_port_find(br, &sg_key);
236         if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
237             src_pg->rt_protocol != RTPROT_KERNEL)
238                 return;
239
240         br_multicast_find_del_pg(br, src_pg);
241 }
242
243 /* When a port group transitions to (or is added as) EXCLUDE, we need to add
244  * it to all other ports' S,G entries which are not blocked by the current
245  * group, so that traffic is replicated properly. The assumption is that any
246  * blocked S,G entries are already added, so the S,G,port lookup should skip them.
247  * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
248  * deleted, we need to remove it from all ports' S,G entries where it was
249  * automatically installed before (i.e. where it has MDB_PG_FLAGS_STAR_EXCL set).
250  */
251 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
252                                      u8 filter_mode)
253 {
254         struct net_bridge *br = pg->key.port->br;
255         struct net_bridge_port_group *pg_lst;
256         struct net_bridge_mdb_entry *mp;
257         struct br_ip sg_ip;
258
259         if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
260                 return;
261
262         mp = br_mdb_ip_get(br, &pg->key.addr);
263         if (!mp)
264                 return;
265
266         memset(&sg_ip, 0, sizeof(sg_ip));
267         sg_ip = pg->key.addr;
268         for (pg_lst = mlock_dereference(mp->ports, br);
269              pg_lst;
270              pg_lst = mlock_dereference(pg_lst->next, br)) {
271                 struct net_bridge_group_src *src_ent;
272
273                 if (pg_lst == pg)
274                         continue;
275                 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
276                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
277                                 continue;
278                         sg_ip.src = src_ent->addr.src;
279                         switch (filter_mode) {
280                         case MCAST_INCLUDE:
281                                 __fwd_del_star_excl(pg, &sg_ip);
282                                 break;
283                         case MCAST_EXCLUDE:
284                                 __fwd_add_star_excl(pg, &sg_ip);
285                                 break;
286                         }
287                 }
288         }
289 }
290
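The S,G port group entries handled by the functions above are normally created when snooping IGMPv3/MLDv2 reports from hosts attached to bridge ports. Below is a minimal host-side sketch (not part of this file) of the kind of source-specific join that produces such a report; the interface name and addresses are illustrative assumptions.

/* Host-side illustration (not bridge code): a source-specific join that makes
 * the host emit an IGMPv3 INCLUDE(S) report, which snooping turns into an
 * S,G port group entry like the ones handled above.
 */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct group_source_req gsr;
	struct sockaddr_in *grp = (struct sockaddr_in *)&gsr.gsr_group;
	struct sockaddr_in *src = (struct sockaddr_in *)&gsr.gsr_source;
	int sock = socket(AF_INET, SOCK_DGRAM, 0);

	if (sock < 0)
		return 1;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = if_nametoindex("eth0");		/* example port behind the bridge */
	grp->sin_family = AF_INET;
	inet_pton(AF_INET, "239.1.1.1", &grp->sin_addr);	/* group G (example) */
	src->sin_family = AF_INET;
	inet_pton(AF_INET, "10.0.0.1", &src->sin_addr);		/* source S (example) */

	if (setsockopt(sock, IPPROTO_IP, MCAST_JOIN_SOURCE_GROUP,
		       &gsr, sizeof(gsr)) < 0)
		perror("MCAST_JOIN_SOURCE_GROUP");

	pause();	/* stay joined so the report and the S,G entry can be observed */
	close(sock);
	return 0;
}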
291 /* called when adding a new S,G with host_joined == false by default */
292 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
293                                        struct net_bridge_port_group *sg)
294 {
295         struct net_bridge_mdb_entry *sg_mp;
296
297         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
298                 return;
299         if (!star_mp->host_joined)
300                 return;
301
302         sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
303         if (!sg_mp)
304                 return;
305         sg_mp->host_joined = true;
306 }
307
308 /* set the host_joined state of all of *,G's S,G entries */
309 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
310 {
311         struct net_bridge *br = star_mp->br;
312         struct net_bridge_mdb_entry *sg_mp;
313         struct net_bridge_port_group *pg;
314         struct br_ip sg_ip;
315
316         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
317                 return;
318
319         memset(&sg_ip, 0, sizeof(sg_ip));
320         sg_ip = star_mp->addr;
321         for (pg = mlock_dereference(star_mp->ports, br);
322              pg;
323              pg = mlock_dereference(pg->next, br)) {
324                 struct net_bridge_group_src *src_ent;
325
326                 hlist_for_each_entry(src_ent, &pg->src_list, node) {
327                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
328                                 continue;
329                         sg_ip.src = src_ent->addr.src;
330                         sg_mp = br_mdb_ip_get(br, &sg_ip);
331                         if (!sg_mp)
332                                 continue;
333                         sg_mp->host_joined = star_mp->host_joined;
334                 }
335         }
336 }
337
338 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
339 {
340         struct net_bridge_port_group __rcu **pp;
341         struct net_bridge_port_group *p;
342
343         /* *,G exclude ports are only added to S,G entries */
344         if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
345                 return;
346
347         /* we only need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports;
348          * permanent entries are ignored since they're managed by user-space
349          */
350         for (pp = &sgmp->ports;
351              (p = mlock_dereference(*pp, sgmp->br)) != NULL;
352              pp = &p->next)
353                 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
354                                   MDB_PG_FLAGS_PERMANENT)))
355                         return;
356
357         /* currently the host can only have joined the *,G, which means
358          * we treat it as EXCLUDE {}; so for an S,G entry it is considered a
359          * STAR_EXCLUDE entry and we can safely leave it (clear host_joined)
360          */
361         sgmp->host_joined = false;
362
363         for (pp = &sgmp->ports;
364              (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
365                 if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
366                         br_multicast_del_pg(sgmp, p, pp);
367                 else
368                         pp = &p->next;
369         }
370 }
371
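As a worked example of the function above (hypothetical ports): if an S,G entry has port A installed as STAR_EXCL by the kernel, port B added as permanent by user-space, and port C joined dynamically in INCLUDE mode, then removing C leaves only STAR_EXCL and permanent ports, so the first loop falls through, host_joined is cleared, and A is deleted while B is kept.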
372 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
373                                        struct net_bridge_port_group *sg)
374 {
375         struct net_bridge_port_group_sg_key sg_key;
376         struct net_bridge *br = star_mp->br;
377         struct net_bridge_port_group *pg;
378
379         if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
380                 return;
381         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
382                 return;
383
384         br_multicast_sg_host_state(star_mp, sg);
385         memset(&sg_key, 0, sizeof(sg_key));
386         sg_key.addr = sg->key.addr;
387         /* we need to add all exclude ports to the S,G */
388         for (pg = mlock_dereference(star_mp->ports, br);
389              pg;
390              pg = mlock_dereference(pg->next, br)) {
391                 struct net_bridge_port_group *src_pg;
392
393                 if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
394                         continue;
395
396                 sg_key.port = pg->key.port;
397                 if (br_sg_port_find(br, &sg_key))
398                         continue;
399
400                 src_pg = __br_multicast_add_group(br, pg->key.port,
401                                                   &sg->key.addr,
402                                                   sg->eth_addr,
403                                                   MCAST_INCLUDE, false, false);
404                 if (IS_ERR_OR_NULL(src_pg) ||
405                     src_pg->rt_protocol != RTPROT_KERNEL)
406                         continue;
407                 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
408         }
409 }
410
411 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
412 {
413         struct net_bridge_mdb_entry *star_mp;
414         struct net_bridge_port_group *sg;
415         struct br_ip sg_ip;
416
417         if (src->flags & BR_SGRP_F_INSTALLED)
418                 return;
419
420         memset(&sg_ip, 0, sizeof(sg_ip));
421         sg_ip = src->pg->key.addr;
422         sg_ip.src = src->addr.src;
423         sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
424                                       src->pg->eth_addr, MCAST_INCLUDE, false,
425                                       !timer_pending(&src->timer));
426         if (IS_ERR_OR_NULL(sg))
427                 return;
428         src->flags |= BR_SGRP_F_INSTALLED;
429         sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
430
431         /* if it was added by user-space as permanent we can skip the next steps */
432         if (sg->rt_protocol != RTPROT_KERNEL &&
433             (sg->flags & MDB_PG_FLAGS_PERMANENT))
434                 return;
435
436         /* the kernel is now responsible for removing this S,G */
437         del_timer(&sg->timer);
438         star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
439         if (!star_mp)
440                 return;
441
442         br_multicast_sg_add_exclude_ports(star_mp, sg);
443 }
444
445 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
446 {
447         struct net_bridge_port_group *p, *pg = src->pg;
448         struct net_bridge_port_group __rcu **pp;
449         struct net_bridge_mdb_entry *mp;
450         struct br_ip sg_ip;
451
452         memset(&sg_ip, 0, sizeof(sg_ip));
453         sg_ip = pg->key.addr;
454         sg_ip.src = src->addr.src;
455
456         mp = br_mdb_ip_get(src->br, &sg_ip);
457         if (!mp)
458                 return;
459
460         for (pp = &mp->ports;
461              (p = mlock_dereference(*pp, src->br)) != NULL;
462              pp = &p->next) {
463                 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
464                         continue;
465
466                 if (p->rt_protocol != RTPROT_KERNEL &&
467                     (p->flags & MDB_PG_FLAGS_PERMANENT))
468                         break;
469
470                 br_multicast_del_pg(mp, p, pp);
471                 break;
472         }
473         src->flags &= ~BR_SGRP_F_INSTALLED;
474 }
475
476 /* install S,G and based on src's timer enable or disable forwarding */
477 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
478 {
479         struct net_bridge_port_group_sg_key sg_key;
480         struct net_bridge_port_group *sg;
481         u8 old_flags;
482
483         br_multicast_fwd_src_add(src);
484
485         memset(&sg_key, 0, sizeof(sg_key));
486         sg_key.addr = src->pg->key.addr;
487         sg_key.addr.src = src->addr.src;
488         sg_key.port = src->pg->key.port;
489
490         sg = br_sg_port_find(src->br, &sg_key);
491         if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
492                 return;
493
494         old_flags = sg->flags;
495         if (timer_pending(&src->timer))
496                 sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
497         else
498                 sg->flags |= MDB_PG_FLAGS_BLOCKED;
499
500         if (old_flags != sg->flags) {
501                 struct net_bridge_mdb_entry *sg_mp;
502
503                 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
504                 if (!sg_mp)
505                         return;
506                 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
507         }
508 }
509
510 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
511 {
512         struct net_bridge_mdb_entry *mp;
513
514         mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
515         WARN_ON(!hlist_unhashed(&mp->mdb_node));
516         WARN_ON(mp->ports);
517
518         del_timer_sync(&mp->timer);
519         kfree_rcu(mp, rcu);
520 }
521
522 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
523 {
524         struct net_bridge *br = mp->br;
525
526         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
527                                br_mdb_rht_params);
528         hlist_del_init_rcu(&mp->mdb_node);
529         hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
530         queue_work(system_long_wq, &br->mcast_gc_work);
531 }
532
533 static void br_multicast_group_expired(struct timer_list *t)
534 {
535         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
536         struct net_bridge *br = mp->br;
537
538         spin_lock(&br->multicast_lock);
539         if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
540             timer_pending(&mp->timer))
541                 goto out;
542
543         br_multicast_host_leave(mp, true);
544
545         if (mp->ports)
546                 goto out;
547         br_multicast_del_mdb_entry(mp);
548 out:
549         spin_unlock(&br->multicast_lock);
550 }
551
552 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
553 {
554         struct net_bridge_group_src *src;
555
556         src = container_of(gc, struct net_bridge_group_src, mcast_gc);
557         WARN_ON(!hlist_unhashed(&src->node));
558
559         del_timer_sync(&src->timer);
560         kfree_rcu(src, rcu);
561 }
562
563 void br_multicast_del_group_src(struct net_bridge_group_src *src)
564 {
565         struct net_bridge *br = src->pg->key.port->br;
566
567         br_multicast_fwd_src_remove(src);
568         hlist_del_init_rcu(&src->node);
569         src->pg->src_ents--;
570         hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
571         queue_work(system_long_wq, &br->mcast_gc_work);
572 }
573
574 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
575 {
576         struct net_bridge_port_group *pg;
577
578         pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
579         WARN_ON(!hlist_unhashed(&pg->mglist));
580         WARN_ON(!hlist_empty(&pg->src_list));
581
582         del_timer_sync(&pg->rexmit_timer);
583         del_timer_sync(&pg->timer);
584         kfree_rcu(pg, rcu);
585 }
586
587 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
588                          struct net_bridge_port_group *pg,
589                          struct net_bridge_port_group __rcu **pp)
590 {
591         struct net_bridge *br = pg->key.port->br;
592         struct net_bridge_group_src *ent;
593         struct hlist_node *tmp;
594
595         rcu_assign_pointer(*pp, pg->next);
596         hlist_del_init(&pg->mglist);
597         br_multicast_eht_clean_sets(pg);
598         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
599                 br_multicast_del_group_src(ent);
600         br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
601         if (!br_multicast_is_star_g(&mp->addr)) {
602                 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
603                                        br_sg_port_rht_params);
604                 br_multicast_sg_del_exclude_ports(mp);
605         } else {
606                 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
607         }
608         hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
609         queue_work(system_long_wq, &br->mcast_gc_work);
610
611         if (!mp->ports && !mp->host_joined && netif_running(br->dev))
612                 mod_timer(&mp->timer, jiffies);
613 }
614
615 static void br_multicast_find_del_pg(struct net_bridge *br,
616                                      struct net_bridge_port_group *pg)
617 {
618         struct net_bridge_port_group __rcu **pp;
619         struct net_bridge_mdb_entry *mp;
620         struct net_bridge_port_group *p;
621
622         mp = br_mdb_ip_get(br, &pg->key.addr);
623         if (WARN_ON(!mp))
624                 return;
625
626         for (pp = &mp->ports;
627              (p = mlock_dereference(*pp, br)) != NULL;
628              pp = &p->next) {
629                 if (p != pg)
630                         continue;
631
632                 br_multicast_del_pg(mp, pg, pp);
633                 return;
634         }
635
636         WARN_ON(1);
637 }
638
639 static void br_multicast_port_group_expired(struct timer_list *t)
640 {
641         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
642         struct net_bridge_group_src *src_ent;
643         struct net_bridge *br = pg->key.port->br;
644         struct hlist_node *tmp;
645         bool changed;
646
647         spin_lock(&br->multicast_lock);
648         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
649             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
650                 goto out;
651
652         changed = !!(pg->filter_mode == MCAST_EXCLUDE);
653         pg->filter_mode = MCAST_INCLUDE;
654         hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
655                 if (!timer_pending(&src_ent->timer)) {
656                         br_multicast_del_group_src(src_ent);
657                         changed = true;
658                 }
659         }
660
661         if (hlist_empty(&pg->src_list)) {
662                 br_multicast_find_del_pg(br, pg);
663         } else if (changed) {
664                 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
665
666                 if (br_multicast_is_star_g(&pg->key.addr))
667                         br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
668
669                 if (WARN_ON(!mp))
670                         goto out;
671                 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
672         }
673 out:
674         spin_unlock(&br->multicast_lock);
675 }
676
677 static void br_multicast_gc(struct hlist_head *head)
678 {
679         struct net_bridge_mcast_gc *gcent;
680         struct hlist_node *tmp;
681
682         hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
683                 hlist_del_init(&gcent->gc_node);
684                 gcent->destroy(gcent);
685         }
686 }
687
688 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
689                                                     struct net_bridge_port_group *pg,
690                                                     __be32 ip_dst, __be32 group,
691                                                     bool with_srcs, bool over_lmqt,
692                                                     u8 sflag, u8 *igmp_type,
693                                                     bool *need_rexmit)
694 {
695         struct net_bridge_port *p = pg ? pg->key.port : NULL;
696         struct net_bridge_group_src *ent;
697         size_t pkt_size, igmp_hdr_size;
698         unsigned long now = jiffies;
699         struct igmpv3_query *ihv3;
700         void *csum_start = NULL;
701         __sum16 *csum = NULL;
702         struct sk_buff *skb;
703         struct igmphdr *ih;
704         struct ethhdr *eth;
705         unsigned long lmqt;
706         struct iphdr *iph;
707         u16 lmqt_srcs = 0;
708
709         igmp_hdr_size = sizeof(*ih);
710         if (br->multicast_igmp_version == 3) {
711                 igmp_hdr_size = sizeof(*ihv3);
712                 if (pg && with_srcs) {
713                         lmqt = now + (br->multicast_last_member_interval *
714                                       br->multicast_last_member_count);
715                         hlist_for_each_entry(ent, &pg->src_list, node) {
716                                 if (over_lmqt == time_after(ent->timer.expires,
717                                                             lmqt) &&
718                                     ent->src_query_rexmit_cnt > 0)
719                                         lmqt_srcs++;
720                         }
721
722                         if (!lmqt_srcs)
723                                 return NULL;
724                         igmp_hdr_size += lmqt_srcs * sizeof(__be32);
725                 }
726         }
727
728         pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
729         if ((p && pkt_size > p->dev->mtu) ||
730             pkt_size > br->dev->mtu)
731                 return NULL;
732
733         skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
734         if (!skb)
735                 goto out;
736
737         skb->protocol = htons(ETH_P_IP);
738
739         skb_reset_mac_header(skb);
740         eth = eth_hdr(skb);
741
742         ether_addr_copy(eth->h_source, br->dev->dev_addr);
743         ip_eth_mc_map(ip_dst, eth->h_dest);
744         eth->h_proto = htons(ETH_P_IP);
745         skb_put(skb, sizeof(*eth));
746
747         skb_set_network_header(skb, skb->len);
748         iph = ip_hdr(skb);
749         iph->tot_len = htons(pkt_size - sizeof(*eth));
750
751         iph->version = 4;
752         iph->ihl = 6;
753         iph->tos = 0xc0;
754         iph->id = 0;
755         iph->frag_off = htons(IP_DF);
756         iph->ttl = 1;
757         iph->protocol = IPPROTO_IGMP;
758         iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
759                      inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
760         iph->daddr = ip_dst;
761         ((u8 *)&iph[1])[0] = IPOPT_RA;
762         ((u8 *)&iph[1])[1] = 4;
763         ((u8 *)&iph[1])[2] = 0;
764         ((u8 *)&iph[1])[3] = 0;
765         ip_send_check(iph);
766         skb_put(skb, 24);
767
768         skb_set_transport_header(skb, skb->len);
769         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
770
771         switch (br->multicast_igmp_version) {
772         case 2:
773                 ih = igmp_hdr(skb);
774                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
775                 ih->code = (group ? br->multicast_last_member_interval :
776                                     br->multicast_query_response_interval) /
777                            (HZ / IGMP_TIMER_SCALE);
778                 ih->group = group;
779                 ih->csum = 0;
780                 csum = &ih->csum;
781                 csum_start = (void *)ih;
782                 break;
783         case 3:
784                 ihv3 = igmpv3_query_hdr(skb);
785                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
786                 ihv3->code = (group ? br->multicast_last_member_interval :
787                                       br->multicast_query_response_interval) /
788                              (HZ / IGMP_TIMER_SCALE);
789                 ihv3->group = group;
790                 ihv3->qqic = br->multicast_query_interval / HZ;
791                 ihv3->nsrcs = htons(lmqt_srcs);
792                 ihv3->resv = 0;
793                 ihv3->suppress = sflag;
794                 ihv3->qrv = 2;
795                 ihv3->csum = 0;
796                 csum = &ihv3->csum;
797                 csum_start = (void *)ihv3;
798                 if (!pg || !with_srcs)
799                         break;
800
801                 lmqt_srcs = 0;
802                 hlist_for_each_entry(ent, &pg->src_list, node) {
803                         if (over_lmqt == time_after(ent->timer.expires,
804                                                     lmqt) &&
805                             ent->src_query_rexmit_cnt > 0) {
806                                 ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
807                                 ent->src_query_rexmit_cnt--;
808                                 if (need_rexmit && ent->src_query_rexmit_cnt)
809                                         *need_rexmit = true;
810                         }
811                 }
812                 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
813                         kfree_skb(skb);
814                         return NULL;
815                 }
816                 break;
817         }
818
819         if (WARN_ON(!csum || !csum_start)) {
820                 kfree_skb(skb);
821                 return NULL;
822         }
823
824         *csum = ip_compute_csum(csum_start, igmp_hdr_size);
825         skb_put(skb, igmp_hdr_size);
826         __skb_pull(skb, sizeof(*eth));
827
828 out:
829         return skb;
830 }
831
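For reference, the frame sizes produced by the layout built above: an IGMPv2 query is 14 (Ethernet) + 20 (IPv4) + 4 (Router Alert option) + 8 (struct igmphdr) = 46 bytes, and an IGMPv3 group-and-source query is 14 + 20 + 4 + 12 (struct igmpv3_query) + 4 bytes per listed source. With the driver defaults of a 1 s last member interval and a last member count of 2, the LMQT used to split sources into "over" and "under" sets is roughly 2 s from now.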
832 #if IS_ENABLED(CONFIG_IPV6)
833 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
834                                                     struct net_bridge_port_group *pg,
835                                                     const struct in6_addr *ip6_dst,
836                                                     const struct in6_addr *group,
837                                                     bool with_srcs, bool over_llqt,
838                                                     u8 sflag, u8 *igmp_type,
839                                                     bool *need_rexmit)
840 {
841         struct net_bridge_port *p = pg ? pg->key.port : NULL;
842         struct net_bridge_group_src *ent;
843         size_t pkt_size, mld_hdr_size;
844         unsigned long now = jiffies;
845         struct mld2_query *mld2q;
846         void *csum_start = NULL;
847         unsigned long interval;
848         __sum16 *csum = NULL;
849         struct ipv6hdr *ip6h;
850         struct mld_msg *mldq;
851         struct sk_buff *skb;
852         unsigned long llqt;
853         struct ethhdr *eth;
854         u16 llqt_srcs = 0;
855         u8 *hopopt;
856
857         mld_hdr_size = sizeof(*mldq);
858         if (br->multicast_mld_version == 2) {
859                 mld_hdr_size = sizeof(*mld2q);
860                 if (pg && with_srcs) {
861                         llqt = now + (br->multicast_last_member_interval *
862                                       br->multicast_last_member_count);
863                         hlist_for_each_entry(ent, &pg->src_list, node) {
864                                 if (over_llqt == time_after(ent->timer.expires,
865                                                             llqt) &&
866                                     ent->src_query_rexmit_cnt > 0)
867                                         llqt_srcs++;
868                         }
869
870                         if (!llqt_srcs)
871                                 return NULL;
872                         mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
873                 }
874         }
875
876         pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
877         if ((p && pkt_size > p->dev->mtu) ||
878             pkt_size > br->dev->mtu)
879                 return NULL;
880
881         skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
882         if (!skb)
883                 goto out;
884
885         skb->protocol = htons(ETH_P_IPV6);
886
887         /* Ethernet header */
888         skb_reset_mac_header(skb);
889         eth = eth_hdr(skb);
890
891         ether_addr_copy(eth->h_source, br->dev->dev_addr);
892         eth->h_proto = htons(ETH_P_IPV6);
893         skb_put(skb, sizeof(*eth));
894
895         /* IPv6 header + HbH option */
896         skb_set_network_header(skb, skb->len);
897         ip6h = ipv6_hdr(skb);
898
899         *(__force __be32 *)ip6h = htonl(0x60000000);
900         ip6h->payload_len = htons(8 + mld_hdr_size);
901         ip6h->nexthdr = IPPROTO_HOPOPTS;
902         ip6h->hop_limit = 1;
903         ip6h->daddr = *ip6_dst;
904         if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
905                                &ip6h->saddr)) {
906                 kfree_skb(skb);
907                 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
908                 return NULL;
909         }
910
911         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
912         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
913
914         hopopt = (u8 *)(ip6h + 1);
915         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
916         hopopt[1] = 0;                          /* length of HbH */
917         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
918         hopopt[3] = 2;                          /* Length of RA Option */
919         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
920         hopopt[5] = 0;
921         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
922         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
923
924         skb_put(skb, sizeof(*ip6h) + 8);
925
926         /* ICMPv6 */
927         skb_set_transport_header(skb, skb->len);
928         interval = ipv6_addr_any(group) ?
929                         br->multicast_query_response_interval :
930                         br->multicast_last_member_interval;
931         *igmp_type = ICMPV6_MGM_QUERY;
932         switch (br->multicast_mld_version) {
933         case 1:
934                 mldq = (struct mld_msg *)icmp6_hdr(skb);
935                 mldq->mld_type = ICMPV6_MGM_QUERY;
936                 mldq->mld_code = 0;
937                 mldq->mld_cksum = 0;
938                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
939                 mldq->mld_reserved = 0;
940                 mldq->mld_mca = *group;
941                 csum = &mldq->mld_cksum;
942                 csum_start = (void *)mldq;
943                 break;
944         case 2:
945                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
946                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
947                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
948                 mld2q->mld2q_code = 0;
949                 mld2q->mld2q_cksum = 0;
950                 mld2q->mld2q_resv1 = 0;
951                 mld2q->mld2q_resv2 = 0;
952                 mld2q->mld2q_suppress = sflag;
953                 mld2q->mld2q_qrv = 2;
954                 mld2q->mld2q_nsrcs = htons(llqt_srcs);
955                 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
956                 mld2q->mld2q_mca = *group;
957                 csum = &mld2q->mld2q_cksum;
958                 csum_start = (void *)mld2q;
959                 if (!pg || !with_srcs)
960                         break;
961
962                 llqt_srcs = 0;
963                 hlist_for_each_entry(ent, &pg->src_list, node) {
964                         if (over_llqt == time_after(ent->timer.expires,
965                                                     llqt) &&
966                             ent->src_query_rexmit_cnt > 0) {
967                                 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
968                                 ent->src_query_rexmit_cnt--;
969                                 if (need_rexmit && ent->src_query_rexmit_cnt)
970                                         *need_rexmit = true;
971                         }
972                 }
973                 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
974                         kfree_skb(skb);
975                         return NULL;
976                 }
977                 break;
978         }
979
980         if (WARN_ON(!csum || !csum_start)) {
981                 kfree_skb(skb);
982                 return NULL;
983         }
984
985         *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
986                                 IPPROTO_ICMPV6,
987                                 csum_partial(csum_start, mld_hdr_size, 0));
988         skb_put(skb, mld_hdr_size);
989         __skb_pull(skb, sizeof(*eth));
990
991 out:
992         return skb;
993 }
994 #endif
995
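The MLD counterpart built above works out similarly: an MLDv1 general query is 14 (Ethernet) + 40 (IPv6) + 8 (hop-by-hop header carrying the Router Alert) + 24 (struct mld_msg) = 86 bytes, and an MLDv2 query is 14 + 40 + 8 + 28 (struct mld2_query) + 16 bytes per listed source.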
996 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
997                                                 struct net_bridge_port_group *pg,
998                                                 struct br_ip *ip_dst,
999                                                 struct br_ip *group,
1000                                                 bool with_srcs, bool over_lmqt,
1001                                                 u8 sflag, u8 *igmp_type,
1002                                                 bool *need_rexmit)
1003 {
1004         __be32 ip4_dst;
1005
1006         switch (group->proto) {
1007         case htons(ETH_P_IP):
1008                 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1009                 return br_ip4_multicast_alloc_query(br, pg,
1010                                                     ip4_dst, group->dst.ip4,
1011                                                     with_srcs, over_lmqt,
1012                                                     sflag, igmp_type,
1013                                                     need_rexmit);
1014 #if IS_ENABLED(CONFIG_IPV6)
1015         case htons(ETH_P_IPV6): {
1016                 struct in6_addr ip6_dst;
1017
1018                 if (ip_dst)
1019                         ip6_dst = ip_dst->dst.ip6;
1020                 else
1021                         ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1022                                       htonl(1));
1023
1024                 return br_ip6_multicast_alloc_query(br, pg,
1025                                                     &ip6_dst, &group->dst.ip6,
1026                                                     with_srcs, over_lmqt,
1027                                                     sflag, igmp_type,
1028                                                     need_rexmit);
1029         }
1030 #endif
1031         }
1032         return NULL;
1033 }
1034
1035 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1036                                                     struct br_ip *group)
1037 {
1038         struct net_bridge_mdb_entry *mp;
1039         int err;
1040
1041         mp = br_mdb_ip_get(br, group);
1042         if (mp)
1043                 return mp;
1044
1045         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1046                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1047                 return ERR_PTR(-E2BIG);
1048         }
1049
1050         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
1051         if (unlikely(!mp))
1052                 return ERR_PTR(-ENOMEM);
1053
1054         mp->br = br;
1055         mp->addr = *group;
1056         mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1057         timer_setup(&mp->timer, br_multicast_group_expired, 0);
1058         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1059                                             br_mdb_rht_params);
1060         if (err) {
1061                 kfree(mp);
1062                 mp = ERR_PTR(err);
1063         } else {
1064                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1065         }
1066
1067         return mp;
1068 }
1069
1070 static void br_multicast_group_src_expired(struct timer_list *t)
1071 {
1072         struct net_bridge_group_src *src = from_timer(src, t, timer);
1073         struct net_bridge_port_group *pg;
1074         struct net_bridge *br = src->br;
1075
1076         spin_lock(&br->multicast_lock);
1077         if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
1078             timer_pending(&src->timer))
1079                 goto out;
1080
1081         pg = src->pg;
1082         if (pg->filter_mode == MCAST_INCLUDE) {
1083                 br_multicast_del_group_src(src);
1084                 if (!hlist_empty(&pg->src_list))
1085                         goto out;
1086                 br_multicast_find_del_pg(br, pg);
1087         } else {
1088                 br_multicast_fwd_src_handle(src);
1089         }
1090
1091 out:
1092         spin_unlock(&br->multicast_lock);
1093 }
1094
1095 struct net_bridge_group_src *
1096 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1097 {
1098         struct net_bridge_group_src *ent;
1099
1100         switch (ip->proto) {
1101         case htons(ETH_P_IP):
1102                 hlist_for_each_entry(ent, &pg->src_list, node)
1103                         if (ip->src.ip4 == ent->addr.src.ip4)
1104                                 return ent;
1105                 break;
1106 #if IS_ENABLED(CONFIG_IPV6)
1107         case htons(ETH_P_IPV6):
1108                 hlist_for_each_entry(ent, &pg->src_list, node)
1109                         if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1110                                 return ent;
1111                 break;
1112 #endif
1113         }
1114
1115         return NULL;
1116 }
1117
1118 static struct net_bridge_group_src *
1119 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1120 {
1121         struct net_bridge_group_src *grp_src;
1122
1123         if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1124                 return NULL;
1125
1126         switch (src_ip->proto) {
1127         case htons(ETH_P_IP):
1128                 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1129                     ipv4_is_multicast(src_ip->src.ip4))
1130                         return NULL;
1131                 break;
1132 #if IS_ENABLED(CONFIG_IPV6)
1133         case htons(ETH_P_IPV6):
1134                 if (ipv6_addr_any(&src_ip->src.ip6) ||
1135                     ipv6_addr_is_multicast(&src_ip->src.ip6))
1136                         return NULL;
1137                 break;
1138 #endif
1139         }
1140
1141         grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
1142         if (unlikely(!grp_src))
1143                 return NULL;
1144
1145         grp_src->pg = pg;
1146         grp_src->br = pg->key.port->br;
1147         grp_src->addr = *src_ip;
1148         grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1149         timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1150
1151         hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1152         pg->src_ents++;
1153
1154         return grp_src;
1155 }
1156
1157 struct net_bridge_port_group *br_multicast_new_port_group(
1158                         struct net_bridge_port *port,
1159                         struct br_ip *group,
1160                         struct net_bridge_port_group __rcu *next,
1161                         unsigned char flags,
1162                         const unsigned char *src,
1163                         u8 filter_mode,
1164                         u8 rt_protocol)
1165 {
1166         struct net_bridge_port_group *p;
1167
1168         p = kzalloc(sizeof(*p), GFP_ATOMIC);
1169         if (unlikely(!p))
1170                 return NULL;
1171
1172         p->key.addr = *group;
1173         p->key.port = port;
1174         p->flags = flags;
1175         p->filter_mode = filter_mode;
1176         p->rt_protocol = rt_protocol;
1177         p->eht_host_tree = RB_ROOT;
1178         p->eht_set_tree = RB_ROOT;
1179         p->mcast_gc.destroy = br_multicast_destroy_port_group;
1180         INIT_HLIST_HEAD(&p->src_list);
1181
1182         if (!br_multicast_is_star_g(group) &&
1183             rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
1184                                           br_sg_port_rht_params)) {
1185                 kfree(p);
1186                 return NULL;
1187         }
1188
1189         rcu_assign_pointer(p->next, next);
1190         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
1191         timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
1192         hlist_add_head(&p->mglist, &port->mglist);
1193
1194         if (src)
1195                 memcpy(p->eth_addr, src, ETH_ALEN);
1196         else
1197                 eth_broadcast_addr(p->eth_addr);
1198
1199         return p;
1200 }
1201
1202 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
1203 {
1204         if (!mp->host_joined) {
1205                 mp->host_joined = true;
1206                 if (br_multicast_is_star_g(&mp->addr))
1207                         br_multicast_star_g_host_state(mp);
1208                 if (notify)
1209                         br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
1210         }
1211
1212         if (br_group_is_l2(&mp->addr))
1213                 return;
1214
1215         mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
1216 }
1217
1218 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1219 {
1220         if (!mp->host_joined)
1221                 return;
1222
1223         mp->host_joined = false;
1224         if (br_multicast_is_star_g(&mp->addr))
1225                 br_multicast_star_g_host_state(mp);
1226         if (notify)
1227                 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1228 }
1229
1230 static struct net_bridge_port_group *
1231 __br_multicast_add_group(struct net_bridge *br,
1232                          struct net_bridge_port *port,
1233                          struct br_ip *group,
1234                          const unsigned char *src,
1235                          u8 filter_mode,
1236                          bool igmpv2_mldv1,
1237                          bool blocked)
1238 {
1239         struct net_bridge_port_group __rcu **pp;
1240         struct net_bridge_port_group *p = NULL;
1241         struct net_bridge_mdb_entry *mp;
1242         unsigned long now = jiffies;
1243
1244         if (!netif_running(br->dev) ||
1245             (port && port->state == BR_STATE_DISABLED))
1246                 goto out;
1247
1248         mp = br_multicast_new_group(br, group);
1249         if (IS_ERR(mp))
1250                 return ERR_PTR(PTR_ERR(mp));
1251
1252         if (!port) {
1253                 br_multicast_host_join(mp, true);
1254                 goto out;
1255         }
1256
1257         for (pp = &mp->ports;
1258              (p = mlock_dereference(*pp, br)) != NULL;
1259              pp = &p->next) {
1260                 if (br_port_group_equal(p, port, src))
1261                         goto found;
1262                 if ((unsigned long)p->key.port < (unsigned long)port)
1263                         break;
1264         }
1265
1266         p = br_multicast_new_port_group(port, group, *pp, 0, src,
1267                                         filter_mode, RTPROT_KERNEL);
1268         if (unlikely(!p)) {
1269                 p = ERR_PTR(-ENOMEM);
1270                 goto out;
1271         }
1272         rcu_assign_pointer(*pp, p);
1273         if (blocked)
1274                 p->flags |= MDB_PG_FLAGS_BLOCKED;
1275         br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
1276
1277 found:
1278         if (igmpv2_mldv1)
1279                 mod_timer(&p->timer, now + br->multicast_membership_interval);
1280
1281 out:
1282         return p;
1283 }
1284
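With the bridge defaults, the timer arithmetic above works out as follows: multicast_membership_interval is 260 s, i.e. 2 (robustness) * 125 s (query interval) + 10 s (query response interval), so an IGMPv2/MLDv1-style join keeps the port group entry alive for 260 s unless it is refreshed by another report.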
1285 static int br_multicast_add_group(struct net_bridge *br,
1286                                   struct net_bridge_port *port,
1287                                   struct br_ip *group,
1288                                   const unsigned char *src,
1289                                   u8 filter_mode,
1290                                   bool igmpv2_mldv1)
1291 {
1292         struct net_bridge_port_group *pg;
1293         int err;
1294
1295         spin_lock(&br->multicast_lock);
1296         pg = __br_multicast_add_group(br, port, group, src, filter_mode,
1297                                       igmpv2_mldv1, false);
1298         /* NULL is considered valid for host joined groups */
1299         err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
1300         spin_unlock(&br->multicast_lock);
1301
1302         return err;
1303 }
1304
1305 static int br_ip4_multicast_add_group(struct net_bridge *br,
1306                                       struct net_bridge_port *port,
1307                                       __be32 group,
1308                                       __u16 vid,
1309                                       const unsigned char *src,
1310                                       bool igmpv2)
1311 {
1312         struct br_ip br_group;
1313         u8 filter_mode;
1314
1315         if (ipv4_is_local_multicast(group))
1316                 return 0;
1317
1318         memset(&br_group, 0, sizeof(br_group));
1319         br_group.dst.ip4 = group;
1320         br_group.proto = htons(ETH_P_IP);
1321         br_group.vid = vid;
1322         filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1323
1324         return br_multicast_add_group(br, port, &br_group, src, filter_mode,
1325                                       igmpv2);
1326 }
1327
1328 #if IS_ENABLED(CONFIG_IPV6)
1329 static int br_ip6_multicast_add_group(struct net_bridge *br,
1330                                       struct net_bridge_port *port,
1331                                       const struct in6_addr *group,
1332                                       __u16 vid,
1333                                       const unsigned char *src,
1334                                       bool mldv1)
1335 {
1336         struct br_ip br_group;
1337         u8 filter_mode;
1338
1339         if (ipv6_addr_is_ll_all_nodes(group))
1340                 return 0;
1341
1342         memset(&br_group, 0, sizeof(br_group));
1343         br_group.dst.ip6 = *group;
1344         br_group.proto = htons(ETH_P_IPV6);
1345         br_group.vid = vid;
1346         filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1347
1348         return br_multicast_add_group(br, port, &br_group, src, filter_mode,
1349                                       mldv1);
1350 }
1351 #endif
1352
1353 static void br_multicast_router_expired(struct timer_list *t)
1354 {
1355         struct net_bridge_port *port =
1356                         from_timer(port, t, multicast_router_timer);
1357         struct net_bridge *br = port->br;
1358
1359         spin_lock(&br->multicast_lock);
1360         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1361             port->multicast_router == MDB_RTR_TYPE_PERM ||
1362             timer_pending(&port->multicast_router_timer))
1363                 goto out;
1364
1365         __del_port_router(port);
1366 out:
1367         spin_unlock(&br->multicast_lock);
1368 }
1369
1370 static void br_mc_router_state_change(struct net_bridge *p,
1371                                       bool is_mc_router)
1372 {
1373         struct switchdev_attr attr = {
1374                 .orig_dev = p->dev,
1375                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
1376                 .flags = SWITCHDEV_F_DEFER,
1377                 .u.mrouter = is_mc_router,
1378         };
1379
1380         switchdev_port_attr_set(p->dev, &attr);
1381 }
1382
1383 static void br_multicast_local_router_expired(struct timer_list *t)
1384 {
1385         struct net_bridge *br = from_timer(br, t, multicast_router_timer);
1386
1387         spin_lock(&br->multicast_lock);
1388         if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
1389             br->multicast_router == MDB_RTR_TYPE_PERM ||
1390             timer_pending(&br->multicast_router_timer))
1391                 goto out;
1392
1393         br_mc_router_state_change(br, false);
1394 out:
1395         spin_unlock(&br->multicast_lock);
1396 }
1397
1398 static void br_multicast_querier_expired(struct net_bridge *br,
1399                                          struct bridge_mcast_own_query *query)
1400 {
1401         spin_lock(&br->multicast_lock);
1402         if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1403                 goto out;
1404
1405         br_multicast_start_querier(br, query);
1406
1407 out:
1408         spin_unlock(&br->multicast_lock);
1409 }
1410
1411 static void br_ip4_multicast_querier_expired(struct timer_list *t)
1412 {
1413         struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
1414
1415         br_multicast_querier_expired(br, &br->ip4_own_query);
1416 }
1417
1418 #if IS_ENABLED(CONFIG_IPV6)
1419 static void br_ip6_multicast_querier_expired(struct timer_list *t)
1420 {
1421         struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
1422
1423         br_multicast_querier_expired(br, &br->ip6_own_query);
1424 }
1425 #endif
1426
1427 static void br_multicast_select_own_querier(struct net_bridge *br,
1428                                             struct br_ip *ip,
1429                                             struct sk_buff *skb)
1430 {
1431         if (ip->proto == htons(ETH_P_IP))
1432                 br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
1433 #if IS_ENABLED(CONFIG_IPV6)
1434         else
1435                 br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
1436 #endif
1437 }
1438
1439 static void __br_multicast_send_query(struct net_bridge *br,
1440                                       struct net_bridge_port *port,
1441                                       struct net_bridge_port_group *pg,
1442                                       struct br_ip *ip_dst,
1443                                       struct br_ip *group,
1444                                       bool with_srcs,
1445                                       u8 sflag,
1446                                       bool *need_rexmit)
1447 {
1448         bool over_lmqt = !!sflag;
1449         struct sk_buff *skb;
1450         u8 igmp_type;
1451
1452 again_under_lmqt:
1453         skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
1454                                        over_lmqt, sflag, &igmp_type,
1455                                        need_rexmit);
1456         if (!skb)
1457                 return;
1458
1459         if (port) {
1460                 skb->dev = port->dev;
1461                 br_multicast_count(br, port, skb, igmp_type,
1462                                    BR_MCAST_DIR_TX);
1463                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
1464                         dev_net(port->dev), NULL, skb, NULL, skb->dev,
1465                         br_dev_queue_push_xmit);
1466
1467                 if (over_lmqt && with_srcs && sflag) {
1468                         over_lmqt = false;
1469                         goto again_under_lmqt;
1470                 }
1471         } else {
1472                 br_multicast_select_own_querier(br, group, skb);
1473                 br_multicast_count(br, port, skb, igmp_type,
1474                                    BR_MCAST_DIR_RX);
1475                 netif_rx(skb);
1476         }
1477 }
1478
1479 static void br_multicast_send_query(struct net_bridge *br,
1480                                     struct net_bridge_port *port,
1481                                     struct bridge_mcast_own_query *own_query)
1482 {
1483         struct bridge_mcast_other_query *other_query = NULL;
1484         struct br_ip br_group;
1485         unsigned long time;
1486
1487         if (!netif_running(br->dev) ||
1488             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1489             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1490                 return;
1491
1492         memset(&br_group.dst, 0, sizeof(br_group.dst));
1493
1494         if (port ? (own_query == &port->ip4_own_query) :
1495                    (own_query == &br->ip4_own_query)) {
1496                 other_query = &br->ip4_other_query;
1497                 br_group.proto = htons(ETH_P_IP);
1498 #if IS_ENABLED(CONFIG_IPV6)
1499         } else {
1500                 other_query = &br->ip6_other_query;
1501                 br_group.proto = htons(ETH_P_IPV6);
1502 #endif
1503         }
1504
1505         if (!other_query || timer_pending(&other_query->timer))
1506                 return;
1507
1508         __br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
1509                                   NULL);
1510
1511         time = jiffies;
1512         time += own_query->startup_sent < br->multicast_startup_query_count ?
1513                 br->multicast_startup_query_interval :
1514                 br->multicast_query_interval;
1515         mod_timer(&own_query->timer, time);
1516 }
1517
1518 static void
1519 br_multicast_port_query_expired(struct net_bridge_port *port,
1520                                 struct bridge_mcast_own_query *query)
1521 {
1522         struct net_bridge *br = port->br;
1523
1524         spin_lock(&br->multicast_lock);
1525         if (port->state == BR_STATE_DISABLED ||
1526             port->state == BR_STATE_BLOCKING)
1527                 goto out;
1528
1529         if (query->startup_sent < br->multicast_startup_query_count)
1530                 query->startup_sent++;
1531
1532         br_multicast_send_query(port->br, port, query);
1533
1534 out:
1535         spin_unlock(&br->multicast_lock);
1536 }
1537
1538 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1539 {
1540         struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
1541
1542         br_multicast_port_query_expired(port, &port->ip4_own_query);
1543 }
1544
1545 #if IS_ENABLED(CONFIG_IPV6)
1546 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1547 {
1548         struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
1549
1550         br_multicast_port_query_expired(port, &port->ip6_own_query);
1551 }
1552 #endif
1553
1554 static void br_multicast_port_group_rexmit(struct timer_list *t)
1555 {
1556         struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
1557         struct bridge_mcast_other_query *other_query = NULL;
1558         struct net_bridge *br = pg->key.port->br;
1559         bool need_rexmit = false;
1560
1561         spin_lock(&br->multicast_lock);
1562         if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
1563             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1564             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1565                 goto out;
1566
1567         if (pg->key.addr.proto == htons(ETH_P_IP))
1568                 other_query = &br->ip4_other_query;
1569 #if IS_ENABLED(CONFIG_IPV6)
1570         else
1571                 other_query = &br->ip6_other_query;
1572 #endif
1573
1574         if (!other_query || timer_pending(&other_query->timer))
1575                 goto out;
1576
1577         if (pg->grp_query_rexmit_cnt) {
1578                 pg->grp_query_rexmit_cnt--;
1579                 __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1580                                           &pg->key.addr, false, 1, NULL);
1581         }
1582         __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1583                                   &pg->key.addr, true, 0, &need_rexmit);
1584
1585         if (pg->grp_query_rexmit_cnt || need_rexmit)
1586                 mod_timer(&pg->rexmit_timer, jiffies +
1587                                              br->multicast_last_member_interval);
1588 out:
1589         spin_unlock(&br->multicast_lock);
1590 }
1591
1592 static void br_mc_disabled_update(struct net_device *dev, bool value)
1593 {
1594         struct switchdev_attr attr = {
1595                 .orig_dev = dev,
1596                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1597                 .flags = SWITCHDEV_F_DEFER,
1598                 .u.mc_disabled = !value,
1599         };
1600
1601         switchdev_port_attr_set(dev, &attr);
1602 }
1603
1604 int br_multicast_add_port(struct net_bridge_port *port)
1605 {
1606         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1607
1608         timer_setup(&port->multicast_router_timer,
1609                     br_multicast_router_expired, 0);
1610         timer_setup(&port->ip4_own_query.timer,
1611                     br_ip4_multicast_port_query_expired, 0);
1612 #if IS_ENABLED(CONFIG_IPV6)
1613         timer_setup(&port->ip6_own_query.timer,
1614                     br_ip6_multicast_port_query_expired, 0);
1615 #endif
1616         br_mc_disabled_update(port->dev,
1617                               br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
1618
1619         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1620         if (!port->mcast_stats)
1621                 return -ENOMEM;
1622
1623         return 0;
1624 }
1625
1626 void br_multicast_del_port(struct net_bridge_port *port)
1627 {
1628         struct net_bridge *br = port->br;
1629         struct net_bridge_port_group *pg;
1630         HLIST_HEAD(deleted_head);
1631         struct hlist_node *n;
1632
1633         /* Take care of the remaining groups, only perm ones should be left */
1634         spin_lock_bh(&br->multicast_lock);
1635         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1636                 br_multicast_find_del_pg(br, pg);
1637         hlist_move_list(&br->mcast_gc_list, &deleted_head);
1638         spin_unlock_bh(&br->multicast_lock);
1639         br_multicast_gc(&deleted_head);
1640         del_timer_sync(&port->multicast_router_timer);
1641         free_percpu(port->mcast_stats);
1642 }
1643
1644 static void br_multicast_enable(struct bridge_mcast_own_query *query)
1645 {
1646         query->startup_sent = 0;
1647
1648         if (try_to_del_timer_sync(&query->timer) >= 0 ||
1649             del_timer(&query->timer))
1650                 mod_timer(&query->timer, jiffies);
1651 }
1652
1653 static void __br_multicast_enable_port(struct net_bridge_port *port)
1654 {
1655         struct net_bridge *br = port->br;
1656
1657         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
1658                 return;
1659
1660         br_multicast_enable(&port->ip4_own_query);
1661 #if IS_ENABLED(CONFIG_IPV6)
1662         br_multicast_enable(&port->ip6_own_query);
1663 #endif
1664         if (port->multicast_router == MDB_RTR_TYPE_PERM &&
1665             hlist_unhashed(&port->rlist))
1666                 br_multicast_add_router(br, port);
1667 }
1668
1669 void br_multicast_enable_port(struct net_bridge_port *port)
1670 {
1671         struct net_bridge *br = port->br;
1672
1673         spin_lock(&br->multicast_lock);
1674         __br_multicast_enable_port(port);
1675         spin_unlock(&br->multicast_lock);
1676 }
1677
1678 void br_multicast_disable_port(struct net_bridge_port *port)
1679 {
1680         struct net_bridge *br = port->br;
1681         struct net_bridge_port_group *pg;
1682         struct hlist_node *n;
1683
1684         spin_lock(&br->multicast_lock);
1685         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1686                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
1687                         br_multicast_find_del_pg(br, pg);
1688
1689         __del_port_router(port);
1690
1691         del_timer(&port->multicast_router_timer);
1692         del_timer(&port->ip4_own_query.timer);
1693 #if IS_ENABLED(CONFIG_IPV6)
1694         del_timer(&port->ip6_own_query.timer);
1695 #endif
1696         spin_unlock(&br->multicast_lock);
1697 }
1698
1699 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1700 {
1701         struct net_bridge_group_src *ent;
1702         struct hlist_node *tmp;
1703         int deleted = 0;
1704
1705         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1706                 if (ent->flags & BR_SGRP_F_DELETE) {
1707                         br_multicast_del_group_src(ent);
1708                         deleted++;
1709                 }
1710
1711         return deleted;
1712 }
1713
1714 static void __grp_src_mod_timer(struct net_bridge_group_src *src,
1715                                 unsigned long expires)
1716 {
1717         mod_timer(&src->timer, expires);
1718         br_multicast_fwd_src_handle(src);
1719 }
1720
1721 static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
1722 {
1723         struct bridge_mcast_other_query *other_query = NULL;
1724         struct net_bridge *br = pg->key.port->br;
1725         u32 lmqc = br->multicast_last_member_count;
1726         unsigned long lmqt, lmi, now = jiffies;
1727         struct net_bridge_group_src *ent;
1728
1729         if (!netif_running(br->dev) ||
1730             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1731                 return;
1732
1733         if (pg->key.addr.proto == htons(ETH_P_IP))
1734                 other_query = &br->ip4_other_query;
1735 #if IS_ENABLED(CONFIG_IPV6)
1736         else
1737                 other_query = &br->ip6_other_query;
1738 #endif
1739
1740         lmqt = now + br_multicast_lmqt(br);
1741         hlist_for_each_entry(ent, &pg->src_list, node) {
1742                 if (ent->flags & BR_SGRP_F_SEND) {
1743                         ent->flags &= ~BR_SGRP_F_SEND;
1744                         if (ent->timer.expires > lmqt) {
1745                                 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1746                                     other_query &&
1747                                     !timer_pending(&other_query->timer))
1748                                         ent->src_query_rexmit_cnt = lmqc;
1749                                 __grp_src_mod_timer(ent, lmqt);
1750                         }
1751                 }
1752         }
1753
1754         if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
1755             !other_query || timer_pending(&other_query->timer))
1756                 return;
1757
1758         __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1759                                   &pg->key.addr, true, 1, NULL);
1760
1761         lmi = now + br->multicast_last_member_interval;
1762         if (!timer_pending(&pg->rexmit_timer) ||
1763             time_after(pg->rexmit_timer.expires, lmi))
1764                 mod_timer(&pg->rexmit_timer, lmi);
1765 }
1766
1767 static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
1768 {
1769         struct bridge_mcast_other_query *other_query = NULL;
1770         struct net_bridge *br = pg->key.port->br;
1771         unsigned long now = jiffies, lmi;
1772
1773         if (!netif_running(br->dev) ||
1774             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1775                 return;
1776
1777         if (pg->key.addr.proto == htons(ETH_P_IP))
1778                 other_query = &br->ip4_other_query;
1779 #if IS_ENABLED(CONFIG_IPV6)
1780         else
1781                 other_query = &br->ip6_other_query;
1782 #endif
1783
1784         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1785             other_query && !timer_pending(&other_query->timer)) {
1786                 lmi = now + br->multicast_last_member_interval;
1787                 pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
1788                 __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1789                                           &pg->key.addr, false, 0, NULL);
1790                 if (!timer_pending(&pg->rexmit_timer) ||
1791                     time_after(pg->rexmit_timer.expires, lmi))
1792                         mod_timer(&pg->rexmit_timer, lmi);
1793         }
1794
1795         if (pg->filter_mode == MCAST_EXCLUDE &&
1796             (!timer_pending(&pg->timer) ||
1797              time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
1798                 mod_timer(&pg->timer, now + br_multicast_lmqt(br));
1799 }
1800
1801 /* State          Msg type      New state                Actions
1802  * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
1803  * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
1804  * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1805  */
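/* Worked example (hypothetical sources): a port group in INCLUDE {S1} that
 * receives ALLOW (S2) becomes INCLUDE {S1, S2} with S2's source timer set to
 * GMI; an EXCLUDE ({S1}, {S2}) group that receives ALLOW (S2) becomes
 * EXCLUDE ({S1, S2}, {}) and S2's timer is likewise set to GMI.
 */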
1806 static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg, void *h_addr,
1807                                      void *srcs, u32 nsrcs, size_t addr_size,
1808                                      int grec_type)
1809 {
1810         struct net_bridge *br = pg->key.port->br;
1811         struct net_bridge_group_src *ent;
1812         unsigned long now = jiffies;
1813         bool changed = false;
1814         struct br_ip src_ip;
1815         u32 src_idx;
1816
1817         memset(&src_ip, 0, sizeof(src_ip));
1818         src_ip.proto = pg->key.addr.proto;
1819         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1820                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
1821                 ent = br_multicast_find_group_src(pg, &src_ip);
1822                 if (!ent) {
1823                         ent = br_multicast_new_group_src(pg, &src_ip);
1824                         if (ent)
1825                                 changed = true;
1826                 }
1827
1828                 if (ent)
1829                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
1830         }
1831
1832         if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
1833                 changed = true;
1834
1835         return changed;
1836 }
1837
1838 /* State          Msg type      New state                Actions
1839  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
1840  *                                                       Delete (A-B)
1841  *                                                       Group Timer=GMI
1842  */
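/* Worked example (hypothetical sources): INCLUDE {S1, S2} receiving
 * IS_EX (S2, S3) becomes EXCLUDE ({S2}, {S3}): S1 is deleted, S3 is added
 * with its source timer at zero (i.e. on the exclude side) and the group
 * timer is set to GMI.
 */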
1843 static void __grp_src_isexc_incl(struct net_bridge_port_group *pg, void *h_addr,
1844                                  void *srcs, u32 nsrcs, size_t addr_size)
1845 {
1846         struct net_bridge_group_src *ent;
1847         struct br_ip src_ip;
1848         u32 src_idx;
1849
1850         hlist_for_each_entry(ent, &pg->src_list, node)
1851                 ent->flags |= BR_SGRP_F_DELETE;
1852
1853         memset(&src_ip, 0, sizeof(src_ip));
1854         src_ip.proto = pg->key.addr.proto;
1855         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1856                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
1857                 ent = br_multicast_find_group_src(pg, &src_ip);
1858                 if (ent)
1859                         ent->flags &= ~BR_SGRP_F_DELETE;
1860                 else
1861                         ent = br_multicast_new_group_src(pg, &src_ip);
1862                 if (ent)
1863                         br_multicast_fwd_src_handle(ent);
1864         }
1865
1866         __grp_src_delete_marked(pg);
1867 }
1868
1869 /* State          Msg type      New state                Actions
1870  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
1871  *                                                       Delete (X-A)
1872  *                                                       Delete (Y-A)
1873  *                                                       Group Timer=GMI
1874  */
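/* Worked example (hypothetical sources): EXCLUDE ({S1}, {S2}) receiving
 * IS_EX (S2, S3) becomes EXCLUDE ({S3}, {S2}): S1 is deleted, S3 (in A-X-Y)
 * is created with its timer set to GMI, S2 stays excluded and the group
 * timer is refreshed to GMI.
 */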
1875 static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg, void *h_addr,
1876                                  void *srcs, u32 nsrcs, size_t addr_size)
1877 {
1878         struct net_bridge *br = pg->key.port->br;
1879         struct net_bridge_group_src *ent;
1880         unsigned long now = jiffies;
1881         bool changed = false;
1882         struct br_ip src_ip;
1883         u32 src_idx;
1884
1885         hlist_for_each_entry(ent, &pg->src_list, node)
1886                 ent->flags |= BR_SGRP_F_DELETE;
1887
1888         memset(&src_ip, 0, sizeof(src_ip));
1889         src_ip.proto = pg->key.addr.proto;
1890         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1891                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
1892                 ent = br_multicast_find_group_src(pg, &src_ip);
1893                 if (ent) {
1894                         ent->flags &= ~BR_SGRP_F_DELETE;
1895                 } else {
1896                         ent = br_multicast_new_group_src(pg, &src_ip);
1897                         if (ent) {
1898                                 __grp_src_mod_timer(ent,
1899                                                     now + br_multicast_gmi(br));
1900                                 changed = true;
1901                         }
1902                 }
1903         }
1904
1905         if (__grp_src_delete_marked(pg))
1906                 changed = true;
1907
1908         return changed;
1909 }
1910
1911 static bool br_multicast_isexc(struct net_bridge_port_group *pg, void *h_addr,
1912                                void *srcs, u32 nsrcs, size_t addr_size)
1913 {
1914         struct net_bridge *br = pg->key.port->br;
1915         bool changed = false;
1916
1917         switch (pg->filter_mode) {
1918         case MCAST_INCLUDE:
1919                 __grp_src_isexc_incl(pg, h_addr, srcs, nsrcs, addr_size);
1920                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
1921                 changed = true;
1922                 break;
1923         case MCAST_EXCLUDE:
1924                 changed = __grp_src_isexc_excl(pg, h_addr, srcs, nsrcs, addr_size);
1925                 break;
1926         }
1927
1928         pg->filter_mode = MCAST_EXCLUDE;
1929         mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1930
1931         return changed;
1932 }
1933
1934 /* State          Msg type      New state                Actions
1935  * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
1936  *                                                       Send Q(G,A-B)
1937  */
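/* Worked example (hypothetical sources): INCLUDE {S1, S2} receiving TO_IN (S2)
 * stays INCLUDE {S1, S2}; S2's timer is refreshed to GMI and a group-and-source
 * specific query Q(G, {S1}) is sent for the source that was not re-requested.
 */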
1938 static bool __grp_src_toin_incl(struct net_bridge_port_group *pg, void *h_addr,
1939                                 void *srcs, u32 nsrcs, size_t addr_size)
1940 {
1941         struct net_bridge *br = pg->key.port->br;
1942         u32 src_idx, to_send = pg->src_ents;
1943         struct net_bridge_group_src *ent;
1944         unsigned long now = jiffies;
1945         bool changed = false;
1946         struct br_ip src_ip;
1947
1948         hlist_for_each_entry(ent, &pg->src_list, node)
1949                 ent->flags |= BR_SGRP_F_SEND;
1950
1951         memset(&src_ip, 0, sizeof(src_ip));
1952         src_ip.proto = pg->key.addr.proto;
1953         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1954                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
1955                 ent = br_multicast_find_group_src(pg, &src_ip);
1956                 if (ent) {
1957                         ent->flags &= ~BR_SGRP_F_SEND;
1958                         to_send--;
1959                 } else {
1960                         ent = br_multicast_new_group_src(pg, &src_ip);
1961                         if (ent)
1962                                 changed = true;
1963                 }
1964                 if (ent)
1965                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
1966         }
1967
1968         if (to_send)
1969                 __grp_src_query_marked_and_rexmit(pg);
1970
1971         return changed;
1972 }
1973
1974 /* State          Msg type      New state                Actions
1975  * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1976  *                                                       Send Q(G,X-A)
1977  *                                                       Send Q(G)
1978  */
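/* Worked example (hypothetical sources): EXCLUDE ({S1}, {S2}) receiving
 * TO_IN (S2) becomes EXCLUDE ({S1, S2}, {}); S2's timer is set to GMI, a
 * group-and-source specific query Q(G, {S1}) is sent and a group-specific
 * query Q(G) is sent as well.
 */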
1979 static bool __grp_src_toin_excl(struct net_bridge_port_group *pg, void *h_addr,
1980                                 void *srcs, u32 nsrcs, size_t addr_size)
1981 {
1982         struct net_bridge *br = pg->key.port->br;
1983         u32 src_idx, to_send = pg->src_ents;
1984         struct net_bridge_group_src *ent;
1985         unsigned long now = jiffies;
1986         bool changed = false;
1987         struct br_ip src_ip;
1988
1989         hlist_for_each_entry(ent, &pg->src_list, node)
1990                 if (timer_pending(&ent->timer))
1991                         ent->flags |= BR_SGRP_F_SEND;
1992
1993         memset(&src_ip, 0, sizeof(src_ip));
1994         src_ip.proto = pg->key.addr.proto;
1995         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1996                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
1997                 ent = br_multicast_find_group_src(pg, &src_ip);
1998                 if (ent) {
1999                         if (timer_pending(&ent->timer)) {
2000                                 ent->flags &= ~BR_SGRP_F_SEND;
2001                                 to_send--;
2002                         }
2003                 } else {
2004                         ent = br_multicast_new_group_src(pg, &src_ip);
2005                         if (ent)
2006                                 changed = true;
2007                 }
2008                 if (ent)
2009                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
2010         }
2011
2012         if (to_send)
2013                 __grp_src_query_marked_and_rexmit(pg);
2014
2015         __grp_send_query_and_rexmit(pg);
2016
2017         return changed;
2018 }
2019
2020 static bool br_multicast_toin(struct net_bridge_port_group *pg, void *h_addr,
2021                               void *srcs, u32 nsrcs, size_t addr_size)
2022 {
2023         bool changed = false;
2024
2025         switch (pg->filter_mode) {
2026         case MCAST_INCLUDE:
2027                 changed = __grp_src_toin_incl(pg, h_addr, srcs, nsrcs, addr_size);
2028                 break;
2029         case MCAST_EXCLUDE:
2030                 changed = __grp_src_toin_excl(pg, h_addr, srcs, nsrcs, addr_size);
2031                 break;
2032         }
2033
2034         return changed;
2035 }
2036
2037 /* State          Msg type      New state                Actions
2038  * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2039  *                                                       Delete (A-B)
2040  *                                                       Send Q(G,A*B)
2041  *                                                       Group Timer=GMI
2042  */
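/* Worked example (hypothetical sources): INCLUDE {S1, S2} receiving
 * TO_EX (S2, S3) becomes EXCLUDE ({S2}, {S3}): S1 is deleted, S3 gets a zero
 * source timer, a group-and-source specific query Q(G, {S2}) is sent and the
 * group timer is set to GMI.
 */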
2043 static void __grp_src_toex_incl(struct net_bridge_port_group *pg, void *h_addr,
2044                                 void *srcs, u32 nsrcs, size_t addr_size)
2045 {
2046         struct net_bridge_group_src *ent;
2047         u32 src_idx, to_send = 0;
2048         struct br_ip src_ip;
2049
2050         hlist_for_each_entry(ent, &pg->src_list, node)
2051                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2052
2053         memset(&src_ip, 0, sizeof(src_ip));
2054         src_ip.proto = pg->key.addr.proto;
2055         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2056                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2057                 ent = br_multicast_find_group_src(pg, &src_ip);
2058                 if (ent) {
2059                         ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2060                                      BR_SGRP_F_SEND;
2061                         to_send++;
2062                 } else {
2063                         ent = br_multicast_new_group_src(pg, &src_ip);
2064                 }
2065                 if (ent)
2066                         br_multicast_fwd_src_handle(ent);
2067         }
2068
2069         __grp_src_delete_marked(pg);
2070         if (to_send)
2071                 __grp_src_query_marked_and_rexmit(pg);
2072 }
2073
2074 /* State          Msg type      New state                Actions
2075  * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
2076  *                                                       Delete (X-A)
2077  *                                                       Delete (Y-A)
2078  *                                                       Send Q(G,A-Y)
2079  *                                                       Group Timer=GMI
2080  */
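/* Worked example (hypothetical sources): EXCLUDE ({S1}, {S2}) receiving
 * TO_EX (S2, S3) becomes EXCLUDE ({S3}, {S2}): S1 is deleted, S3 (in A-X-Y)
 * inherits the current group timer, a group-and-source specific query
 * Q(G, {S3}) is sent and the group timer is then set to GMI.
 */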
2081 static bool __grp_src_toex_excl(struct net_bridge_port_group *pg, void *h_addr,
2082                                 void *srcs, u32 nsrcs, size_t addr_size)
2083 {
2084         struct net_bridge_group_src *ent;
2085         u32 src_idx, to_send = 0;
2086         bool changed = false;
2087         struct br_ip src_ip;
2088
2089         hlist_for_each_entry(ent, &pg->src_list, node)
2090                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2091
2092         memset(&src_ip, 0, sizeof(src_ip));
2093         src_ip.proto = pg->key.addr.proto;
2094         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2095                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2096                 ent = br_multicast_find_group_src(pg, &src_ip);
2097                 if (ent) {
2098                         ent->flags &= ~BR_SGRP_F_DELETE;
2099                 } else {
2100                         ent = br_multicast_new_group_src(pg, &src_ip);
2101                         if (ent) {
2102                                 __grp_src_mod_timer(ent, pg->timer.expires);
2103                                 changed = true;
2104                         }
2105                 }
2106                 if (ent && timer_pending(&ent->timer)) {
2107                         ent->flags |= BR_SGRP_F_SEND;
2108                         to_send++;
2109                 }
2110         }
2111
2112         if (__grp_src_delete_marked(pg))
2113                 changed = true;
2114         if (to_send)
2115                 __grp_src_query_marked_and_rexmit(pg);
2116
2117         return changed;
2118 }
2119
2120 static bool br_multicast_toex(struct net_bridge_port_group *pg, void *h_addr,
2121                               void *srcs, u32 nsrcs, size_t addr_size)
2122 {
2123         struct net_bridge *br = pg->key.port->br;
2124         bool changed = false;
2125
2126         switch (pg->filter_mode) {
2127         case MCAST_INCLUDE:
2128                 __grp_src_toex_incl(pg, h_addr, srcs, nsrcs, addr_size);
2129                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2130                 changed = true;
2131                 break;
2132         case MCAST_EXCLUDE:
2133                 changed = __grp_src_toex_excl(pg, h_addr, srcs, nsrcs, addr_size);
2134                 break;
2135         }
2136
2137         pg->filter_mode = MCAST_EXCLUDE;
2138         mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
2139
2140         return changed;
2141 }
2142
2143 /* State          Msg type      New state                Actions
2144  * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
2145  */
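/* Worked example (hypothetical sources): INCLUDE {S1, S2} receiving
 * BLOCK (S2, S3) keeps the state INCLUDE {S1, S2} and only sends a
 * group-and-source specific query Q(G, {S2}); S3 is ignored since it was
 * never included.
 */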
2146 static bool __grp_src_block_incl(struct net_bridge_port_group *pg, void *h_addr,
2147                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2148 {
2149         struct net_bridge_group_src *ent;
2150         u32 src_idx, to_send = 0;
2151         bool changed = false;
2152         struct br_ip src_ip;
2153
2154         hlist_for_each_entry(ent, &pg->src_list, node)
2155                 ent->flags &= ~BR_SGRP_F_SEND;
2156
2157         memset(&src_ip, 0, sizeof(src_ip));
2158         src_ip.proto = pg->key.addr.proto;
2159         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2160                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2161                 ent = br_multicast_find_group_src(pg, &src_ip);
2162                 if (ent) {
2163                         ent->flags |= BR_SGRP_F_SEND;
2164                         to_send++;
2165                 }
2166         }
2167
2168         if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
2169                 changed = true;
2170
2171         if (to_send)
2172                 __grp_src_query_marked_and_rexmit(pg);
2173
2174         if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) {
2175                 br_multicast_find_del_pg(pg->key.port->br, pg);
2176                 /* a notification has already been sent and we shouldn't access
2177                  * pg after the delete, thus we have to return false
2178                  */
2179                 changed = false;
2180         }
2181
2182         return changed;
2183 }
2184
2185 /* State          Msg type      New state                Actions
2186  * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
2187  *                                                       Send Q(G,A-Y)
2188  */
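/* Worked example (hypothetical sources): EXCLUDE ({S1}, {S2}) receiving
 * BLOCK (S2, S3) becomes EXCLUDE ({S1, S3}, {S2}): S3 (in A-X-Y) is created
 * with the current group timer and a group-and-source specific query
 * Q(G, {S3}) is sent; S2 stays on the exclude side.
 */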
2189 static bool __grp_src_block_excl(struct net_bridge_port_group *pg, void *h_addr,
2190                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2191 {
2192         struct net_bridge_group_src *ent;
2193         u32 src_idx, to_send = 0;
2194         bool changed = false;
2195         struct br_ip src_ip;
2196
2197         hlist_for_each_entry(ent, &pg->src_list, node)
2198                 ent->flags &= ~BR_SGRP_F_SEND;
2199
2200         memset(&src_ip, 0, sizeof(src_ip));
2201         src_ip.proto = pg->key.addr.proto;
2202         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2203                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2204                 ent = br_multicast_find_group_src(pg, &src_ip);
2205                 if (!ent) {
2206                         ent = br_multicast_new_group_src(pg, &src_ip);
2207                         if (ent) {
2208                                 __grp_src_mod_timer(ent, pg->timer.expires);
2209                                 changed = true;
2210                         }
2211                 }
2212                 if (ent && timer_pending(&ent->timer)) {
2213                         ent->flags |= BR_SGRP_F_SEND;
2214                         to_send++;
2215                 }
2216         }
2217
2218         if (br_multicast_eht_handle(pg, h_addr, srcs, nsrcs, addr_size, grec_type))
2219                 changed = true;
2220
2221         if (to_send)
2222                 __grp_src_query_marked_and_rexmit(pg);
2223
2224         return changed;
2225 }
2226
2227 static bool br_multicast_block(struct net_bridge_port_group *pg, void *h_addr,
2228                                void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2229 {
2230         bool changed = false;
2231
2232         switch (pg->filter_mode) {
2233         case MCAST_INCLUDE:
2234                 changed = __grp_src_block_incl(pg, h_addr, srcs, nsrcs, addr_size,
2235                                                grec_type);
2236                 break;
2237         case MCAST_EXCLUDE:
2238                 changed = __grp_src_block_excl(pg, h_addr, srcs, nsrcs, addr_size,
2239                                                grec_type);
2240                 break;
2241         }
2242
2243         return changed;
2244 }
2245
2246 static struct net_bridge_port_group *
2247 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2248                        struct net_bridge_port *p,
2249                        const unsigned char *src)
2250 {
2251         struct net_bridge *br __maybe_unused = mp->br;
2252         struct net_bridge_port_group *pg;
2253
2254         for (pg = mlock_dereference(mp->ports, br);
2255              pg;
2256              pg = mlock_dereference(pg->next, br))
2257                 if (br_port_group_equal(pg, p, src))
2258                         return pg;
2259
2260         return NULL;
2261 }
2262
2263 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
2264                                          struct net_bridge_port *port,
2265                                          struct sk_buff *skb,
2266                                          u16 vid)
2267 {
2268         bool igmpv2 = br->multicast_igmp_version == 2;
2269         struct net_bridge_mdb_entry *mdst;
2270         struct net_bridge_port_group *pg;
2271         const unsigned char *src;
2272         struct igmpv3_report *ih;
2273         struct igmpv3_grec *grec;
2274         int i, len, num, type;
2275         __be32 group, *h_addr;
2276         bool changed = false;
2277         int err = 0;
2278         u16 nsrcs;
2279
2280         ih = igmpv3_report_hdr(skb);
2281         num = ntohs(ih->ngrec);
2282         len = skb_transport_offset(skb) + sizeof(*ih);
2283
2284         for (i = 0; i < num; i++) {
2285                 len += sizeof(*grec);
2286                 if (!ip_mc_may_pull(skb, len))
2287                         return -EINVAL;
2288
2289                 grec = (void *)(skb->data + len - sizeof(*grec));
2290                 group = grec->grec_mca;
2291                 type = grec->grec_type;
2292                 nsrcs = ntohs(grec->grec_nsrcs);
2293
2294                 len += nsrcs * 4;
2295                 if (!ip_mc_may_pull(skb, len))
2296                         return -EINVAL;
2297
2298                 switch (type) {
2299                 case IGMPV3_MODE_IS_INCLUDE:
2300                 case IGMPV3_MODE_IS_EXCLUDE:
2301                 case IGMPV3_CHANGE_TO_INCLUDE:
2302                 case IGMPV3_CHANGE_TO_EXCLUDE:
2303                 case IGMPV3_ALLOW_NEW_SOURCES:
2304                 case IGMPV3_BLOCK_OLD_SOURCES:
2305                         break;
2306
2307                 default:
2308                         continue;
2309                 }
2310
2311                 src = eth_hdr(skb)->h_source;
2312                 if (nsrcs == 0 &&
2313                     (type == IGMPV3_CHANGE_TO_INCLUDE ||
2314                      type == IGMPV3_MODE_IS_INCLUDE)) {
2315                         if (!port || igmpv2) {
2316                                 br_ip4_multicast_leave_group(br, port, group, vid, src);
2317                                 continue;
2318                         }
2319                 } else {
2320                         err = br_ip4_multicast_add_group(br, port, group, vid,
2321                                                          src, igmpv2);
2322                         if (err)
2323                                 break;
2324                 }
2325
2326                 if (!port || igmpv2)
2327                         continue;
2328
2329                 spin_lock_bh(&br->multicast_lock);
2330                 mdst = br_mdb_ip4_get(br, group, vid);
2331                 if (!mdst)
2332                         goto unlock_continue;
2333                 pg = br_multicast_find_port(mdst, port, src);
2334                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2335                         goto unlock_continue;
2336                 /* reload grec and host addr */
2337                 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
2338                 h_addr = &ip_hdr(skb)->saddr;
2339                 switch (type) {
2340                 case IGMPV3_ALLOW_NEW_SOURCES:
2341                         changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
2342                                                            nsrcs, sizeof(__be32), type);
2343                         break;
2344                 case IGMPV3_MODE_IS_INCLUDE:
2345                         changed = br_multicast_isinc_allow(pg, h_addr, grec->grec_src,
2346                                                            nsrcs, sizeof(__be32), type);
2347                         break;
2348                 case IGMPV3_MODE_IS_EXCLUDE:
2349                         changed = br_multicast_isexc(pg, h_addr, grec->grec_src,
2350                                                      nsrcs, sizeof(__be32));
2351                         break;
2352                 case IGMPV3_CHANGE_TO_INCLUDE:
2353                         changed = br_multicast_toin(pg, h_addr, grec->grec_src,
2354                                                     nsrcs, sizeof(__be32));
2355                         break;
2356                 case IGMPV3_CHANGE_TO_EXCLUDE:
2357                         changed = br_multicast_toex(pg, h_addr, grec->grec_src,
2358                                                     nsrcs, sizeof(__be32));
2359                         break;
2360                 case IGMPV3_BLOCK_OLD_SOURCES:
2361                         changed = br_multicast_block(pg, h_addr, grec->grec_src,
2362                                                      nsrcs, sizeof(__be32), type);
2363                         break;
2364                 }
2365                 if (changed)
2366                         br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
2367 unlock_continue:
2368                 spin_unlock_bh(&br->multicast_lock);
2369         }
2370
2371         return err;
2372 }
2373
2374 #if IS_ENABLED(CONFIG_IPV6)
2375 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
2376                                         struct net_bridge_port *port,
2377                                         struct sk_buff *skb,
2378                                         u16 vid)
2379 {
2380         bool mldv1 = br->multicast_mld_version == 1;
2381         struct net_bridge_mdb_entry *mdst;
2382         struct net_bridge_port_group *pg;
2383         unsigned int nsrcs_offset;
2384         const unsigned char *src;
2385         struct icmp6hdr *icmp6h;
2386         struct in6_addr *h_addr;
2387         struct mld2_grec *grec;
2388         unsigned int grec_len;
2389         bool changed = false;
2390         int i, len, num;
2391         int err = 0;
2392
2393         if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
2394                 return -EINVAL;
2395
2396         icmp6h = icmp6_hdr(skb);
2397         num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
2398         len = skb_transport_offset(skb) + sizeof(*icmp6h);
2399
2400         for (i = 0; i < num; i++) {
2401                 __be16 *_nsrcs, __nsrcs;
2402                 u16 nsrcs;
2403
2404                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
2405
2406                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
2407                     nsrcs_offset + sizeof(__nsrcs))
2408                         return -EINVAL;
2409
2410                 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
2411                                             sizeof(__nsrcs), &__nsrcs);
2412                 if (!_nsrcs)
2413                         return -EINVAL;
2414
2415                 nsrcs = ntohs(*_nsrcs);
2416                 grec_len = struct_size(grec, grec_src, nsrcs);
2417
2418                 if (!ipv6_mc_may_pull(skb, len + grec_len))
2419                         return -EINVAL;
2420
2421                 grec = (struct mld2_grec *)(skb->data + len);
2422                 len += grec_len;
2423
2424                 switch (grec->grec_type) {
2425                 case MLD2_MODE_IS_INCLUDE:
2426                 case MLD2_MODE_IS_EXCLUDE:
2427                 case MLD2_CHANGE_TO_INCLUDE:
2428                 case MLD2_CHANGE_TO_EXCLUDE:
2429                 case MLD2_ALLOW_NEW_SOURCES:
2430                 case MLD2_BLOCK_OLD_SOURCES:
2431                         break;
2432
2433                 default:
2434                         continue;
2435                 }
2436
2437                 src = eth_hdr(skb)->h_source;
2438                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2439                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2440                     nsrcs == 0) {
2441                         if (!port || mldv1) {
2442                                 br_ip6_multicast_leave_group(br, port,
2443                                                              &grec->grec_mca,
2444                                                              vid, src);
2445                                 continue;
2446                         }
2447                 } else {
2448                         err = br_ip6_multicast_add_group(br, port,
2449                                                          &grec->grec_mca, vid,
2450                                                          src, mldv1);
2451                         if (err)
2452                                 break;
2453                 }
2454
2455                 if (!port || mldv1)
2456                         continue;
2457
2458                 spin_lock_bh(&br->multicast_lock);
2459                 mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
2460                 if (!mdst)
2461                         goto unlock_continue;
2462                 pg = br_multicast_find_port(mdst, port, src);
2463                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2464                         goto unlock_continue;
2465                 h_addr = &ipv6_hdr(skb)->saddr;
2466                 switch (grec->grec_type) {
2467                 case MLD2_ALLOW_NEW_SOURCES:
2468                         changed = br_multicast_isinc_allow(pg, h_addr,
2469                                                            grec->grec_src, nsrcs,
2470                                                            sizeof(struct in6_addr),
2471                                                            grec->grec_type);
2472                         break;
2473                 case MLD2_MODE_IS_INCLUDE:
2474                         changed = br_multicast_isinc_allow(pg, h_addr,
2475                                                            grec->grec_src, nsrcs,
2476                                                            sizeof(struct in6_addr),
2477                                                            grec->grec_type);
2478                         break;
2479                 case MLD2_MODE_IS_EXCLUDE:
2480                         changed = br_multicast_isexc(pg, h_addr,
2481                                                      grec->grec_src, nsrcs,
2482                                                      sizeof(struct in6_addr));
2483                         break;
2484                 case MLD2_CHANGE_TO_INCLUDE:
2485                         changed = br_multicast_toin(pg, h_addr,
2486                                                     grec->grec_src, nsrcs,
2487                                                     sizeof(struct in6_addr));
2488                         break;
2489                 case MLD2_CHANGE_TO_EXCLUDE:
2490                         changed = br_multicast_toex(pg, h_addr,
2491                                                     grec->grec_src, nsrcs,
2492                                                     sizeof(struct in6_addr));
2493                         break;
2494                 case MLD2_BLOCK_OLD_SOURCES:
2495                         changed = br_multicast_block(pg, h_addr,
2496                                                      grec->grec_src, nsrcs,
2497                                                      sizeof(struct in6_addr),
2498                                                      grec->grec_type);
2499                         break;
2500                 }
2501                 if (changed)
2502                         br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
2503 unlock_continue:
2504                 spin_unlock_bh(&br->multicast_lock);
2505         }
2506
2507         return err;
2508 }
2509 #endif
2510
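/* IGMP querier election: the lowest source address wins.  For example
 * (illustrative addresses), if the currently selected querier is 192.0.2.10
 * and a query arrives from 192.0.2.5, the comparison below prefers the lower
 * address and 192.0.2.5 becomes the selected querier; queries from higher
 * addresses are ignored while the election timers are running.
 */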
2511 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
2512                                             struct net_bridge_port *port,
2513                                             __be32 saddr)
2514 {
2515         if (!timer_pending(&br->ip4_own_query.timer) &&
2516             !timer_pending(&br->ip4_other_query.timer))
2517                 goto update;
2518
2519         if (!br->ip4_querier.addr.src.ip4)
2520                 goto update;
2521
2522         if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
2523                 goto update;
2524
2525         return false;
2526
2527 update:
2528         br->ip4_querier.addr.src.ip4 = saddr;
2529
2530         /* update is protected by the general multicast_lock held by the caller */
2531         rcu_assign_pointer(br->ip4_querier.port, port);
2532
2533         return true;
2534 }
2535
2536 #if IS_ENABLED(CONFIG_IPV6)
2537 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
2538                                             struct net_bridge_port *port,
2539                                             struct in6_addr *saddr)
2540 {
2541         if (!timer_pending(&br->ip6_own_query.timer) &&
2542             !timer_pending(&br->ip6_other_query.timer))
2543                 goto update;
2544
2545         if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
2546                 goto update;
2547
2548         return false;
2549
2550 update:
2551         br->ip6_querier.addr.src.ip6 = *saddr;
2552
2553         /* update is protected by the general multicast_lock held by the caller */
2554         rcu_assign_pointer(br->ip6_querier.port, port);
2555
2556         return true;
2557 }
2558 #endif
2559
2560 static bool br_multicast_select_querier(struct net_bridge *br,
2561                                         struct net_bridge_port *port,
2562                                         struct br_ip *saddr)
2563 {
2564         switch (saddr->proto) {
2565         case htons(ETH_P_IP):
2566                 return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
2567 #if IS_ENABLED(CONFIG_IPV6)
2568         case htons(ETH_P_IPV6):
2569                 return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
2570 #endif
2571         }
2572
2573         return false;
2574 }
2575
2576 static void
2577 br_multicast_update_query_timer(struct net_bridge *br,
2578                                 struct bridge_mcast_other_query *query,
2579                                 unsigned long max_delay)
2580 {
2581         if (!timer_pending(&query->timer))
2582                 query->delay_time = jiffies + max_delay;
2583
2584         mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
2585 }
2586
2587 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2588                                            bool is_mc_router)
2589 {
2590         struct switchdev_attr attr = {
2591                 .orig_dev = p->dev,
2592                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2593                 .flags = SWITCHDEV_F_DEFER,
2594                 .u.mrouter = is_mc_router,
2595         };
2596
2597         switchdev_port_attr_set(p->dev, &attr);
2598 }
2599
2600 /*
2601  * Add port to router_list
2602  *  list is maintained ordered by pointer value
2603  *  and locked by br->multicast_lock and RCU
2604  */
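/* Example of the ordering (made-up pointer values): with ports at 0x3000 and
 * 0x1000 already linked, a new port at 0x2000 is inserted behind 0x3000,
 * giving 0x3000 -> 0x2000 -> 0x1000, i.e. the list stays sorted by descending
 * pointer value.
 */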
2605 static void br_multicast_add_router(struct net_bridge *br,
2606                                     struct net_bridge_port *port)
2607 {
2608         struct net_bridge_port *p;
2609         struct hlist_node *slot = NULL;
2610
2611         if (!hlist_unhashed(&port->rlist))
2612                 return;
2613
2614         hlist_for_each_entry(p, &br->router_list, rlist) {
2615                 if ((unsigned long) port >= (unsigned long) p)
2616                         break;
2617                 slot = &p->rlist;
2618         }
2619
2620         if (slot)
2621                 hlist_add_behind_rcu(&port->rlist, slot);
2622         else
2623                 hlist_add_head_rcu(&port->rlist, &br->router_list);
2624         br_rtr_notify(br->dev, port, RTM_NEWMDB);
2625         br_port_mc_router_state_change(port, true);
2626 }
2627
2628 static void br_multicast_mark_router(struct net_bridge *br,
2629                                      struct net_bridge_port *port)
2630 {
2631         unsigned long now = jiffies;
2632
2633         if (!port) {
2634                 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
2635                         if (!timer_pending(&br->multicast_router_timer))
2636                                 br_mc_router_state_change(br, true);
2637                         mod_timer(&br->multicast_router_timer,
2638                                   now + br->multicast_querier_interval);
2639                 }
2640                 return;
2641         }
2642
2643         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
2644             port->multicast_router == MDB_RTR_TYPE_PERM)
2645                 return;
2646
2647         br_multicast_add_router(br, port);
2648
2649         mod_timer(&port->multicast_router_timer,
2650                   now + br->multicast_querier_interval);
2651 }
2652
2653 static void br_multicast_query_received(struct net_bridge *br,
2654                                         struct net_bridge_port *port,
2655                                         struct bridge_mcast_other_query *query,
2656                                         struct br_ip *saddr,
2657                                         unsigned long max_delay)
2658 {
2659         if (!br_multicast_select_querier(br, port, saddr))
2660                 return;
2661
2662         br_multicast_update_query_timer(br, query, max_delay);
2663         br_multicast_mark_router(br, port);
2664 }
2665
2666 static void br_ip4_multicast_query(struct net_bridge *br,
2667                                    struct net_bridge_port *port,
2668                                    struct sk_buff *skb,
2669                                    u16 vid)
2670 {
2671         unsigned int transport_len = ip_transport_len(skb);
2672         const struct iphdr *iph = ip_hdr(skb);
2673         struct igmphdr *ih = igmp_hdr(skb);
2674         struct net_bridge_mdb_entry *mp;
2675         struct igmpv3_query *ih3;
2676         struct net_bridge_port_group *p;
2677         struct net_bridge_port_group __rcu **pp;
2678         struct br_ip saddr;
2679         unsigned long max_delay;
2680         unsigned long now = jiffies;
2681         __be32 group;
2682
2683         spin_lock(&br->multicast_lock);
2684         if (!netif_running(br->dev) ||
2685             (port && port->state == BR_STATE_DISABLED))
2686                 goto out;
2687
2688         group = ih->group;
2689
2690         if (transport_len == sizeof(*ih)) {
2691                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
2692
2693                 if (!max_delay) {
2694                         max_delay = 10 * HZ;
2695                         group = 0;
2696                 }
2697         } else if (transport_len >= sizeof(*ih3)) {
2698                 ih3 = igmpv3_query_hdr(skb);
2699                 if (ih3->nsrcs ||
2700                     (br->multicast_igmp_version == 3 && group && ih3->suppress))
2701                         goto out;
2702
2703                 max_delay = ih3->code ?
2704                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
2705         } else {
2706                 goto out;
2707         }
2708
2709         if (!group) {
2710                 saddr.proto = htons(ETH_P_IP);
2711                 saddr.src.ip4 = iph->saddr;
2712
2713                 br_multicast_query_received(br, port, &br->ip4_other_query,
2714                                             &saddr, max_delay);
2715                 goto out;
2716         }
2717
2718         mp = br_mdb_ip4_get(br, group, vid);
2719         if (!mp)
2720                 goto out;
2721
2722         max_delay *= br->multicast_last_member_count;
2723
2724         if (mp->host_joined &&
2725             (timer_pending(&mp->timer) ?
2726              time_after(mp->timer.expires, now + max_delay) :
2727              try_to_del_timer_sync(&mp->timer) >= 0))
2728                 mod_timer(&mp->timer, now + max_delay);
2729
2730         for (pp = &mp->ports;
2731              (p = mlock_dereference(*pp, br)) != NULL;
2732              pp = &p->next) {
2733                 if (timer_pending(&p->timer) ?
2734                     time_after(p->timer.expires, now + max_delay) :
2735                     try_to_del_timer_sync(&p->timer) >= 0 &&
2736                     (br->multicast_igmp_version == 2 ||
2737                      p->filter_mode == MCAST_EXCLUDE))
2738                         mod_timer(&p->timer, now + max_delay);
2739         }
2740
2741 out:
2742         spin_unlock(&br->multicast_lock);
2743 }
2744
2745 #if IS_ENABLED(CONFIG_IPV6)
2746 static int br_ip6_multicast_query(struct net_bridge *br,
2747                                   struct net_bridge_port *port,
2748                                   struct sk_buff *skb,
2749                                   u16 vid)
2750 {
2751         unsigned int transport_len = ipv6_transport_len(skb);
2752         struct mld_msg *mld;
2753         struct net_bridge_mdb_entry *mp;
2754         struct mld2_query *mld2q;
2755         struct net_bridge_port_group *p;
2756         struct net_bridge_port_group __rcu **pp;
2757         struct br_ip saddr;
2758         unsigned long max_delay;
2759         unsigned long now = jiffies;
2760         unsigned int offset = skb_transport_offset(skb);
2761         const struct in6_addr *group = NULL;
2762         bool is_general_query;
2763         int err = 0;
2764
2765         spin_lock(&br->multicast_lock);
2766         if (!netif_running(br->dev) ||
2767             (port && port->state == BR_STATE_DISABLED))
2768                 goto out;
2769
2770         if (transport_len == sizeof(*mld)) {
2771                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
2772                         err = -EINVAL;
2773                         goto out;
2774                 }
2775                 mld = (struct mld_msg *) icmp6_hdr(skb);
2776                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
2777                 if (max_delay)
2778                         group = &mld->mld_mca;
2779         } else {
2780                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
2781                         err = -EINVAL;
2782                         goto out;
2783                 }
2784                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
2785                 if (!mld2q->mld2q_nsrcs)
2786                         group = &mld2q->mld2q_mca;
2787                 if (br->multicast_mld_version == 2 &&
2788                     !ipv6_addr_any(&mld2q->mld2q_mca) &&
2789                     mld2q->mld2q_suppress)
2790                         goto out;
2791
2792                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
2793         }
2794
2795         is_general_query = group && ipv6_addr_any(group);
2796
2797         if (is_general_query) {
2798                 saddr.proto = htons(ETH_P_IPV6);
2799                 saddr.src.ip6 = ipv6_hdr(skb)->saddr;
2800
2801                 br_multicast_query_received(br, port, &br->ip6_other_query,
2802                                             &saddr, max_delay);
2803                 goto out;
2804         } else if (!group) {
2805                 goto out;
2806         }
2807
2808         mp = br_mdb_ip6_get(br, group, vid);
2809         if (!mp)
2810                 goto out;
2811
2812         max_delay *= br->multicast_last_member_count;
2813         if (mp->host_joined &&
2814             (timer_pending(&mp->timer) ?
2815              time_after(mp->timer.expires, now + max_delay) :
2816              try_to_del_timer_sync(&mp->timer) >= 0))
2817                 mod_timer(&mp->timer, now + max_delay);
2818
2819         for (pp = &mp->ports;
2820              (p = mlock_dereference(*pp, br)) != NULL;
2821              pp = &p->next) {
2822                 if (timer_pending(&p->timer) ?
2823                     time_after(p->timer.expires, now + max_delay) :
2824                     try_to_del_timer_sync(&p->timer) >= 0 &&
2825                     (br->multicast_mld_version == 1 ||
2826                      p->filter_mode == MCAST_EXCLUDE))
2827                         mod_timer(&p->timer, now + max_delay);
2828         }
2829
2830 out:
2831         spin_unlock(&br->multicast_lock);
2832         return err;
2833 }
2834 #endif
2835
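/* Common IGMPv2 leave / MLDv1 done handling: with fast-leave the matching
 * port group is deleted immediately; otherwise, unless another querier is
 * active, the group and port membership timers are lowered to
 * last_member_count * last_member_interval, and a group-specific query is
 * sent if this bridge acts as querier.
 */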
2836 static void
2837 br_multicast_leave_group(struct net_bridge *br,
2838                          struct net_bridge_port *port,
2839                          struct br_ip *group,
2840                          struct bridge_mcast_other_query *other_query,
2841                          struct bridge_mcast_own_query *own_query,
2842                          const unsigned char *src)
2843 {
2844         struct net_bridge_mdb_entry *mp;
2845         struct net_bridge_port_group *p;
2846         unsigned long now;
2847         unsigned long time;
2848
2849         spin_lock(&br->multicast_lock);
2850         if (!netif_running(br->dev) ||
2851             (port && port->state == BR_STATE_DISABLED))
2852                 goto out;
2853
2854         mp = br_mdb_ip_get(br, group);
2855         if (!mp)
2856                 goto out;
2857
2858         if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
2859                 struct net_bridge_port_group __rcu **pp;
2860
2861                 for (pp = &mp->ports;
2862                      (p = mlock_dereference(*pp, br)) != NULL;
2863                      pp = &p->next) {
2864                         if (!br_port_group_equal(p, port, src))
2865                                 continue;
2866
2867                         if (p->flags & MDB_PG_FLAGS_PERMANENT)
2868                                 break;
2869
2870                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2871                         br_multicast_del_pg(mp, p, pp);
2872                 }
2873                 goto out;
2874         }
2875
2876         if (timer_pending(&other_query->timer))
2877                 goto out;
2878
2879         if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
2880                 __br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
2881                                           false, 0, NULL);
2882
2883                 time = jiffies + br->multicast_last_member_count *
2884                                  br->multicast_last_member_interval;
2885
2886                 mod_timer(&own_query->timer, time);
2887
2888                 for (p = mlock_dereference(mp->ports, br);
2889                      p != NULL;
2890                      p = mlock_dereference(p->next, br)) {
2891                         if (!br_port_group_equal(p, port, src))
2892                                 continue;
2893
2894                         if (!hlist_unhashed(&p->mglist) &&
2895                             (timer_pending(&p->timer) ?
2896                              time_after(p->timer.expires, time) :
2897                              try_to_del_timer_sync(&p->timer) >= 0)) {
2898                                 mod_timer(&p->timer, time);
2899                         }
2900
2901                         break;
2902                 }
2903         }
2904
2905         now = jiffies;
2906         time = now + br->multicast_last_member_count *
2907                      br->multicast_last_member_interval;
2908
2909         if (!port) {
2910                 if (mp->host_joined &&
2911                     (timer_pending(&mp->timer) ?
2912                      time_after(mp->timer.expires, time) :
2913                      try_to_del_timer_sync(&mp->timer) >= 0)) {
2914                         mod_timer(&mp->timer, time);
2915                 }
2916
2917                 goto out;
2918         }
2919
2920         for (p = mlock_dereference(mp->ports, br);
2921              p != NULL;
2922              p = mlock_dereference(p->next, br)) {
2923                 if (p->key.port != port)
2924                         continue;
2925
2926                 if (!hlist_unhashed(&p->mglist) &&
2927                     (timer_pending(&p->timer) ?
2928                      time_after(p->timer.expires, time) :
2929                      try_to_del_timer_sync(&p->timer) >= 0)) {
2930                         mod_timer(&p->timer, time);
2931                 }
2932
2933                 break;
2934         }
2935 out:
2936         spin_unlock(&br->multicast_lock);
2937 }
2938
2939 static void br_ip4_multicast_leave_group(struct net_bridge *br,
2940                                          struct net_bridge_port *port,
2941                                          __be32 group,
2942                                          __u16 vid,
2943                                          const unsigned char *src)
2944 {
2945         struct br_ip br_group;
2946         struct bridge_mcast_own_query *own_query;
2947
2948         if (ipv4_is_local_multicast(group))
2949                 return;
2950
2951         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
2952
2953         memset(&br_group, 0, sizeof(br_group));
2954         br_group.dst.ip4 = group;
2955         br_group.proto = htons(ETH_P_IP);
2956         br_group.vid = vid;
2957
2958         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
2959                                  own_query, src);
2960 }
2961
2962 #if IS_ENABLED(CONFIG_IPV6)
2963 static void br_ip6_multicast_leave_group(struct net_bridge *br,
2964                                          struct net_bridge_port *port,
2965                                          const struct in6_addr *group,
2966                                          __u16 vid,
2967                                          const unsigned char *src)
2968 {
2969         struct br_ip br_group;
2970         struct bridge_mcast_own_query *own_query;
2971
2972         if (ipv6_addr_is_ll_all_nodes(group))
2973                 return;
2974
2975         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
2976
2977         memset(&br_group, 0, sizeof(br_group));
2978         br_group.dst.ip6 = *group;
2979         br_group.proto = htons(ETH_P_IPV6);
2980         br_group.vid = vid;
2981
2982         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
2983                                  own_query, src);
2984 }
2985 #endif
2986
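/* Account an IGMP/MLD parse error in the per-CPU multicast statistics of
 * the port (if given) or of the bridge itself.
 */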
2987 static void br_multicast_err_count(const struct net_bridge *br,
2988                                    const struct net_bridge_port *p,
2989                                    __be16 proto)
2990 {
2991         struct bridge_mcast_stats __percpu *stats;
2992         struct bridge_mcast_stats *pstats;
2993
2994         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2995                 return;
2996
2997         if (p)
2998                 stats = p->mcast_stats;
2999         else
3000                 stats = br->mcast_stats;
3001         if (WARN_ON(!stats))
3002                 return;
3003
3004         pstats = this_cpu_ptr(stats);
3005
3006         u64_stats_update_begin(&pstats->syncp);
3007         switch (proto) {
3008         case htons(ETH_P_IP):
3009                 pstats->mstats.igmp_parse_errors++;
3010                 break;
3011 #if IS_ENABLED(CONFIG_IPV6)
3012         case htons(ETH_P_IPV6):
3013                 pstats->mstats.mld_parse_errors++;
3014                 break;
3015 #endif
3016         }
3017         u64_stats_update_end(&pstats->syncp);
3018 }
3019
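/* A valid PIMv2 HELLO received on a port indicates a multicast router
 * behind it, so refresh the router port state.
 */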
3020 static void br_multicast_pim(struct net_bridge *br,
3021                              struct net_bridge_port *port,
3022                              const struct sk_buff *skb)
3023 {
3024         unsigned int offset = skb_transport_offset(skb);
3025         struct pimhdr *pimhdr, _pimhdr;
3026
3027         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3028         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3029             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3030                 return;
3031
3032         br_multicast_mark_router(br, port);
3033 }
3034
3035 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
3036                                     struct net_bridge_port *port,
3037                                     struct sk_buff *skb)
3038 {
3039         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3040             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3041                 return -ENOMSG;
3042
3043         br_multicast_mark_router(br, port);
3044
3045         return 0;
3046 }
3047
3048 static int br_multicast_ipv4_rcv(struct net_bridge *br,
3049                                  struct net_bridge_port *port,
3050                                  struct sk_buff *skb,
3051                                  u16 vid)
3052 {
3053         const unsigned char *src;
3054         struct igmphdr *ih;
3055         int err;
3056
3057         err = ip_mc_check_igmp(skb);
3058
3059         if (err == -ENOMSG) {
3060                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
3061                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3062                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
3063                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
3064                                 br_multicast_pim(br, port, skb);
3065                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
3066                         br_ip4_multicast_mrd_rcv(br, port, skb);
3067                 }
3068
3069                 return 0;
3070         } else if (err < 0) {
3071                 br_multicast_err_count(br, port, skb->protocol);
3072                 return err;
3073         }
3074
3075         ih = igmp_hdr(skb);
3076         src = eth_hdr(skb)->h_source;
3077         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
3078
3079         switch (ih->type) {
3080         case IGMP_HOST_MEMBERSHIP_REPORT:
3081         case IGMPV2_HOST_MEMBERSHIP_REPORT:
3082                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3083                 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
3084                                                  true);
3085                 break;
3086         case IGMPV3_HOST_MEMBERSHIP_REPORT:
3087                 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
3088                 break;
3089         case IGMP_HOST_MEMBERSHIP_QUERY:
3090                 br_ip4_multicast_query(br, port, skb, vid);
3091                 break;
3092         case IGMP_HOST_LEAVE_MESSAGE:
3093                 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
3094                 break;
3095         }
3096
3097         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
3098                            BR_MCAST_DIR_RX);
3099
3100         return err;
3101 }
3102
3103 #if IS_ENABLED(CONFIG_IPV6)
3104 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
3105                                     struct net_bridge_port *port,
3106                                     struct sk_buff *skb)
3107 {
3108         int ret;
3109
3110         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
3111                 return -ENOMSG;
3112
3113         ret = ipv6_mc_check_icmpv6(skb);
3114         if (ret < 0)
3115                 return ret;
3116
3117         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3118                 return -ENOMSG;
3119
3120         br_multicast_mark_router(br, port);
3121
3122         return 0;
3123 }
3124
3125 static int br_multicast_ipv6_rcv(struct net_bridge *br,
3126                                  struct net_bridge_port *port,
3127                                  struct sk_buff *skb,
3128                                  u16 vid)
3129 {
3130         const unsigned char *src;
3131         struct mld_msg *mld;
3132         int err;
3133
3134         err = ipv6_mc_check_mld(skb);
3135
3136         if (err == -ENOMSG) {
3137                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
3138                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3139
3140                 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
3141                         err = br_ip6_multicast_mrd_rcv(br, port, skb);
3142
3143                         if (err < 0 && err != -ENOMSG) {
3144                                 br_multicast_err_count(br, port, skb->protocol);
3145                                 return err;
3146                         }
3147                 }
3148
3149                 return 0;
3150         } else if (err < 0) {
3151                 br_multicast_err_count(br, port, skb->protocol);
3152                 return err;
3153         }
3154
3155         mld = (struct mld_msg *)skb_transport_header(skb);
3156         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
3157
3158         switch (mld->mld_type) {
3159         case ICMPV6_MGM_REPORT:
3160                 src = eth_hdr(skb)->h_source;
3161                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3162                 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
3163                                                  src, true);
3164                 break;
3165         case ICMPV6_MLD2_REPORT:
3166                 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
3167                 break;
3168         case ICMPV6_MGM_QUERY:
3169                 err = br_ip6_multicast_query(br, port, skb, vid);
3170                 break;
3171         case ICMPV6_MGM_REDUCTION:
3172                 src = eth_hdr(skb)->h_source;
3173                 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
3174                 break;
3175         }
3176
3177         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
3178                            BR_MCAST_DIR_RX);
3179
3180         return err;
3181 }
3182 #endif
3183
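/* Snooping entry point called from the bridge input path: dispatches
 * IGMP (IPv4) and MLD (IPv6) packets to the per-family handlers, or
 * returns 0 immediately when multicast snooping is disabled.
 */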
3184 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
3185                      struct sk_buff *skb, u16 vid)
3186 {
3187         int ret = 0;
3188
3189         BR_INPUT_SKB_CB(skb)->igmp = 0;
3190         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
3191
3192         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3193                 return 0;
3194
3195         switch (skb->protocol) {
3196         case htons(ETH_P_IP):
3197                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
3198                 break;
3199 #if IS_ENABLED(CONFIG_IPV6)
3200         case htons(ETH_P_IPV6):
3201                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
3202                 break;
3203 #endif
3204         }
3205
3206         return ret;
3207 }
3208
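/* Own query timer expired: account another startup query while still in
 * the startup phase, drop the cached querier port and send the next
 * general query.
 */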
3209 static void br_multicast_query_expired(struct net_bridge *br,
3210                                        struct bridge_mcast_own_query *query,
3211                                        struct bridge_mcast_querier *querier)
3212 {
3213         spin_lock(&br->multicast_lock);
3214         if (query->startup_sent < br->multicast_startup_query_count)
3215                 query->startup_sent++;
3216
3217         RCU_INIT_POINTER(querier->port, NULL);
3218         br_multicast_send_query(br, NULL, query);
3219         spin_unlock(&br->multicast_lock);
3220 }
3221
3222 static void br_ip4_multicast_query_expired(struct timer_list *t)
3223 {
3224         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
3225
3226         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
3227 }
3228
3229 #if IS_ENABLED(CONFIG_IPV6)
3230 static void br_ip6_multicast_query_expired(struct timer_list *t)
3231 {
3232         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
3233
3234         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
3235 }
3236 #endif
3237
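/* Deferred destruction of multicast entries queued on the bridge's gc list. */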
3238 static void br_multicast_gc_work(struct work_struct *work)
3239 {
3240         struct net_bridge *br = container_of(work, struct net_bridge,
3241                                              mcast_gc_work);
3242         HLIST_HEAD(deleted_head);
3243
3244         spin_lock_bh(&br->multicast_lock);
3245         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3246         spin_unlock_bh(&br->multicast_lock);
3247
3248         br_multicast_gc(&deleted_head);
3249 }
3250
3251 void br_multicast_init(struct net_bridge *br)
3252 {
3253         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
3254
3255         br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3256         br->multicast_last_member_count = 2;
3257         br->multicast_startup_query_count = 2;
3258
3259         br->multicast_last_member_interval = HZ;
3260         br->multicast_query_response_interval = 10 * HZ;
3261         br->multicast_startup_query_interval = 125 * HZ / 4;
3262         br->multicast_query_interval = 125 * HZ;
3263         br->multicast_querier_interval = 255 * HZ;
3264         br->multicast_membership_interval = 260 * HZ;
3265
3266         br->ip4_other_query.delay_time = 0;
3267         br->ip4_querier.port = NULL;
3268         br->multicast_igmp_version = 2;
3269 #if IS_ENABLED(CONFIG_IPV6)
3270         br->multicast_mld_version = 1;
3271         br->ip6_other_query.delay_time = 0;
3272         br->ip6_querier.port = NULL;
3273 #endif
3274         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
3275         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
3276
3277         spin_lock_init(&br->multicast_lock);
3278         timer_setup(&br->multicast_router_timer,
3279                     br_multicast_local_router_expired, 0);
3280         timer_setup(&br->ip4_other_query.timer,
3281                     br_ip4_multicast_querier_expired, 0);
3282         timer_setup(&br->ip4_own_query.timer,
3283                     br_ip4_multicast_query_expired, 0);
3284 #if IS_ENABLED(CONFIG_IPV6)
3285         timer_setup(&br->ip6_other_query.timer,
3286                     br_ip6_multicast_querier_expired, 0);
3287         timer_setup(&br->ip6_own_query.timer,
3288                     br_ip6_multicast_query_expired, 0);
3289 #endif
3290         INIT_HLIST_HEAD(&br->mdb_list);
3291         INIT_HLIST_HEAD(&br->mcast_gc_list);
3292         INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
3293 }
3294
3295 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3296 {
3297         struct in_device *in_dev = in_dev_get(br->dev);
3298
3299         if (!in_dev)
3300                 return;
3301
3302         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3303         in_dev_put(in_dev);
3304 }
3305
3306 #if IS_ENABLED(CONFIG_IPV6)
3307 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3308 {
3309         struct in6_addr addr;
3310
3311         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3312         ipv6_dev_mc_inc(br->dev, &addr);
3313 }
3314 #else
3315 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3316 {
3317 }
3318 #endif
3319
3320 void br_multicast_join_snoopers(struct net_bridge *br)
3321 {
3322         br_ip4_multicast_join_snoopers(br);
3323         br_ip6_multicast_join_snoopers(br);
3324 }
3325
3326 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
3327 {
3328         struct in_device *in_dev = in_dev_get(br->dev);
3329
3330         if (WARN_ON(!in_dev))
3331                 return;
3332
3333         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3334         in_dev_put(in_dev);
3335 }
3336
3337 #if IS_ENABLED(CONFIG_IPV6)
3338 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3339 {
3340         struct in6_addr addr;
3341
3342         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3343         ipv6_dev_mc_dec(br->dev, &addr);
3344 }
3345 #else
3346 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3347 {
3348 }
3349 #endif
3350
3351 void br_multicast_leave_snoopers(struct net_bridge *br)
3352 {
3353         br_ip4_multicast_leave_snoopers(br);
3354         br_ip6_multicast_leave_snoopers(br);
3355 }
3356
3357 static void __br_multicast_open(struct net_bridge *br,
3358                                 struct bridge_mcast_own_query *query)
3359 {
3360         query->startup_sent = 0;
3361
3362         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3363                 return;
3364
3365         mod_timer(&query->timer, jiffies);
3366 }
3367
3368 void br_multicast_open(struct net_bridge *br)
3369 {
3370         __br_multicast_open(br, &br->ip4_own_query);
3371 #if IS_ENABLED(CONFIG_IPV6)
3372         __br_multicast_open(br, &br->ip6_own_query);
3373 #endif
3374 }
3375
3376 void br_multicast_stop(struct net_bridge *br)
3377 {
3378         del_timer_sync(&br->multicast_router_timer);
3379         del_timer_sync(&br->ip4_other_query.timer);
3380         del_timer_sync(&br->ip4_own_query.timer);
3381 #if IS_ENABLED(CONFIG_IPV6)
3382         del_timer_sync(&br->ip6_other_query.timer);
3383         del_timer_sync(&br->ip6_own_query.timer);
3384 #endif
3385 }
3386
3387 void br_multicast_dev_del(struct net_bridge *br)
3388 {
3389         struct net_bridge_mdb_entry *mp;
3390         HLIST_HEAD(deleted_head);
3391         struct hlist_node *tmp;
3392
3393         spin_lock_bh(&br->multicast_lock);
3394         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
3395                 br_multicast_del_mdb_entry(mp);
3396         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3397         spin_unlock_bh(&br->multicast_lock);
3398
3399         br_multicast_gc(&deleted_head);
3400         cancel_work_sync(&br->mcast_gc_work);
3401
3402         rcu_barrier();
3403 }
3404
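/* Configure the bridge's own multicast router behaviour: disabled,
 * permanent, or learned from queries; the timer-based temporary mode is
 * only valid for ports and is rejected here.
 */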
3405 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
3406 {
3407         int err = -EINVAL;
3408
3409         spin_lock_bh(&br->multicast_lock);
3410
3411         switch (val) {
3412         case MDB_RTR_TYPE_DISABLED:
3413         case MDB_RTR_TYPE_PERM:
3414                 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
3415                 del_timer(&br->multicast_router_timer);
3416                 br->multicast_router = val;
3417                 err = 0;
3418                 break;
3419         case MDB_RTR_TYPE_TEMP_QUERY:
3420                 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
3421                         br_mc_router_state_change(br, false);
3422                 br->multicast_router = val;
3423                 err = 0;
3424                 break;
3425         }
3426
3427         spin_unlock_bh(&br->multicast_lock);
3428
3429         return err;
3430 }
3431
3432 static void __del_port_router(struct net_bridge_port *p)
3433 {
3434         if (hlist_unhashed(&p->rlist))
3435                 return;
3436         hlist_del_init_rcu(&p->rlist);
3437         br_rtr_notify(p->br->dev, p, RTM_DELMDB);
3438         br_port_mc_router_state_change(p, false);
3439
3440         /* don't allow timer refresh */
3441         if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3442                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3443 }
3444
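/* Configure the port's multicast router type: DISABLED (never treat the
 * port as a router port), TEMP_QUERY (learn router ports from queries),
 * PERM (always a router port) or TEMP (mark as router now, expire via
 * timer).
 */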
3445 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
3446 {
3447         struct net_bridge *br = p->br;
3448         unsigned long now = jiffies;
3449         int err = -EINVAL;
3450
3451         spin_lock(&br->multicast_lock);
3452         if (p->multicast_router == val) {
3453                 /* Refresh the temp router port timer */
3454                 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3455                         mod_timer(&p->multicast_router_timer,
3456                                   now + br->multicast_querier_interval);
3457                 err = 0;
3458                 goto unlock;
3459         }
3460         switch (val) {
3461         case MDB_RTR_TYPE_DISABLED:
3462                 p->multicast_router = MDB_RTR_TYPE_DISABLED;
3463                 __del_port_router(p);
3464                 del_timer(&p->multicast_router_timer);
3465                 break;
3466         case MDB_RTR_TYPE_TEMP_QUERY:
3467                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3468                 __del_port_router(p);
3469                 break;
3470         case MDB_RTR_TYPE_PERM:
3471                 p->multicast_router = MDB_RTR_TYPE_PERM;
3472                 del_timer(&p->multicast_router_timer);
3473                 br_multicast_add_router(br, p);
3474                 break;
3475         case MDB_RTR_TYPE_TEMP:
3476                 p->multicast_router = MDB_RTR_TYPE_TEMP;
3477                 br_multicast_mark_router(br, p);
3478                 break;
3479         default:
3480                 goto unlock;
3481         }
3482         err = 0;
3483 unlock:
3484         spin_unlock(&br->multicast_lock);
3485
3486         return err;
3487 }
3488
3489 static void br_multicast_start_querier(struct net_bridge *br,
3490                                        struct bridge_mcast_own_query *query)
3491 {
3492         struct net_bridge_port *port;
3493
3494         __br_multicast_open(br, query);
3495
3496         rcu_read_lock();
3497         list_for_each_entry_rcu(port, &br->port_list, list) {
3498                 if (port->state == BR_STATE_DISABLED ||
3499                     port->state == BR_STATE_BLOCKING)
3500                         continue;
3501
3502                 if (query == &br->ip4_own_query)
3503                         br_multicast_enable(&port->ip4_own_query);
3504 #if IS_ENABLED(CONFIG_IPV6)
3505                 else
3506                         br_multicast_enable(&port->ip6_own_query);
3507 #endif
3508         }
3509         rcu_read_unlock();
3510 }
3511
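/* Enable or disable multicast snooping on the bridge. Joining/leaving the
 * all-snoopers groups happens only after multicast_lock has been dropped,
 * see the comment at the end of this function.
 */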
3512 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
3513 {
3514         struct net_bridge_port *port;
3515         bool change_snoopers = false;
3516
3517         spin_lock_bh(&br->multicast_lock);
3518         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
3519                 goto unlock;
3520
3521         br_mc_disabled_update(br->dev, val);
3522         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
3523         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
3524                 change_snoopers = true;
3525                 goto unlock;
3526         }
3527
3528         if (!netif_running(br->dev))
3529                 goto unlock;
3530
3531         br_multicast_open(br);
3532         list_for_each_entry(port, &br->port_list, list)
3533                 __br_multicast_enable_port(port);
3534
3535         change_snoopers = true;
3536
3537 unlock:
3538         spin_unlock_bh(&br->multicast_lock);
3539
3540         /* br_multicast_join_snoopers has the potential to cause
3541          * an MLD Report/Leave to be delivered to br_multicast_rcv,
3542          * which would in turn call br_multicast_add_group, which would
3543          * attempt to acquire multicast_lock. This function should be
3544          * called after the lock has been released to avoid deadlocks on
3545          * multicast_lock.
3546          *
3547          * br_multicast_leave_snoopers does not have the problem since
3548          * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
3549          * returns without calling br_multicast_ipv4/6_rcv if it's not
3550          * enabled. Moved both functions out just for symmetry.
3551          */
3552         if (change_snoopers) {
3553                 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
3554                         br_multicast_join_snoopers(br);
3555                 else
3556                         br_multicast_leave_snoopers(br);
3557         }
3558
3559         return 0;
3560 }
3561
3562 bool br_multicast_enabled(const struct net_device *dev)
3563 {
3564         struct net_bridge *br = netdev_priv(dev);
3565
3566         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
3567 }
3568 EXPORT_SYMBOL_GPL(br_multicast_enabled);
3569
3570 bool br_multicast_router(const struct net_device *dev)
3571 {
3572         struct net_bridge *br = netdev_priv(dev);
3573         bool is_router;
3574
3575         spin_lock_bh(&br->multicast_lock);
3576         is_router = br_multicast_is_router(br);
3577         spin_unlock_bh(&br->multicast_lock);
3578         return is_router;
3579 }
3580 EXPORT_SYMBOL_GPL(br_multicast_router);
3581
3582 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
3583 {
3584         unsigned long max_delay;
3585
3586         val = !!val;
3587
3588         spin_lock_bh(&br->multicast_lock);
3589         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
3590                 goto unlock;
3591
3592         br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
3593         if (!val)
3594                 goto unlock;
3595
3596         max_delay = br->multicast_query_response_interval;
3597
3598         if (!timer_pending(&br->ip4_other_query.timer))
3599                 br->ip4_other_query.delay_time = jiffies + max_delay;
3600
3601         br_multicast_start_querier(br, &br->ip4_own_query);
3602
3603 #if IS_ENABLED(CONFIG_IPV6)
3604         if (!timer_pending(&br->ip6_other_query.timer))
3605                 br->ip6_other_query.delay_time = jiffies + max_delay;
3606
3607         br_multicast_start_querier(br, &br->ip6_own_query);
3608 #endif
3609
3610 unlock:
3611         spin_unlock_bh(&br->multicast_lock);
3612
3613         return 0;
3614 }
3615
3616 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
3617 {
3618         /* Currently we only support versions 2 and 3 */
3619         switch (val) {
3620         case 2:
3621         case 3:
3622                 break;
3623         default:
3624                 return -EINVAL;
3625         }
3626
3627         spin_lock_bh(&br->multicast_lock);
3628         br->multicast_igmp_version = val;
3629         spin_unlock_bh(&br->multicast_lock);
3630
3631         return 0;
3632 }
3633
3634 #if IS_ENABLED(CONFIG_IPV6)
3635 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
3636 {
3637         /* Currently we only support versions 1 and 2 */
3638         switch (val) {
3639         case 1:
3640         case 2:
3641                 break;
3642         default:
3643                 return -EINVAL;
3644         }
3645
3646         spin_lock_bh(&br->multicast_lock);
3647         br->multicast_mld_version = val;
3648         spin_unlock_bh(&br->multicast_lock);
3649
3650         return 0;
3651 }
3652 #endif
3653
3654 /**
3655  * br_multicast_list_adjacent - Returns snooped multicast addresses
3656  * @dev:        The bridge port adjacent to which to retrieve addresses
3657  * @br_ip_list: The list in which to store the snooped multicast IP addresses
3658  *
3659  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
3660  * snooping feature on all bridge ports of dev's bridge device, excluding
3661  * the addresses from dev itself.
3662  *
3663  * Returns the number of items added to br_ip_list.
3664  *
3665  * Notes:
3666  * - br_ip_list needs to be initialized by caller
3667  * - br_ip_list might contain duplicates in the end
3668  *   (needs to be taken care of by caller)
3669  * - br_ip_list needs to be freed by caller
3670  */
3671 int br_multicast_list_adjacent(struct net_device *dev,
3672                                struct list_head *br_ip_list)
3673 {
3674         struct net_bridge *br;
3675         struct net_bridge_port *port;
3676         struct net_bridge_port_group *group;
3677         struct br_ip_list *entry;
3678         int count = 0;
3679
3680         rcu_read_lock();
3681         if (!br_ip_list || !netif_is_bridge_port(dev))
3682                 goto unlock;
3683
3684         port = br_port_get_rcu(dev);
3685         if (!port || !port->br)
3686                 goto unlock;
3687
3688         br = port->br;
3689
3690         list_for_each_entry_rcu(port, &br->port_list, list) {
3691                 if (!port->dev || port->dev == dev)
3692                         continue;
3693
3694                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
3695                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
3696                         if (!entry)
3697                                 goto unlock;
3698
3699                         entry->addr = group->key.addr;
3700                         list_add(&entry->list, br_ip_list);
3701                         count++;
3702                 }
3703         }
3704
3705 unlock:
3706         rcu_read_unlock();
3707         return count;
3708 }
3709 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
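
/* Illustrative caller sketch (an assumption for this document, not code
 * taken from the kernel): an upper protocol running on a bridge port could
 * mirror the snooped groups roughly like this; the variable names and the
 * consume_group() helper are made up for the example.
 *
 *	LIST_HEAD(bridge_mcast_list);
 *	struct br_ip_list *entry, *tmp;
 *	int count;
 *
 *	count = br_multicast_list_adjacent(port_dev, &bridge_mcast_list);
 *	list_for_each_entry_safe(entry, tmp, &bridge_mcast_list, list) {
 *		consume_group(&entry->addr);	(hypothetical helper; the list
 *						 may contain duplicates)
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */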
3710
3711 /**
3712  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
3713  * @dev: The bridge port providing the bridge on which to check for a querier
3714  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3715  *
3716  * Checks whether the given interface has a bridge on top and if so returns
3717  * true if a valid querier exists anywhere on the bridged link layer.
3718  * Otherwise returns false.
3719  */
3720 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
3721 {
3722         struct net_bridge *br;
3723         struct net_bridge_port *port;
3724         struct ethhdr eth;
3725         bool ret = false;
3726
3727         rcu_read_lock();
3728         if (!netif_is_bridge_port(dev))
3729                 goto unlock;
3730
3731         port = br_port_get_rcu(dev);
3732         if (!port || !port->br)
3733                 goto unlock;
3734
3735         br = port->br;
3736
3737         memset(&eth, 0, sizeof(eth));
3738         eth.h_proto = htons(proto);
3739
3740         ret = br_multicast_querier_exists(br, &eth, NULL);
3741
3742 unlock:
3743         rcu_read_unlock();
3744         return ret;
3745 }
3746 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
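
/* Illustrative caller sketch (an assumption, not taken from the kernel):
 * a driver on a bridge port might gate an offload decision on whether any
 * IGMP querier is active on the bridged segment; "port_dev" stands for the
 * port's net_device.
 *
 *	if (br_multicast_has_querier_anywhere(port_dev, ETH_P_IP))
 *		pr_debug("IGMP querier present on the bridged segment\n");
 */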
3747
3748 /**
3749  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
3750  * @dev: The bridge port adjacent to which to check for a querier
3751  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3752  *
3753  * Checks whether the given interface has a bridge on top and if so returns
3754  * true if a selected querier is behind one of the other ports of this
3755  * bridge. Otherwise returns false.
3756  */
3757 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
3758 {
3759         struct net_bridge *br;
3760         struct net_bridge_port *port;
3761         bool ret = false;
3762
3763         rcu_read_lock();
3764         if (!netif_is_bridge_port(dev))
3765                 goto unlock;
3766
3767         port = br_port_get_rcu(dev);
3768         if (!port || !port->br)
3769                 goto unlock;
3770
3771         br = port->br;
3772
3773         switch (proto) {
3774         case ETH_P_IP:
3775                 if (!timer_pending(&br->ip4_other_query.timer) ||
3776                     rcu_dereference(br->ip4_querier.port) == port)
3777                         goto unlock;
3778                 break;
3779 #if IS_ENABLED(CONFIG_IPV6)
3780         case ETH_P_IPV6:
3781                 if (!timer_pending(&br->ip6_other_query.timer) ||
3782                     rcu_dereference(br->ip6_querier.port) == port)
3783                         goto unlock;
3784                 break;
3785 #endif
3786         default:
3787                 goto unlock;
3788         }
3789
3790         ret = true;
3791 unlock:
3792         rcu_read_unlock();
3793         return ret;
3794 }
3795 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
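
/* Illustrative caller sketch (an assumption, not taken from the kernel):
 * a mesh protocol can avoid its own multicast flooding when a querier sits
 * behind one of the other ports of the same bridge; "port_dev" is again a
 * made-up port net_device.
 *
 *	if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IPV6))
 *		pr_debug("MLD querier behind another bridge port\n");
 */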
3796
3797 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
3798                                const struct sk_buff *skb, u8 type, u8 dir)
3799 {
3800         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
3801         __be16 proto = skb->protocol;
3802         unsigned int t_len;
3803
3804         u64_stats_update_begin(&pstats->syncp);
3805         switch (proto) {
3806         case htons(ETH_P_IP):
3807                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
3808                 switch (type) {
3809                 case IGMP_HOST_MEMBERSHIP_REPORT:
3810                         pstats->mstats.igmp_v1reports[dir]++;
3811                         break;
3812                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
3813                         pstats->mstats.igmp_v2reports[dir]++;
3814                         break;
3815                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
3816                         pstats->mstats.igmp_v3reports[dir]++;
3817                         break;
3818                 case IGMP_HOST_MEMBERSHIP_QUERY:
3819                         if (t_len != sizeof(struct igmphdr)) {
3820                                 pstats->mstats.igmp_v3queries[dir]++;
3821                         } else {
3822                                 unsigned int offset = skb_transport_offset(skb);
3823                                 struct igmphdr *ih, _ihdr;
3824
3825                                 ih = skb_header_pointer(skb, offset,
3826                                                         sizeof(_ihdr), &_ihdr);
3827                                 if (!ih)
3828                                         break;
3829                                 if (!ih->code)
3830                                         pstats->mstats.igmp_v1queries[dir]++;
3831                                 else
3832                                         pstats->mstats.igmp_v2queries[dir]++;
3833                         }
3834                         break;
3835                 case IGMP_HOST_LEAVE_MESSAGE:
3836                         pstats->mstats.igmp_leaves[dir]++;
3837                         break;
3838                 }
3839                 break;
3840 #if IS_ENABLED(CONFIG_IPV6)
3841         case htons(ETH_P_IPV6):
3842                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
3843                         sizeof(struct ipv6hdr);
3844                 t_len -= skb_network_header_len(skb);
3845                 switch (type) {
3846                 case ICMPV6_MGM_REPORT:
3847                         pstats->mstats.mld_v1reports[dir]++;
3848                         break;
3849                 case ICMPV6_MLD2_REPORT:
3850                         pstats->mstats.mld_v2reports[dir]++;
3851                         break;
3852                 case ICMPV6_MGM_QUERY:
3853                         if (t_len != sizeof(struct mld_msg))
3854                                 pstats->mstats.mld_v2queries[dir]++;
3855                         else
3856                                 pstats->mstats.mld_v1queries[dir]++;
3857                         break;
3858                 case ICMPV6_MGM_REDUCTION:
3859                         pstats->mstats.mld_leaves[dir]++;
3860                         break;
3861                 }
3862                 break;
3863 #endif /* CONFIG_IPV6 */
3864         }
3865         u64_stats_update_end(&pstats->syncp);
3866 }
3867
3868 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
3869                         const struct sk_buff *skb, u8 type, u8 dir)
3870 {
3871         struct bridge_mcast_stats __percpu *stats;
3872
3873         /* if multicast_disabled is true then igmp type can't be set */
3874         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3875                 return;
3876
3877         if (p)
3878                 stats = p->mcast_stats;
3879         else
3880                 stats = br->mcast_stats;
3881         if (WARN_ON(!stats))
3882                 return;
3883
3884         br_mcast_stats_add(stats, skb, type, dir);
3885 }
3886
3887 int br_multicast_init_stats(struct net_bridge *br)
3888 {
3889         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
3890         if (!br->mcast_stats)
3891                 return -ENOMEM;
3892
3893         return 0;
3894 }
3895
3896 void br_multicast_uninit_stats(struct net_bridge *br)
3897 {
3898         free_percpu(br->mcast_stats);
3899 }
3900
3901 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
3902 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
3903 {
3904         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
3905         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
3906 }
3907
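/* Fold the per-CPU multicast counters of the bridge (or of @p when given)
 * into @dest, using the u64_stats seqcount to obtain consistent snapshots.
 */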
3908 void br_multicast_get_stats(const struct net_bridge *br,
3909                             const struct net_bridge_port *p,
3910                             struct br_mcast_stats *dest)
3911 {
3912         struct bridge_mcast_stats __percpu *stats;
3913         struct br_mcast_stats tdst;
3914         int i;
3915
3916         memset(dest, 0, sizeof(*dest));
3917         if (p)
3918                 stats = p->mcast_stats;
3919         else
3920                 stats = br->mcast_stats;
3921         if (WARN_ON(!stats))
3922                 return;
3923
3924         memset(&tdst, 0, sizeof(tdst));
3925         for_each_possible_cpu(i) {
3926                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
3927                 struct br_mcast_stats temp;
3928                 unsigned int start;
3929
3930                 do {
3931                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3932                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
3933                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3934
3935                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
3936                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
3937                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
3938                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
3939                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
3940                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
3941                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
3942                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
3943
3944                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
3945                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
3946                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
3947                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
3948                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
3949                 tdst.mld_parse_errors += temp.mld_parse_errors;
3950         }
3951         memcpy(dest, &tdst, sizeof(*dest));
3952 }
3953
3954 int br_mdb_hash_init(struct net_bridge *br)
3955 {
3956         int err;
3957
3958         err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
3959         if (err)
3960                 return err;
3961
3962         err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
3963         if (err) {
3964                 rhashtable_destroy(&br->sg_port_tbl);
3965                 return err;
3966         }
3967
3968         return 0;
3969 }
3970
3971 void br_mdb_hash_fini(struct net_bridge *br)
3972 {
3973         rhashtable_destroy(&br->sg_port_tbl);
3974         rhashtable_destroy(&br->mdb_hash_tbl);
3975 }