1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
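/* Editor's note (derived from the code below): two rhashtables back the
 * multicast database. mdb_hash_tbl maps a group address (struct br_ip,
 * which includes the vid) to its MDB entry, while sg_port_tbl maps a
 * {port, S,G address} key to the corresponding port group entry.
 */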
38 static const struct rhashtable_params br_mdb_rht_params = {
39         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
40         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
41         .key_len = sizeof(struct br_ip),
42         .automatic_shrinking = true,
43 };
44
45 static const struct rhashtable_params br_sg_port_rht_params = {
46         .head_offset = offsetof(struct net_bridge_port_group, rhnode),
47         .key_offset = offsetof(struct net_bridge_port_group, key),
48         .key_len = sizeof(struct net_bridge_port_group_sg_key),
49         .automatic_shrinking = true,
50 };
51
52 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
53                                        struct bridge_mcast_own_query *query);
54 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
55                                         struct net_bridge_mcast_port *pmctx);
56 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
57                                          struct net_bridge_mcast_port *pmctx,
58                                          __be32 group,
59                                          __u16 vid,
60                                          const unsigned char *src);
61 static void br_multicast_port_group_rexmit(struct timer_list *t);
62
63 static void
64 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
65 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
66                                         struct net_bridge_mcast_port *pmctx);
67 #if IS_ENABLED(CONFIG_IPV6)
68 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
69                                          struct net_bridge_mcast_port *pmctx,
70                                          const struct in6_addr *group,
71                                          __u16 vid, const unsigned char *src);
72 #endif
73 static struct net_bridge_port_group *
74 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
75                          struct net_bridge_mcast_port *pmctx,
76                          struct br_ip *group,
77                          const unsigned char *src,
78                          u8 filter_mode,
79                          bool igmpv2_mldv1,
80                          bool blocked);
81 static void br_multicast_find_del_pg(struct net_bridge *br,
82                                      struct net_bridge_port_group *pg);
83 static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
84
85 static struct net_bridge_port_group *
86 br_sg_port_find(struct net_bridge *br,
87                 struct net_bridge_port_group_sg_key *sg_p)
88 {
89         lockdep_assert_held_once(&br->multicast_lock);
90
91         return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
92                                       br_sg_port_rht_params);
93 }
94
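/* MDB lookup helpers: br_mdb_ip_get_rcu() is for callers already inside an
 * RCU read-side critical section (e.g. the forwarding fast path), while
 * br_mdb_ip_get() is for callers holding br->multicast_lock.
 */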
95 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
96                                                       struct br_ip *dst)
97 {
98         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
99 }
100
101 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
102                                            struct br_ip *dst)
103 {
104         struct net_bridge_mdb_entry *ent;
105
106         lockdep_assert_held_once(&br->multicast_lock);
107
108         rcu_read_lock();
109         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
110         rcu_read_unlock();
111
112         return ent;
113 }
114
115 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
116                                                    __be32 dst, __u16 vid)
117 {
118         struct br_ip br_dst;
119
120         memset(&br_dst, 0, sizeof(br_dst));
121         br_dst.dst.ip4 = dst;
122         br_dst.proto = htons(ETH_P_IP);
123         br_dst.vid = vid;
124
125         return br_mdb_ip_get(br, &br_dst);
126 }
127
128 #if IS_ENABLED(CONFIG_IPV6)
129 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
130                                                    const struct in6_addr *dst,
131                                                    __u16 vid)
132 {
133         struct br_ip br_dst;
134
135         memset(&br_dst, 0, sizeof(br_dst));
136         br_dst.dst.ip6 = *dst;
137         br_dst.proto = htons(ETH_P_IPV6);
138         br_dst.vid = vid;
139
140         return br_mdb_ip_get(br, &br_dst);
141 }
142 #endif
143
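/* Find the MDB entry used to forward @skb. With IGMPv3/MLDv2 an S,G lookup
 * is tried first and falls back to *,G; non-IP traffic is looked up by
 * destination MAC address (L2 multicast groups).
 */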
144 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
145                                         struct sk_buff *skb, u16 vid)
146 {
147         struct net_bridge *br = brmctx->br;
148         struct br_ip ip;
149
150         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
151             br_multicast_ctx_vlan_global_disabled(brmctx))
152                 return NULL;
153
154         if (BR_INPUT_SKB_CB(skb)->igmp)
155                 return NULL;
156
157         memset(&ip, 0, sizeof(ip));
158         ip.proto = skb->protocol;
159         ip.vid = vid;
160
161         switch (skb->protocol) {
162         case htons(ETH_P_IP):
163                 ip.dst.ip4 = ip_hdr(skb)->daddr;
164                 if (brmctx->multicast_igmp_version == 3) {
165                         struct net_bridge_mdb_entry *mdb;
166
167                         ip.src.ip4 = ip_hdr(skb)->saddr;
168                         mdb = br_mdb_ip_get_rcu(br, &ip);
169                         if (mdb)
170                                 return mdb;
171                         ip.src.ip4 = 0;
172                 }
173                 break;
174 #if IS_ENABLED(CONFIG_IPV6)
175         case htons(ETH_P_IPV6):
176                 ip.dst.ip6 = ipv6_hdr(skb)->daddr;
177                 if (brmctx->multicast_mld_version == 2) {
178                         struct net_bridge_mdb_entry *mdb;
179
180                         ip.src.ip6 = ipv6_hdr(skb)->saddr;
181                         mdb = br_mdb_ip_get_rcu(br, &ip);
182                         if (mdb)
183                                 return mdb;
184                         memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
185                 }
186                 break;
187 #endif
188         default:
189                 ip.proto = 0;
190                 ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
191         }
192
193         return br_mdb_ip_get_rcu(br, &ip);
194 }
195
196 /* IMPORTANT: this function must be used only when the contexts cannot be
197  * passed down (e.g. from a timer) and only for read-only purposes, because
198  * the vlan snooping option can change, so it can return either context
199  * (non-vlan or vlan). Its original purpose is to read timer values from
200  * the *current* context based on that option. At worst this can lead to
201  * inconsistent timers when the contexts change, e.g. a src timer that
202  * needs to re-arm with a specific delay taken from the old context.
203  */
204 static struct net_bridge_mcast_port *
205 br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
206 {
207         struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
208         struct net_bridge_vlan *vlan;
209
210         lockdep_assert_held_once(&pg->key.port->br->multicast_lock);
211
212         /* if vlan snooping is disabled use the port's multicast context */
213         if (!pg->key.addr.vid ||
214             !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
215                 goto out;
216
217         /* Locking is tricky here: due to the different rules for multicast
218          * and vlans we need to take rcu to find the vlan and make sure it
219          * has the BR_VLFLAG_MCAST_ENABLED flag set. It can only change under
220          * multicast_lock, which must already be held here, so the vlan's
221          * pmctx can safely be used on return.
222          */
223         rcu_read_lock();
224         vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
225         if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
226                 pmctx = &vlan->port_mcast_ctx;
227         else
228                 pmctx = NULL;
229         rcu_read_unlock();
230 out:
231         return pmctx;
232 }
233
234 /* when snooping we need to check if the contexts should be used
235  * in the following order:
236  * - if pmctx is non-NULL (port), check if it should be used
237  * - if pmctx is NULL (bridge), check if brmctx should be used
238  */
239 static bool
240 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
241                             const struct net_bridge_mcast_port *pmctx)
242 {
243         if (!netif_running(brmctx->br->dev))
244                 return false;
245
246         if (pmctx)
247                 return !br_multicast_port_ctx_state_disabled(pmctx);
248         else
249                 return !br_multicast_ctx_vlan_disabled(brmctx);
250 }
251
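/* A port group entry matches if it belongs to @port and, when the port has
 * BR_MULTICAST_TO_UNICAST enabled, if it was created for the same host MAC
 * address (@src).
 */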
252 static bool br_port_group_equal(struct net_bridge_port_group *p,
253                                 struct net_bridge_port *port,
254                                 const unsigned char *src)
255 {
256         if (p->key.port != port)
257                 return false;
258
259         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
260                 return true;
261
262         return ether_addr_equal(src, p->eth_addr);
263 }
264
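/* Helpers for the automatically installed S,G entries that back a *,G
 * EXCLUDE port: __fwd_add_star_excl() adds a kernel-managed entry tagged
 * MDB_PG_FLAGS_STAR_EXCL (unless one already exists) and
 * __fwd_del_star_excl() removes such an entry again.
 */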
265 static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
266                                 struct net_bridge_port_group *pg,
267                                 struct br_ip *sg_ip)
268 {
269         struct net_bridge_port_group_sg_key sg_key;
270         struct net_bridge_port_group *src_pg;
271         struct net_bridge_mcast *brmctx;
272
273         memset(&sg_key, 0, sizeof(sg_key));
274         brmctx = br_multicast_port_ctx_get_global(pmctx);
275         sg_key.port = pg->key.port;
276         sg_key.addr = *sg_ip;
277         if (br_sg_port_find(brmctx->br, &sg_key))
278                 return;
279
280         src_pg = __br_multicast_add_group(brmctx, pmctx,
281                                           sg_ip, pg->eth_addr,
282                                           MCAST_INCLUDE, false, false);
283         if (IS_ERR_OR_NULL(src_pg) ||
284             src_pg->rt_protocol != RTPROT_KERNEL)
285                 return;
286
287         src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
288 }
289
290 static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
291                                 struct br_ip *sg_ip)
292 {
293         struct net_bridge_port_group_sg_key sg_key;
294         struct net_bridge *br = pg->key.port->br;
295         struct net_bridge_port_group *src_pg;
296
297         memset(&sg_key, 0, sizeof(sg_key));
298         sg_key.port = pg->key.port;
299         sg_key.addr = *sg_ip;
300         src_pg = br_sg_port_find(br, &sg_key);
301         if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
302             src_pg->rt_protocol != RTPROT_KERNEL)
303                 return;
304
305         br_multicast_find_del_pg(br, src_pg);
306 }
307
308 /* When a port group transitions to (or is added as) EXCLUDE we need to add it
309  * to all other ports' S,G entries which are not blocked by the current group
310  * for proper replication; the assumption is that any S,G blocked entries
311  * are already added, so the S,G,port lookup should skip them.
312  * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
313  * deleted we need to remove it from all ports' S,G entries where it was
314  * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
315  */
316 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
317                                      u8 filter_mode)
318 {
319         struct net_bridge *br = pg->key.port->br;
320         struct net_bridge_port_group *pg_lst;
321         struct net_bridge_mcast_port *pmctx;
322         struct net_bridge_mdb_entry *mp;
323         struct br_ip sg_ip;
324
325         if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
326                 return;
327
328         mp = br_mdb_ip_get(br, &pg->key.addr);
329         if (!mp)
330                 return;
331         pmctx = br_multicast_pg_to_port_ctx(pg);
332         if (!pmctx)
333                 return;
334
335         memset(&sg_ip, 0, sizeof(sg_ip));
336         sg_ip = pg->key.addr;
337
338         for (pg_lst = mlock_dereference(mp->ports, br);
339              pg_lst;
340              pg_lst = mlock_dereference(pg_lst->next, br)) {
341                 struct net_bridge_group_src *src_ent;
342
343                 if (pg_lst == pg)
344                         continue;
345                 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
346                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
347                                 continue;
348                         sg_ip.src = src_ent->addr.src;
349                         switch (filter_mode) {
350                         case MCAST_INCLUDE:
351                                 __fwd_del_star_excl(pg, &sg_ip);
352                                 break;
353                         case MCAST_EXCLUDE:
354                                 __fwd_add_star_excl(pmctx, pg, &sg_ip);
355                                 break;
356                         }
357                 }
358         }
359 }
360
361 /* called when adding a new S,G with host_joined == false by default */
362 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
363                                        struct net_bridge_port_group *sg)
364 {
365         struct net_bridge_mdb_entry *sg_mp;
366
367         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
368                 return;
369         if (!star_mp->host_joined)
370                 return;
371
372         sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
373         if (!sg_mp)
374                 return;
375         sg_mp->host_joined = true;
376 }
377
378 /* set the host_joined state of all of *,G's S,G entries */
379 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
380 {
381         struct net_bridge *br = star_mp->br;
382         struct net_bridge_mdb_entry *sg_mp;
383         struct net_bridge_port_group *pg;
384         struct br_ip sg_ip;
385
386         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
387                 return;
388
389         memset(&sg_ip, 0, sizeof(sg_ip));
390         sg_ip = star_mp->addr;
391         for (pg = mlock_dereference(star_mp->ports, br);
392              pg;
393              pg = mlock_dereference(pg->next, br)) {
394                 struct net_bridge_group_src *src_ent;
395
396                 hlist_for_each_entry(src_ent, &pg->src_list, node) {
397                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
398                                 continue;
399                         sg_ip.src = src_ent->addr.src;
400                         sg_mp = br_mdb_ip_get(br, &sg_ip);
401                         if (!sg_mp)
402                                 continue;
403                         sg_mp->host_joined = star_mp->host_joined;
404                 }
405         }
406 }
407
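/* Called when an S,G entry may be left only with automatically installed
 * (STAR_EXCL) or permanent ports: if no regular port remains, drop the host
 * join state and remove the kernel-managed ports.
 */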
408 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
409 {
410         struct net_bridge_port_group __rcu **pp;
411         struct net_bridge_port_group *p;
412
413         /* *,G exclude ports are only added to S,G entries */
414         if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
415                 return;
416
417         /* keep the STAR_EXCLUDE ports only if there are non-STAR_EXCLUDE
418          * ports; permanent entries are ignored since they're managed by user-space
419          */
420         for (pp = &sgmp->ports;
421              (p = mlock_dereference(*pp, sgmp->br)) != NULL;
422              pp = &p->next)
423                 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
424                                   MDB_PG_FLAGS_PERMANENT)))
425                         return;
426
427         /* currently the host can only have joined the *,G which means
428          * we treat it as EXCLUDE {}, so for an S,G it's considered a
429          * STAR_EXCLUDE entry and we can safely leave it
430          */
431         sgmp->host_joined = false;
432
433         for (pp = &sgmp->ports;
434              (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
435                 if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
436                         br_multicast_del_pg(sgmp, p, pp);
437                 else
438                         pp = &p->next;
439         }
440 }
441
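/* Propagate *,G state to a newly created S,G entry: copy the host join
 * state and install kernel-managed entries for every *,G EXCLUDE port
 * which doesn't already have this S,G.
 */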
442 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
443                                        struct net_bridge_port_group *sg)
444 {
445         struct net_bridge_port_group_sg_key sg_key;
446         struct net_bridge *br = star_mp->br;
447         struct net_bridge_mcast_port *pmctx;
448         struct net_bridge_port_group *pg;
449         struct net_bridge_mcast *brmctx;
450
451         if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
452                 return;
453         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
454                 return;
455
456         br_multicast_sg_host_state(star_mp, sg);
457         memset(&sg_key, 0, sizeof(sg_key));
458         sg_key.addr = sg->key.addr;
459         /* we need to add all exclude ports to the S,G */
460         for (pg = mlock_dereference(star_mp->ports, br);
461              pg;
462              pg = mlock_dereference(pg->next, br)) {
463                 struct net_bridge_port_group *src_pg;
464
465                 if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
466                         continue;
467
468                 sg_key.port = pg->key.port;
469                 if (br_sg_port_find(br, &sg_key))
470                         continue;
471
472                 pmctx = br_multicast_pg_to_port_ctx(pg);
473                 if (!pmctx)
474                         continue;
475                 brmctx = br_multicast_port_ctx_get_global(pmctx);
476
477                 src_pg = __br_multicast_add_group(brmctx, pmctx,
478                                                   &sg->key.addr,
479                                                   sg->eth_addr,
480                                                   MCAST_INCLUDE, false, false);
481                 if (IS_ERR_OR_NULL(src_pg) ||
482                     src_pg->rt_protocol != RTPROT_KERNEL)
483                         continue;
484                 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
485         }
486 }
487
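/* Install forwarding for a group source: add the S,G port group entry
 * (blocked while the source timer isn't running) and, unless user-space
 * added it as permanent, let the kernel manage its lifetime and sync it
 * with the *,G EXCLUDE ports.
 */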
488 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
489 {
490         struct net_bridge_mdb_entry *star_mp;
491         struct net_bridge_mcast_port *pmctx;
492         struct net_bridge_port_group *sg;
493         struct net_bridge_mcast *brmctx;
494         struct br_ip sg_ip;
495
496         if (src->flags & BR_SGRP_F_INSTALLED)
497                 return;
498
499         memset(&sg_ip, 0, sizeof(sg_ip));
500         pmctx = br_multicast_pg_to_port_ctx(src->pg);
501         if (!pmctx)
502                 return;
503         brmctx = br_multicast_port_ctx_get_global(pmctx);
504         sg_ip = src->pg->key.addr;
505         sg_ip.src = src->addr.src;
506
507         sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
508                                       src->pg->eth_addr, MCAST_INCLUDE, false,
509                                       !timer_pending(&src->timer));
510         if (IS_ERR_OR_NULL(sg))
511                 return;
512         src->flags |= BR_SGRP_F_INSTALLED;
513         sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
514
515         /* if it was added by user-space as perm we can skip next steps */
516         if (sg->rt_protocol != RTPROT_KERNEL &&
517             (sg->flags & MDB_PG_FLAGS_PERMANENT))
518                 return;
519
520         /* the kernel is now responsible for removing this S,G */
521         del_timer(&sg->timer);
522         star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
523         if (!star_mp)
524                 return;
525
526         br_multicast_sg_add_exclude_ports(star_mp, sg);
527 }
528
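/* Remove the S,G port group entry that was installed for @src, unless it
 * was added by user-space as a permanent entry.
 */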
529 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
530                                         bool fastleave)
531 {
532         struct net_bridge_port_group *p, *pg = src->pg;
533         struct net_bridge_port_group __rcu **pp;
534         struct net_bridge_mdb_entry *mp;
535         struct br_ip sg_ip;
536
537         memset(&sg_ip, 0, sizeof(sg_ip));
538         sg_ip = pg->key.addr;
539         sg_ip.src = src->addr.src;
540
541         mp = br_mdb_ip_get(src->br, &sg_ip);
542         if (!mp)
543                 return;
544
545         for (pp = &mp->ports;
546              (p = mlock_dereference(*pp, src->br)) != NULL;
547              pp = &p->next) {
548                 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
549                         continue;
550
551                 if (p->rt_protocol != RTPROT_KERNEL &&
552                     (p->flags & MDB_PG_FLAGS_PERMANENT))
553                         break;
554
555                 if (fastleave)
556                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
557                 br_multicast_del_pg(mp, p, pp);
558                 break;
559         }
560         src->flags &= ~BR_SGRP_F_INSTALLED;
561 }
562
563 /* install S,G and based on src's timer enable or disable forwarding */
564 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
565 {
566         struct net_bridge_port_group_sg_key sg_key;
567         struct net_bridge_port_group *sg;
568         u8 old_flags;
569
570         br_multicast_fwd_src_add(src);
571
572         memset(&sg_key, 0, sizeof(sg_key));
573         sg_key.addr = src->pg->key.addr;
574         sg_key.addr.src = src->addr.src;
575         sg_key.port = src->pg->key.port;
576
577         sg = br_sg_port_find(src->br, &sg_key);
578         if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
579                 return;
580
581         old_flags = sg->flags;
582         if (timer_pending(&src->timer))
583                 sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
584         else
585                 sg->flags |= MDB_PG_FLAGS_BLOCKED;
586
587         if (old_flags != sg->flags) {
588                 struct net_bridge_mdb_entry *sg_mp;
589
590                 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
591                 if (!sg_mp)
592                         return;
593                 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
594         }
595 }
596
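/* Garbage collection: entries are unlinked under br->multicast_lock, queued
 * on br->mcast_gc_list and freed from workqueue context so their timers can
 * be stopped synchronously before the RCU-delayed free.
 */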
597 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
598 {
599         struct net_bridge_mdb_entry *mp;
600
601         mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
602         WARN_ON(!hlist_unhashed(&mp->mdb_node));
603         WARN_ON(mp->ports);
604
605         del_timer_sync(&mp->timer);
606         kfree_rcu(mp, rcu);
607 }
608
609 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
610 {
611         struct net_bridge *br = mp->br;
612
613         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
614                                br_mdb_rht_params);
615         hlist_del_init_rcu(&mp->mdb_node);
616         hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
617         queue_work(system_long_wq, &br->mcast_gc_work);
618 }
619
620 static void br_multicast_group_expired(struct timer_list *t)
621 {
622         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
623         struct net_bridge *br = mp->br;
624
625         spin_lock(&br->multicast_lock);
626         if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
627             timer_pending(&mp->timer))
628                 goto out;
629
630         br_multicast_host_leave(mp, true);
631
632         if (mp->ports)
633                 goto out;
634         br_multicast_del_mdb_entry(mp);
635 out:
636         spin_unlock(&br->multicast_lock);
637 }
638
639 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
640 {
641         struct net_bridge_group_src *src;
642
643         src = container_of(gc, struct net_bridge_group_src, mcast_gc);
644         WARN_ON(!hlist_unhashed(&src->node));
645
646         del_timer_sync(&src->timer);
647         kfree_rcu(src, rcu);
648 }
649
650 void br_multicast_del_group_src(struct net_bridge_group_src *src,
651                                 bool fastleave)
652 {
653         struct net_bridge *br = src->pg->key.port->br;
654
655         br_multicast_fwd_src_remove(src, fastleave);
656         hlist_del_init_rcu(&src->node);
657         src->pg->src_ents--;
658         hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
659         queue_work(system_long_wq, &br->mcast_gc_work);
660 }
661
662 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
663 {
664         struct net_bridge_port_group *pg;
665
666         pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
667         WARN_ON(!hlist_unhashed(&pg->mglist));
668         WARN_ON(!hlist_empty(&pg->src_list));
669
670         del_timer_sync(&pg->rexmit_timer);
671         del_timer_sync(&pg->timer);
672         kfree_rcu(pg, rcu);
673 }
674
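/* Unlink @pg from @mp's port list, drop its sources and schedule it for
 * destruction; related auto-installed S,G state is cleaned up and the MDB
 * entry's timer is armed to expire if the entry became unused.
 */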
675 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
676                          struct net_bridge_port_group *pg,
677                          struct net_bridge_port_group __rcu **pp)
678 {
679         struct net_bridge *br = pg->key.port->br;
680         struct net_bridge_group_src *ent;
681         struct hlist_node *tmp;
682
683         rcu_assign_pointer(*pp, pg->next);
684         hlist_del_init(&pg->mglist);
685         br_multicast_eht_clean_sets(pg);
686         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
687                 br_multicast_del_group_src(ent, false);
688         br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
689         if (!br_multicast_is_star_g(&mp->addr)) {
690                 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
691                                        br_sg_port_rht_params);
692                 br_multicast_sg_del_exclude_ports(mp);
693         } else {
694                 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
695         }
696         hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
697         queue_work(system_long_wq, &br->mcast_gc_work);
698
699         if (!mp->ports && !mp->host_joined && netif_running(br->dev))
700                 mod_timer(&mp->timer, jiffies);
701 }
702
703 static void br_multicast_find_del_pg(struct net_bridge *br,
704                                      struct net_bridge_port_group *pg)
705 {
706         struct net_bridge_port_group __rcu **pp;
707         struct net_bridge_mdb_entry *mp;
708         struct net_bridge_port_group *p;
709
710         mp = br_mdb_ip_get(br, &pg->key.addr);
711         if (WARN_ON(!mp))
712                 return;
713
714         for (pp = &mp->ports;
715              (p = mlock_dereference(*pp, br)) != NULL;
716              pp = &p->next) {
717                 if (p != pg)
718                         continue;
719
720                 br_multicast_del_pg(mp, pg, pp);
721                 return;
722         }
723
724         WARN_ON(1);
725 }
726
727 static void br_multicast_port_group_expired(struct timer_list *t)
728 {
729         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
730         struct net_bridge_group_src *src_ent;
731         struct net_bridge *br = pg->key.port->br;
732         struct hlist_node *tmp;
733         bool changed;
734
735         spin_lock(&br->multicast_lock);
736         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
737             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
738                 goto out;
739
740         changed = !!(pg->filter_mode == MCAST_EXCLUDE);
741         pg->filter_mode = MCAST_INCLUDE;
742         hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
743                 if (!timer_pending(&src_ent->timer)) {
744                         br_multicast_del_group_src(src_ent, false);
745                         changed = true;
746                 }
747         }
748
749         if (hlist_empty(&pg->src_list)) {
750                 br_multicast_find_del_pg(br, pg);
751         } else if (changed) {
752                 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
753
754                 if (changed && br_multicast_is_star_g(&pg->key.addr))
755                         br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
756
757                 if (WARN_ON(!mp))
758                         goto out;
759                 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
760         }
761 out:
762         spin_unlock(&br->multicast_lock);
763 }
764
765 static void br_multicast_gc(struct hlist_head *head)
766 {
767         struct net_bridge_mcast_gc *gcent;
768         struct hlist_node *tmp;
769
770         hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
771                 hlist_del_init(&gcent->gc_node);
772                 gcent->destroy(gcent);
773         }
774 }
775
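/* If the query is generated in a vlan multicast context and that vlan
 * egresses tagged, put the vlan tag on the skb.
 */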
776 static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
777                                              struct net_bridge_mcast_port *pmctx,
778                                              struct sk_buff *skb)
779 {
780         struct net_bridge_vlan *vlan = NULL;
781
782         if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
783                 vlan = pmctx->vlan;
784         else if (br_multicast_ctx_is_vlan(brmctx))
785                 vlan = brmctx->vlan;
786
787         if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
788                 u16 vlan_proto;
789
790                 if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
791                         return;
792                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
793         }
794 }
795
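/* Build an IGMPv2/v3 query. For IGMPv3 group-and-source-specific queries
 * (@with_srcs) only sources whose timers are on the requested side of the
 * last member query time and which still have retransmissions pending are
 * included; their rexmit counters are decremented here.
 */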
796 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
797                                                     struct net_bridge_mcast_port *pmctx,
798                                                     struct net_bridge_port_group *pg,
799                                                     __be32 ip_dst, __be32 group,
800                                                     bool with_srcs, bool over_lmqt,
801                                                     u8 sflag, u8 *igmp_type,
802                                                     bool *need_rexmit)
803 {
804         struct net_bridge_port *p = pg ? pg->key.port : NULL;
805         struct net_bridge_group_src *ent;
806         size_t pkt_size, igmp_hdr_size;
807         unsigned long now = jiffies;
808         struct igmpv3_query *ihv3;
809         void *csum_start = NULL;
810         __sum16 *csum = NULL;
811         struct sk_buff *skb;
812         struct igmphdr *ih;
813         struct ethhdr *eth;
814         unsigned long lmqt;
815         struct iphdr *iph;
816         u16 lmqt_srcs = 0;
817
818         igmp_hdr_size = sizeof(*ih);
819         if (brmctx->multicast_igmp_version == 3) {
820                 igmp_hdr_size = sizeof(*ihv3);
821                 if (pg && with_srcs) {
822                         lmqt = now + (brmctx->multicast_last_member_interval *
823                                       brmctx->multicast_last_member_count);
824                         hlist_for_each_entry(ent, &pg->src_list, node) {
825                                 if (over_lmqt == time_after(ent->timer.expires,
826                                                             lmqt) &&
827                                     ent->src_query_rexmit_cnt > 0)
828                                         lmqt_srcs++;
829                         }
830
831                         if (!lmqt_srcs)
832                                 return NULL;
833                         igmp_hdr_size += lmqt_srcs * sizeof(__be32);
834                 }
835         }
836
837         pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
838         if ((p && pkt_size > p->dev->mtu) ||
839             pkt_size > brmctx->br->dev->mtu)
840                 return NULL;
841
842         skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
843         if (!skb)
844                 goto out;
845
846         __br_multicast_query_handle_vlan(brmctx, pmctx, skb);
847         skb->protocol = htons(ETH_P_IP);
848
849         skb_reset_mac_header(skb);
850         eth = eth_hdr(skb);
851
852         ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
853         ip_eth_mc_map(ip_dst, eth->h_dest);
854         eth->h_proto = htons(ETH_P_IP);
855         skb_put(skb, sizeof(*eth));
856
857         skb_set_network_header(skb, skb->len);
858         iph = ip_hdr(skb);
859         iph->tot_len = htons(pkt_size - sizeof(*eth));
860
861         iph->version = 4;
862         iph->ihl = 6;
863         iph->tos = 0xc0;
864         iph->id = 0;
865         iph->frag_off = htons(IP_DF);
866         iph->ttl = 1;
867         iph->protocol = IPPROTO_IGMP;
868         iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
869                      inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
870         iph->daddr = ip_dst;
871         ((u8 *)&iph[1])[0] = IPOPT_RA;
872         ((u8 *)&iph[1])[1] = 4;
873         ((u8 *)&iph[1])[2] = 0;
874         ((u8 *)&iph[1])[3] = 0;
875         ip_send_check(iph);
876         skb_put(skb, 24);
877
878         skb_set_transport_header(skb, skb->len);
879         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
880
881         switch (brmctx->multicast_igmp_version) {
882         case 2:
883                 ih = igmp_hdr(skb);
884                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
885                 ih->code = (group ? brmctx->multicast_last_member_interval :
886                                     brmctx->multicast_query_response_interval) /
887                            (HZ / IGMP_TIMER_SCALE);
888                 ih->group = group;
889                 ih->csum = 0;
890                 csum = &ih->csum;
891                 csum_start = (void *)ih;
892                 break;
893         case 3:
894                 ihv3 = igmpv3_query_hdr(skb);
895                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
896                 ihv3->code = (group ? brmctx->multicast_last_member_interval :
897                                       brmctx->multicast_query_response_interval) /
898                              (HZ / IGMP_TIMER_SCALE);
899                 ihv3->group = group;
900                 ihv3->qqic = brmctx->multicast_query_interval / HZ;
901                 ihv3->nsrcs = htons(lmqt_srcs);
902                 ihv3->resv = 0;
903                 ihv3->suppress = sflag;
904                 ihv3->qrv = 2;
905                 ihv3->csum = 0;
906                 csum = &ihv3->csum;
907                 csum_start = (void *)ihv3;
908                 if (!pg || !with_srcs)
909                         break;
910
911                 lmqt_srcs = 0;
912                 hlist_for_each_entry(ent, &pg->src_list, node) {
913                         if (over_lmqt == time_after(ent->timer.expires,
914                                                     lmqt) &&
915                             ent->src_query_rexmit_cnt > 0) {
916                                 ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
917                                 ent->src_query_rexmit_cnt--;
918                                 if (need_rexmit && ent->src_query_rexmit_cnt)
919                                         *need_rexmit = true;
920                         }
921                 }
922                 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
923                         kfree_skb(skb);
924                         return NULL;
925                 }
926                 break;
927         }
928
929         if (WARN_ON(!csum || !csum_start)) {
930                 kfree_skb(skb);
931                 return NULL;
932         }
933
934         *csum = ip_compute_csum(csum_start, igmp_hdr_size);
935         skb_put(skb, igmp_hdr_size);
936         __skb_pull(skb, sizeof(*eth));
937
938 out:
939         return skb;
940 }
941
942 #if IS_ENABLED(CONFIG_IPV6)
943 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
944                                                     struct net_bridge_mcast_port *pmctx,
945                                                     struct net_bridge_port_group *pg,
946                                                     const struct in6_addr *ip6_dst,
947                                                     const struct in6_addr *group,
948                                                     bool with_srcs, bool over_llqt,
949                                                     u8 sflag, u8 *igmp_type,
950                                                     bool *need_rexmit)
951 {
952         struct net_bridge_port *p = pg ? pg->key.port : NULL;
953         struct net_bridge_group_src *ent;
954         size_t pkt_size, mld_hdr_size;
955         unsigned long now = jiffies;
956         struct mld2_query *mld2q;
957         void *csum_start = NULL;
958         unsigned long interval;
959         __sum16 *csum = NULL;
960         struct ipv6hdr *ip6h;
961         struct mld_msg *mldq;
962         struct sk_buff *skb;
963         unsigned long llqt;
964         struct ethhdr *eth;
965         u16 llqt_srcs = 0;
966         u8 *hopopt;
967
968         mld_hdr_size = sizeof(*mldq);
969         if (brmctx->multicast_mld_version == 2) {
970                 mld_hdr_size = sizeof(*mld2q);
971                 if (pg && with_srcs) {
972                         llqt = now + (brmctx->multicast_last_member_interval *
973                                       brmctx->multicast_last_member_count);
974                         hlist_for_each_entry(ent, &pg->src_list, node) {
975                                 if (over_llqt == time_after(ent->timer.expires,
976                                                             llqt) &&
977                                     ent->src_query_rexmit_cnt > 0)
978                                         llqt_srcs++;
979                         }
980
981                         if (!llqt_srcs)
982                                 return NULL;
983                         mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
984                 }
985         }
986
987         pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
988         if ((p && pkt_size > p->dev->mtu) ||
989             pkt_size > brmctx->br->dev->mtu)
990                 return NULL;
991
992         skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
993         if (!skb)
994                 goto out;
995
996         __br_multicast_query_handle_vlan(brmctx, pmctx, skb);
997         skb->protocol = htons(ETH_P_IPV6);
998
999         /* Ethernet header */
1000         skb_reset_mac_header(skb);
1001         eth = eth_hdr(skb);
1002
1003         ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
1004         eth->h_proto = htons(ETH_P_IPV6);
1005         skb_put(skb, sizeof(*eth));
1006
1007         /* IPv6 header + HbH option */
1008         skb_set_network_header(skb, skb->len);
1009         ip6h = ipv6_hdr(skb);
1010
1011         *(__force __be32 *)ip6h = htonl(0x60000000);
1012         ip6h->payload_len = htons(8 + mld_hdr_size);
1013         ip6h->nexthdr = IPPROTO_HOPOPTS;
1014         ip6h->hop_limit = 1;
1015         ip6h->daddr = *ip6_dst;
1016         if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
1017                                &ip6h->daddr, 0, &ip6h->saddr)) {
1018                 kfree_skb(skb);
1019                 br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
1020                 return NULL;
1021         }
1022
1023         br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
1024         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
1025
1026         hopopt = (u8 *)(ip6h + 1);
1027         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
1028         hopopt[1] = 0;                          /* length of HbH */
1029         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
1030         hopopt[3] = 2;                          /* Length of RA Option */
1031         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
1032         hopopt[5] = 0;
1033         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
1034         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
1035
1036         skb_put(skb, sizeof(*ip6h) + 8);
1037
1038         /* ICMPv6 */
1039         skb_set_transport_header(skb, skb->len);
1040         interval = ipv6_addr_any(group) ?
1041                         brmctx->multicast_query_response_interval :
1042                         brmctx->multicast_last_member_interval;
1043         *igmp_type = ICMPV6_MGM_QUERY;
1044         switch (brmctx->multicast_mld_version) {
1045         case 1:
1046                 mldq = (struct mld_msg *)icmp6_hdr(skb);
1047                 mldq->mld_type = ICMPV6_MGM_QUERY;
1048                 mldq->mld_code = 0;
1049                 mldq->mld_cksum = 0;
1050                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
1051                 mldq->mld_reserved = 0;
1052                 mldq->mld_mca = *group;
1053                 csum = &mldq->mld_cksum;
1054                 csum_start = (void *)mldq;
1055                 break;
1056         case 2:
1057                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1058                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
1059                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
1060                 mld2q->mld2q_code = 0;
1061                 mld2q->mld2q_cksum = 0;
1062                 mld2q->mld2q_resv1 = 0;
1063                 mld2q->mld2q_resv2 = 0;
1064                 mld2q->mld2q_suppress = sflag;
1065                 mld2q->mld2q_qrv = 2;
1066                 mld2q->mld2q_nsrcs = htons(llqt_srcs);
1067                 mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
1068                 mld2q->mld2q_mca = *group;
1069                 csum = &mld2q->mld2q_cksum;
1070                 csum_start = (void *)mld2q;
1071                 if (!pg || !with_srcs)
1072                         break;
1073
1074                 llqt_srcs = 0;
1075                 hlist_for_each_entry(ent, &pg->src_list, node) {
1076                         if (over_llqt == time_after(ent->timer.expires,
1077                                                     llqt) &&
1078                             ent->src_query_rexmit_cnt > 0) {
1079                                 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
1080                                 ent->src_query_rexmit_cnt--;
1081                                 if (need_rexmit && ent->src_query_rexmit_cnt)
1082                                         *need_rexmit = true;
1083                         }
1084                 }
1085                 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
1086                         kfree_skb(skb);
1087                         return NULL;
1088                 }
1089                 break;
1090         }
1091
1092         if (WARN_ON(!csum || !csum_start)) {
1093                 kfree_skb(skb);
1094                 return NULL;
1095         }
1096
1097         *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
1098                                 IPPROTO_ICMPV6,
1099                                 csum_partial(csum_start, mld_hdr_size, 0));
1100         skb_put(skb, mld_hdr_size);
1101         __skb_pull(skb, sizeof(*eth));
1102
1103 out:
1104         return skb;
1105 }
1106 #endif
1107
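/* Dispatch to the IPv4 or IPv6 query builder based on the group's protocol;
 * without an explicit destination the all-hosts (224.0.0.1) respectively
 * all-nodes (ff02::1) address is used.
 */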
1108 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
1109                                                 struct net_bridge_mcast_port *pmctx,
1110                                                 struct net_bridge_port_group *pg,
1111                                                 struct br_ip *ip_dst,
1112                                                 struct br_ip *group,
1113                                                 bool with_srcs, bool over_lmqt,
1114                                                 u8 sflag, u8 *igmp_type,
1115                                                 bool *need_rexmit)
1116 {
1117         __be32 ip4_dst;
1118
1119         switch (group->proto) {
1120         case htons(ETH_P_IP):
1121                 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1122                 return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
1123                                                     ip4_dst, group->dst.ip4,
1124                                                     with_srcs, over_lmqt,
1125                                                     sflag, igmp_type,
1126                                                     need_rexmit);
1127 #if IS_ENABLED(CONFIG_IPV6)
1128         case htons(ETH_P_IPV6): {
1129                 struct in6_addr ip6_dst;
1130
1131                 if (ip_dst)
1132                         ip6_dst = ip_dst->dst.ip6;
1133                 else
1134                         ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1135                                       htonl(1));
1136
1137                 return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
1138                                                     &ip6_dst, &group->dst.ip6,
1139                                                     with_srcs, over_lmqt,
1140                                                     sflag, igmp_type,
1141                                                     need_rexmit);
1142         }
1143 #endif
1144         }
1145         return NULL;
1146 }
1147
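/* Look up or create the MDB entry for @group. If hash_max is exceeded,
 * multicast snooping is disabled on the bridge and -E2BIG is returned.
 */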
1148 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1149                                                     struct br_ip *group)
1150 {
1151         struct net_bridge_mdb_entry *mp;
1152         int err;
1153
1154         mp = br_mdb_ip_get(br, group);
1155         if (mp)
1156                 return mp;
1157
1158         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1159                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1160                 return ERR_PTR(-E2BIG);
1161         }
1162
1163         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
1164         if (unlikely(!mp))
1165                 return ERR_PTR(-ENOMEM);
1166
1167         mp->br = br;
1168         mp->addr = *group;
1169         mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1170         timer_setup(&mp->timer, br_multicast_group_expired, 0);
1171         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1172                                             br_mdb_rht_params);
1173         if (err) {
1174                 kfree(mp);
1175                 mp = ERR_PTR(err);
1176         } else {
1177                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1178         }
1179
1180         return mp;
1181 }
1182
1183 static void br_multicast_group_src_expired(struct timer_list *t)
1184 {
1185         struct net_bridge_group_src *src = from_timer(src, t, timer);
1186         struct net_bridge_port_group *pg;
1187         struct net_bridge *br = src->br;
1188
1189         spin_lock(&br->multicast_lock);
1190         if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
1191             timer_pending(&src->timer))
1192                 goto out;
1193
1194         pg = src->pg;
1195         if (pg->filter_mode == MCAST_INCLUDE) {
1196                 br_multicast_del_group_src(src, false);
1197                 if (!hlist_empty(&pg->src_list))
1198                         goto out;
1199                 br_multicast_find_del_pg(br, pg);
1200         } else {
1201                 br_multicast_fwd_src_handle(src);
1202         }
1203
1204 out:
1205         spin_unlock(&br->multicast_lock);
1206 }
1207
1208 struct net_bridge_group_src *
1209 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1210 {
1211         struct net_bridge_group_src *ent;
1212
1213         switch (ip->proto) {
1214         case htons(ETH_P_IP):
1215                 hlist_for_each_entry(ent, &pg->src_list, node)
1216                         if (ip->src.ip4 == ent->addr.src.ip4)
1217                                 return ent;
1218                 break;
1219 #if IS_ENABLED(CONFIG_IPV6)
1220         case htons(ETH_P_IPV6):
1221                 hlist_for_each_entry(ent, &pg->src_list, node)
1222                         if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1223                                 return ent;
1224                 break;
1225 #endif
1226         }
1227
1228         return NULL;
1229 }
1230
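/* Allocate and link a new source entry for @pg, bounded by PG_SRC_ENT_LIMIT;
 * zeronet/any and multicast source addresses are rejected.
 */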
1231 static struct net_bridge_group_src *
1232 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1233 {
1234         struct net_bridge_group_src *grp_src;
1235
1236         if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1237                 return NULL;
1238
1239         switch (src_ip->proto) {
1240         case htons(ETH_P_IP):
1241                 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1242                     ipv4_is_multicast(src_ip->src.ip4))
1243                         return NULL;
1244                 break;
1245 #if IS_ENABLED(CONFIG_IPV6)
1246         case htons(ETH_P_IPV6):
1247                 if (ipv6_addr_any(&src_ip->src.ip6) ||
1248                     ipv6_addr_is_multicast(&src_ip->src.ip6))
1249                         return NULL;
1250                 break;
1251 #endif
1252         }
1253
1254         grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
1255         if (unlikely(!grp_src))
1256                 return NULL;
1257
1258         grp_src->pg = pg;
1259         grp_src->br = pg->key.port->br;
1260         grp_src->addr = *src_ip;
1261         grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1262         timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1263
1264         hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1265         pg->src_ents++;
1266
1267         return grp_src;
1268 }
1269
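/* Allocate a new port group entry for @group on @port; S,G entries are
 * also inserted into the bridge's S,G port rhashtable.
 */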
1270 struct net_bridge_port_group *br_multicast_new_port_group(
1271                         struct net_bridge_port *port,
1272                         struct br_ip *group,
1273                         struct net_bridge_port_group __rcu *next,
1274                         unsigned char flags,
1275                         const unsigned char *src,
1276                         u8 filter_mode,
1277                         u8 rt_protocol)
1278 {
1279         struct net_bridge_port_group *p;
1280
1281         p = kzalloc(sizeof(*p), GFP_ATOMIC);
1282         if (unlikely(!p))
1283                 return NULL;
1284
1285         p->key.addr = *group;
1286         p->key.port = port;
1287         p->flags = flags;
1288         p->filter_mode = filter_mode;
1289         p->rt_protocol = rt_protocol;
1290         p->eht_host_tree = RB_ROOT;
1291         p->eht_set_tree = RB_ROOT;
1292         p->mcast_gc.destroy = br_multicast_destroy_port_group;
1293         INIT_HLIST_HEAD(&p->src_list);
1294
1295         if (!br_multicast_is_star_g(group) &&
1296             rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
1297                                           br_sg_port_rht_params)) {
1298                 kfree(p);
1299                 return NULL;
1300         }
1301
1302         rcu_assign_pointer(p->next, next);
1303         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
1304         timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
1305         hlist_add_head(&p->mglist, &port->mglist);
1306
1307         if (src)
1308                 memcpy(p->eth_addr, src, ETH_ALEN);
1309         else
1310                 eth_broadcast_addr(p->eth_addr);
1311
1312         return p;
1313 }
1314
1315 void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
1316                             struct net_bridge_mdb_entry *mp, bool notify)
1317 {
1318         if (!mp->host_joined) {
1319                 mp->host_joined = true;
1320                 if (br_multicast_is_star_g(&mp->addr))
1321                         br_multicast_star_g_host_state(mp);
1322                 if (notify)
1323                         br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
1324         }
1325
1326         if (br_group_is_l2(&mp->addr))
1327                 return;
1328
1329         mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
1330 }
1331
1332 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1333 {
1334         if (!mp->host_joined)
1335                 return;
1336
1337         mp->host_joined = false;
1338         if (br_multicast_is_star_g(&mp->addr))
1339                 br_multicast_star_g_host_state(mp);
1340         if (notify)
1341                 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1342 }
1343
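/* Core add-group path: find or create the MDB entry, then either mark the
 * host itself as joined (no port context) or find/create the port group
 * entry for the reporting port. For IGMPv2/MLDv1 reports the port group
 * timer is (re)armed with the membership interval.
 */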
1344 static struct net_bridge_port_group *
1345 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
1346                          struct net_bridge_mcast_port *pmctx,
1347                          struct br_ip *group,
1348                          const unsigned char *src,
1349                          u8 filter_mode,
1350                          bool igmpv2_mldv1,
1351                          bool blocked)
1352 {
1353         struct net_bridge_port_group __rcu **pp;
1354         struct net_bridge_port_group *p = NULL;
1355         struct net_bridge_mdb_entry *mp;
1356         unsigned long now = jiffies;
1357
1358         if (!br_multicast_ctx_should_use(brmctx, pmctx))
1359                 goto out;
1360
1361         mp = br_multicast_new_group(brmctx->br, group);
1362         if (IS_ERR(mp))
1363                 return ERR_CAST(mp);
1364
1365         if (!pmctx) {
1366                 br_multicast_host_join(brmctx, mp, true);
1367                 goto out;
1368         }
1369
1370         for (pp = &mp->ports;
1371              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
1372              pp = &p->next) {
1373                 if (br_port_group_equal(p, pmctx->port, src))
1374                         goto found;
1375                 if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
1376                         break;
1377         }
1378
1379         p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
1380                                         filter_mode, RTPROT_KERNEL);
1381         if (unlikely(!p)) {
1382                 p = ERR_PTR(-ENOMEM);
1383                 goto out;
1384         }
1385         rcu_assign_pointer(*pp, p);
1386         if (blocked)
1387                 p->flags |= MDB_PG_FLAGS_BLOCKED;
1388         br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);
1389
1390 found:
1391         if (igmpv2_mldv1)
1392                 mod_timer(&p->timer,
1393                           now + brmctx->multicast_membership_interval);
1394
1395 out:
1396         return p;
1397 }
1398
1399 static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
1400                                   struct net_bridge_mcast_port *pmctx,
1401                                   struct br_ip *group,
1402                                   const unsigned char *src,
1403                                   u8 filter_mode,
1404                                   bool igmpv2_mldv1)
1405 {
1406         struct net_bridge_port_group *pg;
1407         int err;
1408
1409         spin_lock(&brmctx->br->multicast_lock);
1410         pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
1411                                       igmpv2_mldv1, false);
1412         /* NULL is considered valid for host-joined groups */
1413         err = PTR_ERR_OR_ZERO(pg);
1414         spin_unlock(&brmctx->br->multicast_lock);
1415
1416         return err;
1417 }
1418
1419 static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
1420                                       struct net_bridge_mcast_port *pmctx,
1421                                       __be32 group,
1422                                       __u16 vid,
1423                                       const unsigned char *src,
1424                                       bool igmpv2)
1425 {
1426         struct br_ip br_group;
1427         u8 filter_mode;
1428
1429         if (ipv4_is_local_multicast(group))
1430                 return 0;
1431
1432         memset(&br_group, 0, sizeof(br_group));
1433         br_group.dst.ip4 = group;
1434         br_group.proto = htons(ETH_P_IP);
1435         br_group.vid = vid;
1436         filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1437
1438         return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1439                                       filter_mode, igmpv2);
1440 }
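/*
 * Worked example of the filter_mode mapping above: an IGMPv2 report carries
 * no source list, so the entry is created in EXCLUDE mode (the v3 way of
 * saying "receive from all sources") and its membership timer is refreshed
 * on every report.  Reports handled as IGMPv3 create the entry in INCLUDE
 * mode and leave the per-source state to the v3 record processing further
 * down in this file.  Groups in 224.0.0.0/24 (ipv4_is_local_multicast())
 * are never tracked; the function simply returns 0 for them.
 */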
1441
1442 #if IS_ENABLED(CONFIG_IPV6)
1443 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1444                                       struct net_bridge_mcast_port *pmctx,
1445                                       const struct in6_addr *group,
1446                                       __u16 vid,
1447                                       const unsigned char *src,
1448                                       bool mldv1)
1449 {
1450         struct br_ip br_group;
1451         u8 filter_mode;
1452
1453         if (ipv6_addr_is_ll_all_nodes(group))
1454                 return 0;
1455
1456         memset(&br_group, 0, sizeof(br_group));
1457         br_group.dst.ip6 = *group;
1458         br_group.proto = htons(ETH_P_IPV6);
1459         br_group.vid = vid;
1460         filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1461
1462         return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1463                                       filter_mode, mldv1);
1464 }
1465 #endif
1466
1467 static bool br_multicast_rport_del(struct hlist_node *rlist)
1468 {
1469         if (hlist_unhashed(rlist))
1470                 return false;
1471
1472         hlist_del_init_rcu(rlist);
1473         return true;
1474 }
1475
1476 static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
1477 {
1478         return br_multicast_rport_del(&pmctx->ip4_rlist);
1479 }
1480
1481 static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
1482 {
1483 #if IS_ENABLED(CONFIG_IPV6)
1484         return br_multicast_rport_del(&pmctx->ip6_rlist);
1485 #else
1486         return false;
1487 #endif
1488 }
1489
1490 static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
1491                                         struct timer_list *t,
1492                                         struct hlist_node *rlist)
1493 {
1494         struct net_bridge *br = pmctx->port->br;
1495         bool del;
1496
1497         spin_lock(&br->multicast_lock);
1498         if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1499             pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1500             timer_pending(t))
1501                 goto out;
1502
1503         del = br_multicast_rport_del(rlist);
1504         br_multicast_rport_del_notify(pmctx, del);
1505 out:
1506         spin_unlock(&br->multicast_lock);
1507 }
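/*
 * Only dynamically learned router ports age out here: MDB_RTR_TYPE_PERM
 * ports never expire, MDB_RTR_TYPE_DISABLED ports should not be on the
 * router list at all, and a still-pending timer means the entry was
 * refreshed after this expiry fired, so all three cases bail out early.
 * br_multicast_rport_del_notify() is told whether an entry was actually
 * unlinked, presumably so a notification only goes out on real removals.
 */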
1508
1509 static void br_ip4_multicast_router_expired(struct timer_list *t)
1510 {
1511         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1512                                                          ip4_mc_router_timer);
1513
1514         br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
1515 }
1516
1517 #if IS_ENABLED(CONFIG_IPV6)
1518 static void br_ip6_multicast_router_expired(struct timer_list *t)
1519 {
1520         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1521                                                          ip6_mc_router_timer);
1522
1523         br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
1524 }
1525 #endif
1526
1527 static void br_mc_router_state_change(struct net_bridge *p,
1528                                       bool is_mc_router)
1529 {
1530         struct switchdev_attr attr = {
1531                 .orig_dev = p->dev,
1532                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
1533                 .flags = SWITCHDEV_F_DEFER,
1534                 .u.mrouter = is_mc_router,
1535         };
1536
1537         switchdev_port_attr_set(p->dev, &attr, NULL);
1538 }
1539
1540 static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
1541                                               struct timer_list *timer)
1542 {
1543         spin_lock(&brmctx->br->multicast_lock);
1544         if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
1545             brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
1546             br_ip4_multicast_is_router(brmctx) ||
1547             br_ip6_multicast_is_router(brmctx))
1548                 goto out;
1549
1550         br_mc_router_state_change(brmctx->br, false);
1551 out:
1552         spin_unlock(&brmctx->br->multicast_lock);
1553 }
1554
1555 static void br_ip4_multicast_local_router_expired(struct timer_list *t)
1556 {
1557         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1558                                                      ip4_mc_router_timer);
1559
1560         br_multicast_local_router_expired(brmctx, t);
1561 }
1562
1563 #if IS_ENABLED(CONFIG_IPV6)
1564 static void br_ip6_multicast_local_router_expired(struct timer_list *t)
1565 {
1566         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1567                                                      ip6_mc_router_timer);
1568
1569         br_multicast_local_router_expired(brmctx, t);
1570 }
1571 #endif
1572
1573 static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
1574                                          struct bridge_mcast_own_query *query)
1575 {
1576         spin_lock(&brmctx->br->multicast_lock);
1577         if (!netif_running(brmctx->br->dev) ||
1578             br_multicast_ctx_vlan_global_disabled(brmctx) ||
1579             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
1580                 goto out;
1581
1582         br_multicast_start_querier(brmctx, query);
1583
1584 out:
1585         spin_unlock(&brmctx->br->multicast_lock);
1586 }
1587
1588 static void br_ip4_multicast_querier_expired(struct timer_list *t)
1589 {
1590         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1591                                                      ip4_other_query.timer);
1592
1593         br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
1594 }
1595
1596 #if IS_ENABLED(CONFIG_IPV6)
1597 static void br_ip6_multicast_querier_expired(struct timer_list *t)
1598 {
1599         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
1600                                                      ip6_other_query.timer);
1601
1602         br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
1603 }
1604 #endif
1605
1606 static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
1607                                             struct br_ip *ip,
1608                                             struct sk_buff *skb)
1609 {
1610         if (ip->proto == htons(ETH_P_IP))
1611                 brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
1612 #if IS_ENABLED(CONFIG_IPV6)
1613         else
1614                 brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
1615 #endif
1616 }
1617
1618 static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
1619                                       struct net_bridge_mcast_port *pmctx,
1620                                       struct net_bridge_port_group *pg,
1621                                       struct br_ip *ip_dst,
1622                                       struct br_ip *group,
1623                                       bool with_srcs,
1624                                       u8 sflag,
1625                                       bool *need_rexmit)
1626 {
1627         bool over_lmqt = !!sflag;
1628         struct sk_buff *skb;
1629         u8 igmp_type;
1630
1631         if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
1632             !br_multicast_ctx_matches_vlan_snooping(brmctx))
1633                 return;
1634
1635 again_under_lmqt:
1636         skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
1637                                        with_srcs, over_lmqt, sflag, &igmp_type,
1638                                        need_rexmit);
1639         if (!skb)
1640                 return;
1641
1642         if (pmctx) {
1643                 skb->dev = pmctx->port->dev;
1644                 br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
1645                                    BR_MCAST_DIR_TX);
1646                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
1647                         dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
1648                         br_dev_queue_push_xmit);
1649
1650                 if (over_lmqt && with_srcs && sflag) {
1651                         over_lmqt = false;
1652                         goto again_under_lmqt;
1653                 }
1654         } else {
1655                 br_multicast_select_own_querier(brmctx, group, skb);
1656                 br_multicast_count(brmctx->br, NULL, skb, igmp_type,
1657                                    BR_MCAST_DIR_RX);
1658                 netif_rx(skb);
1659         }
1660 }
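/*
 * Sketch of the sflag/over_lmqt handling above, assuming sflag is the
 * query's "Suppress Router-Side Processing" bit and that
 * br_multicast_alloc_query() picks the sources whose timers fall on the
 * requested side of the last member query time: with sflag set, a first
 * group-and-source specific query is built for the sources still above
 * LMQT, then the code jumps back to again_under_lmqt to emit a second
 * query covering the remaining sources.  With sflag clear, only the
 * under-LMQT sources are queried.
 */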
1661
1662 static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
1663                                       struct bridge_mcast_querier *dest)
1664 {
1665         unsigned int seq;
1666
1667         memset(dest, 0, sizeof(*dest));
1668         do {
1669                 seq = read_seqcount_begin(&querier->seq);
1670                 dest->port_ifidx = querier->port_ifidx;
1671                 memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
1672         } while (read_seqcount_retry(&querier->seq, seq));
1673 }
1674
1675 static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
1676                                         struct bridge_mcast_querier *querier,
1677                                         int ifindex,
1678                                         struct br_ip *saddr)
1679 {
1680         lockdep_assert_held_once(&brmctx->br->multicast_lock);
1681
1682         write_seqcount_begin(&querier->seq);
1683         querier->port_ifidx = ifindex;
1684         memcpy(&querier->addr, saddr, sizeof(*saddr));
1685         write_seqcount_end(&querier->seq);
1686 }
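/*
 * The elected querier is published through a seqcount so it can be read
 * without taking the multicast lock: writers only update it under
 * br->multicast_lock (asserted above), and readers retry until they see a
 * stable sequence, e.g.:
 *
 *	struct bridge_mcast_querier q;
 *
 *	br_multicast_read_querier(&brmctx->ip4_querier, &q);
 *	(q.addr / q.port_ifidx now hold a consistent snapshot)
 */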
1687
1688 static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
1689                                     struct net_bridge_mcast_port *pmctx,
1690                                     struct bridge_mcast_own_query *own_query)
1691 {
1692         struct bridge_mcast_other_query *other_query = NULL;
1693         struct bridge_mcast_querier *querier;
1694         struct br_ip br_group;
1695         unsigned long time;
1696
1697         if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
1698             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
1699             !brmctx->multicast_querier)
1700                 return;
1701
1702         memset(&br_group.dst, 0, sizeof(br_group.dst));
1703
1704         if (pmctx ? (own_query == &pmctx->ip4_own_query) :
1705                     (own_query == &brmctx->ip4_own_query)) {
1706                 querier = &brmctx->ip4_querier;
1707                 other_query = &brmctx->ip4_other_query;
1708                 br_group.proto = htons(ETH_P_IP);
1709 #if IS_ENABLED(CONFIG_IPV6)
1710         } else {
1711                 querier = &brmctx->ip6_querier;
1712                 other_query = &brmctx->ip6_other_query;
1713                 br_group.proto = htons(ETH_P_IPV6);
1714 #endif
1715         }
1716
1717         if (!other_query || timer_pending(&other_query->timer))
1718                 return;
1719
1720         /* we're about to select ourselves as querier */
1721         if (!pmctx && querier->port_ifidx) {
1722                 struct br_ip zeroip = {};
1723
1724                 br_multicast_update_querier(brmctx, querier, 0, &zeroip);
1725         }
1726
1727         __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
1728                                   0, NULL);
1729
1730         time = jiffies;
1731         time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
1732                 brmctx->multicast_startup_query_interval :
1733                 brmctx->multicast_query_interval;
1734         mod_timer(&own_query->timer, time);
1735 }
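/*
 * Query pacing: while own_query->startup_sent is below
 * multicast_startup_query_count the next query is scheduled after the
 * shorter startup interval, afterwards after the regular query interval.
 * Assuming the usual IGMP-style defaults (two startup queries, a startup
 * interval of a quarter of the 125s query interval), a fresh querier sends
 * two quick queries roughly 31s apart and then one every 125 seconds.
 */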
1736
1737 static void
1738 br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
1739                                 struct bridge_mcast_own_query *query)
1740 {
1741         struct net_bridge *br = pmctx->port->br;
1742         struct net_bridge_mcast *brmctx;
1743
1744         spin_lock(&br->multicast_lock);
1745         if (br_multicast_port_ctx_state_stopped(pmctx))
1746                 goto out;
1747
1748         brmctx = br_multicast_port_ctx_get_global(pmctx);
1749         if (query->startup_sent < brmctx->multicast_startup_query_count)
1750                 query->startup_sent++;
1751
1752         br_multicast_send_query(brmctx, pmctx, query);
1753
1754 out:
1755         spin_unlock(&br->multicast_lock);
1756 }
1757
1758 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1759 {
1760         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1761                                                          ip4_own_query.timer);
1762
1763         br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
1764 }
1765
1766 #if IS_ENABLED(CONFIG_IPV6)
1767 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1768 {
1769         struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
1770                                                          ip6_own_query.timer);
1771
1772         br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
1773 }
1774 #endif
1775
1776 static void br_multicast_port_group_rexmit(struct timer_list *t)
1777 {
1778         struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
1779         struct bridge_mcast_other_query *other_query = NULL;
1780         struct net_bridge *br = pg->key.port->br;
1781         struct net_bridge_mcast_port *pmctx;
1782         struct net_bridge_mcast *brmctx;
1783         bool need_rexmit = false;
1784
1785         spin_lock(&br->multicast_lock);
1786         if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
1787             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1788                 goto out;
1789
1790         pmctx = br_multicast_pg_to_port_ctx(pg);
1791         if (!pmctx)
1792                 goto out;
1793         brmctx = br_multicast_port_ctx_get_global(pmctx);
1794         if (!brmctx->multicast_querier)
1795                 goto out;
1796
1797         if (pg->key.addr.proto == htons(ETH_P_IP))
1798                 other_query = &brmctx->ip4_other_query;
1799 #if IS_ENABLED(CONFIG_IPV6)
1800         else
1801                 other_query = &brmctx->ip6_other_query;
1802 #endif
1803
1804         if (!other_query || timer_pending(&other_query->timer))
1805                 goto out;
1806
1807         if (pg->grp_query_rexmit_cnt) {
1808                 pg->grp_query_rexmit_cnt--;
1809                 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
1810                                           &pg->key.addr, false, 1, NULL);
1811         }
1812         __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
1813                                   &pg->key.addr, true, 0, &need_rexmit);
1814
1815         if (pg->grp_query_rexmit_cnt || need_rexmit)
1816                 mod_timer(&pg->rexmit_timer, jiffies +
1817                                              brmctx->multicast_last_member_interval);
1818 out:
1819         spin_unlock(&br->multicast_lock);
1820 }
1821
1822 static int br_mc_disabled_update(struct net_device *dev, bool value,
1823                                  struct netlink_ext_ack *extack)
1824 {
1825         struct switchdev_attr attr = {
1826                 .orig_dev = dev,
1827                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1828                 .flags = SWITCHDEV_F_DEFER,
1829                 .u.mc_disabled = !value,
1830         };
1831
1832         return switchdev_port_attr_set(dev, &attr, extack);
1833 }
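/*
 * Note that "value" is the bridge's multicast-enabled state, so the
 * switchdev attribute carries its negation (mc_disabled).  Drivers without
 * offload support return -EOPNOTSUPP, which callers such as
 * br_multicast_add_port() below treat as success.
 */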
1834
1835 void br_multicast_port_ctx_init(struct net_bridge_port *port,
1836                                 struct net_bridge_vlan *vlan,
1837                                 struct net_bridge_mcast_port *pmctx)
1838 {
1839         pmctx->port = port;
1840         pmctx->vlan = vlan;
1841         pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1842         timer_setup(&pmctx->ip4_mc_router_timer,
1843                     br_ip4_multicast_router_expired, 0);
1844         timer_setup(&pmctx->ip4_own_query.timer,
1845                     br_ip4_multicast_port_query_expired, 0);
1846 #if IS_ENABLED(CONFIG_IPV6)
1847         timer_setup(&pmctx->ip6_mc_router_timer,
1848                     br_ip6_multicast_router_expired, 0);
1849         timer_setup(&pmctx->ip6_own_query.timer,
1850                     br_ip6_multicast_port_query_expired, 0);
1851 #endif
1852 }
1853
1854 void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
1855 {
1856 #if IS_ENABLED(CONFIG_IPV6)
1857         del_timer_sync(&pmctx->ip6_mc_router_timer);
1858 #endif
1859         del_timer_sync(&pmctx->ip4_mc_router_timer);
1860 }
1861
1862 int br_multicast_add_port(struct net_bridge_port *port)
1863 {
1864         int err;
1865
1866         port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
1867         br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);
1868
1869         err = br_mc_disabled_update(port->dev,
1870                                     br_opt_get(port->br,
1871                                                BROPT_MULTICAST_ENABLED),
1872                                     NULL);
1873         if (err && err != -EOPNOTSUPP)
1874                 return err;
1875
1876         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1877         if (!port->mcast_stats)
1878                 return -ENOMEM;
1879
1880         return 0;
1881 }
1882
1883 void br_multicast_del_port(struct net_bridge_port *port)
1884 {
1885         struct net_bridge *br = port->br;
1886         struct net_bridge_port_group *pg;
1887         HLIST_HEAD(deleted_head);
1888         struct hlist_node *n;
1889
1890         /* Take care of the remaining groups; only permanent entries should be left */
1891         spin_lock_bh(&br->multicast_lock);
1892         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1893                 br_multicast_find_del_pg(br, pg);
1894         hlist_move_list(&br->mcast_gc_list, &deleted_head);
1895         spin_unlock_bh(&br->multicast_lock);
1896         br_multicast_gc(&deleted_head);
1897         br_multicast_port_ctx_deinit(&port->multicast_ctx);
1898         free_percpu(port->mcast_stats);
1899 }
1900
1901 static void br_multicast_enable(struct bridge_mcast_own_query *query)
1902 {
1903         query->startup_sent = 0;
1904
1905         if (try_to_del_timer_sync(&query->timer) >= 0 ||
1906             del_timer(&query->timer))
1907                 mod_timer(&query->timer, jiffies);
1908 }
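/*
 * The re-arm above only happens if the timer could be taken down first:
 * try_to_del_timer_sync() returns a negative value when the callback is
 * running on another CPU, in which case the del_timer() fallback decides
 * whether a still-pending timer may be rescheduled to fire immediately.
 */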
1909
1910 static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
1911 {
1912         struct net_bridge *br = pmctx->port->br;
1913         struct net_bridge_mcast *brmctx;
1914
1915         brmctx = br_multicast_port_ctx_get_global(pmctx);
1916         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1917             !netif_running(br->dev))
1918                 return;
1919
1920         br_multicast_enable(&pmctx->ip4_own_query);
1921 #if IS_ENABLED(CONFIG_IPV6)
1922         br_multicast_enable(&pmctx->ip6_own_query);
1923 #endif
1924         if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
1925                 br_ip4_multicast_add_router(brmctx, pmctx);
1926                 br_ip6_multicast_add_router(brmctx, pmctx);
1927         }
1928 }
1929
1930 void br_multicast_enable_port(struct net_bridge_port *port)
1931 {
1932         struct net_bridge *br = port->br;
1933
1934         spin_lock_bh(&br->multicast_lock);
1935         __br_multicast_enable_port_ctx(&port->multicast_ctx);
1936         spin_unlock_bh(&br->multicast_lock);
1937 }
1938
1939 static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
1940 {
1941         struct net_bridge_port_group *pg;
1942         struct hlist_node *n;
1943         bool del = false;
1944
1945         hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
1946                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
1947                     (!br_multicast_port_ctx_is_vlan(pmctx) ||
1948                      pg->key.addr.vid == pmctx->vlan->vid))
1949                         br_multicast_find_del_pg(pmctx->port->br, pg);
1950
1951         del |= br_ip4_multicast_rport_del(pmctx);
1952         del_timer(&pmctx->ip4_mc_router_timer);
1953         del_timer(&pmctx->ip4_own_query.timer);
1954         del |= br_ip6_multicast_rport_del(pmctx);
1955 #if IS_ENABLED(CONFIG_IPV6)
1956         del_timer(&pmctx->ip6_mc_router_timer);
1957         del_timer(&pmctx->ip6_own_query.timer);
1958 #endif
1959         br_multicast_rport_del_notify(pmctx, del);
1960 }
1961
1962 void br_multicast_disable_port(struct net_bridge_port *port)
1963 {
1964         spin_lock_bh(&port->br->multicast_lock);
1965         __br_multicast_disable_port_ctx(&port->multicast_ctx);
1966         spin_unlock_bh(&port->br->multicast_lock);
1967 }
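/*
 * Disabling a port flushes all of its non-permanent port groups (for a
 * per-VLAN context only those matching the VLAN's vid), takes the port off
 * the IPv4/IPv6 multicast router lists and stops its own-query and router
 * timers; only MDB_PG_FLAGS_PERMANENT entries survive until the port is
 * removed from the bridge.
 */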
1968
1969 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1970 {
1971         struct net_bridge_group_src *ent;
1972         struct hlist_node *tmp;
1973         int deleted = 0;
1974
1975         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1976                 if (ent->flags & BR_SGRP_F_DELETE) {
1977                         br_multicast_del_group_src(ent, false);
1978                         deleted++;
1979                 }
1980
1981         return deleted;
1982 }
1983
1984 static void __grp_src_mod_timer(struct net_bridge_group_src *src,
1985                                 unsigned long expires)
1986 {
1987         mod_timer(&src->timer, expires);
1988         br_multicast_fwd_src_handle(src);
1989 }
1990
1991 static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
1992                                               struct net_bridge_mcast_port *pmctx,
1993                                               struct net_bridge_port_group *pg)
1994 {
1995         struct bridge_mcast_other_query *other_query = NULL;
1996         u32 lmqc = brmctx->multicast_last_member_count;
1997         unsigned long lmqt, lmi, now = jiffies;
1998         struct net_bridge_group_src *ent;
1999
2000         if (!netif_running(brmctx->br->dev) ||
2001             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
2002                 return;
2003
2004         if (pg->key.addr.proto == htons(ETH_P_IP))
2005                 other_query = &brmctx->ip4_other_query;
2006 #if IS_ENABLED(CONFIG_IPV6)
2007         else
2008                 other_query = &brmctx->ip6_other_query;
2009 #endif
2010
2011         lmqt = now + br_multicast_lmqt(brmctx);
2012         hlist_for_each_entry(ent, &pg->src_list, node) {
2013                 if (ent->flags & BR_SGRP_F_SEND) {
2014                         ent->flags &= ~BR_SGRP_F_SEND;
2015                         if (ent->timer.expires > lmqt) {
2016                                 if (brmctx->multicast_querier &&
2017                                     other_query &&
2018                                     !timer_pending(&other_query->timer))
2019                                         ent->src_query_rexmit_cnt = lmqc;
2020                                 __grp_src_mod_timer(ent, lmqt);
2021                         }
2022                 }
2023         }
2024
2025         if (!brmctx->multicast_querier ||
2026             !other_query || timer_pending(&other_query->timer))
2027                 return;
2028
2029         __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
2030                                   &pg->key.addr, true, 1, NULL);
2031
2032         lmi = now + brmctx->multicast_last_member_interval;
2033         if (!timer_pending(&pg->rexmit_timer) ||
2034             time_after(pg->rexmit_timer.expires, lmi))
2035                 mod_timer(&pg->rexmit_timer, lmi);
2036 }
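/*
 * Timing sketch for the retransmission logic above, assuming
 * br_multicast_lmqt() is last_member_interval * last_member_count (the
 * usual Last Member Query Time): sources marked BR_SGRP_F_SEND whose
 * timers still reach past LMQT are lowered to now + LMQT and, if we are
 * the active querier, given last_member_count query retransmissions; a
 * group-and-source specific query then goes out and the rexmit timer is
 * armed one last_member_interval ahead to pace the remaining
 * retransmissions.
 */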
2037
2038 static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
2039                                         struct net_bridge_mcast_port *pmctx,
2040                                         struct net_bridge_port_group *pg)
2041 {
2042         struct bridge_mcast_other_query *other_query = NULL;
2043         unsigned long now = jiffies, lmi;
2044
2045         if (!netif_running(brmctx->br->dev) ||
2046             !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
2047                 return;
2048
2049         if (pg->key.addr.proto == htons(ETH_P_IP))
2050                 other_query = &brmctx->ip4_other_query;
2051 #if IS_ENABLED(CONFIG_IPV6)
2052         else
2053                 other_query = &brmctx->ip6_other_query;
2054 #endif
2055
2056         if (brmctx->multicast_querier &&
2057             other_query && !timer_pending(&other_query->timer)) {
2058                 lmi = now + brmctx->multicast_last_member_interval;
2059                 pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
2060                 __br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
2061                                           &pg->key.addr, false, 0, NULL);
2062                 if (!timer_pending(&pg->rexmit_timer) ||
2063                     time_after(pg->rexmit_timer.expires, lmi))
2064                         mod_timer(&pg->rexmit_timer, lmi);
2065         }
2066
2067         if (pg->filter_mode == MCAST_EXCLUDE &&
2068             (!timer_pending(&pg->timer) ||
2069              time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
2070                 mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
2071 }
2072
2073 /* State          Msg type      New state                Actions
2074  * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
2075  * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
2076  * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2077  */
2078 static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
2079                                      struct net_bridge_port_group *pg, void *h_addr,
2080                                      void *srcs, u32 nsrcs, size_t addr_size,
2081                                      int grec_type)
2082 {
2083         struct net_bridge_group_src *ent;
2084         unsigned long now = jiffies;
2085         bool changed = false;
2086         struct br_ip src_ip;
2087         u32 src_idx;
2088
2089         memset(&src_ip, 0, sizeof(src_ip));
2090         src_ip.proto = pg->key.addr.proto;
2091         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2092                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2093                 ent = br_multicast_find_group_src(pg, &src_ip);
2094                 if (!ent) {
2095                         ent = br_multicast_new_group_src(pg, &src_ip);
2096                         if (ent)
2097                                 changed = true;
2098                 }
2099
2100                 if (ent)
2101                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2102         }
2103
2104         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2105                                     grec_type))
2106                 changed = true;
2107
2108         return changed;
2109 }
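/*
 * Worked example for br_multicast_isinc_allow() and the IS_IN/ALLOW table
 * above: a port group in INCLUDE {S1} receiving IS_IN {S2, S3} ends up in
 * INCLUDE {S1, S2, S3}, with the timers of the reported "B" set (S2, S3)
 * refreshed to the group membership interval; S1 is left untouched.
 */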
2110
2111 /* State          Msg type      New state                Actions
2112  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2113  *                                                       Delete (A-B)
2114  *                                                       Group Timer=GMI
2115  */
2116 static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
2117                                  struct net_bridge_port_group *pg, void *h_addr,
2118                                  void *srcs, u32 nsrcs, size_t addr_size,
2119                                  int grec_type)
2120 {
2121         struct net_bridge_group_src *ent;
2122         struct br_ip src_ip;
2123         u32 src_idx;
2124
2125         hlist_for_each_entry(ent, &pg->src_list, node)
2126                 ent->flags |= BR_SGRP_F_DELETE;
2127
2128         memset(&src_ip, 0, sizeof(src_ip));
2129         src_ip.proto = pg->key.addr.proto;
2130         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2131                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2132                 ent = br_multicast_find_group_src(pg, &src_ip);
2133                 if (ent)
2134                         ent->flags &= ~BR_SGRP_F_DELETE;
2135                 else
2136                         ent = br_multicast_new_group_src(pg, &src_ip);
2137                 if (ent)
2138                         br_multicast_fwd_src_handle(ent);
2139         }
2140
2141         br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2142                                 grec_type);
2143
2144         __grp_src_delete_marked(pg);
2145 }
2146
2147 /* State          Msg type      New state                Actions
2148  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
2149  *                                                       Delete (X-A)
2150  *                                                       Delete (Y-A)
2151  *                                                       Group Timer=GMI
2152  */
2153 static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
2154                                  struct net_bridge_port_group *pg, void *h_addr,
2155                                  void *srcs, u32 nsrcs, size_t addr_size,
2156                                  int grec_type)
2157 {
2158         struct net_bridge_group_src *ent;
2159         unsigned long now = jiffies;
2160         bool changed = false;
2161         struct br_ip src_ip;
2162         u32 src_idx;
2163
2164         hlist_for_each_entry(ent, &pg->src_list, node)
2165                 ent->flags |= BR_SGRP_F_DELETE;
2166
2167         memset(&src_ip, 0, sizeof(src_ip));
2168         src_ip.proto = pg->key.addr.proto;
2169         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2170                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2171                 ent = br_multicast_find_group_src(pg, &src_ip);
2172                 if (ent) {
2173                         ent->flags &= ~BR_SGRP_F_DELETE;
2174                 } else {
2175                         ent = br_multicast_new_group_src(pg, &src_ip);
2176                         if (ent) {
2177                                 __grp_src_mod_timer(ent,
2178                                                     now + br_multicast_gmi(brmctx));
2179                                 changed = true;
2180                         }
2181                 }
2182         }
2183
2184         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2185                                     grec_type))
2186                 changed = true;
2187
2188         if (__grp_src_delete_marked(pg))
2189                 changed = true;
2190
2191         return changed;
2192 }
2193
2194 static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
2195                                struct net_bridge_port_group *pg, void *h_addr,
2196                                void *srcs, u32 nsrcs, size_t addr_size,
2197                                int grec_type)
2198 {
2199         bool changed = false;
2200
2201         switch (pg->filter_mode) {
2202         case MCAST_INCLUDE:
2203                 __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2204                                      grec_type);
2205                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2206                 changed = true;
2207                 break;
2208         case MCAST_EXCLUDE:
2209                 changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
2210                                                addr_size, grec_type);
2211                 break;
2212         }
2213
2214         pg->filter_mode = MCAST_EXCLUDE;
2215         mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2216
2217         return changed;
2218 }
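/*
 * Worked example for br_multicast_isexc(): a group in INCLUDE {S1, S2}
 * receiving IS_EX {S2, S3} switches to EXCLUDE with the group timer set to
 * GMI; S2 (A*B) is kept, S3 (B-A) is added with a zero source timer and S1
 * (A-B) is removed by the BR_SGRP_F_DELETE sweep.  Starting from EXCLUDE,
 * only sources new to both X and Y get a GMI timer and everything not in
 * the report is deleted.
 */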
2219
2220 /* State          Msg type      New state                Actions
2221  * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
2222  *                                                       Send Q(G,A-B)
2223  */
2224 static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
2225                                 struct net_bridge_mcast_port *pmctx,
2226                                 struct net_bridge_port_group *pg, void *h_addr,
2227                                 void *srcs, u32 nsrcs, size_t addr_size,
2228                                 int grec_type)
2229 {
2230         u32 src_idx, to_send = pg->src_ents;
2231         struct net_bridge_group_src *ent;
2232         unsigned long now = jiffies;
2233         bool changed = false;
2234         struct br_ip src_ip;
2235
2236         hlist_for_each_entry(ent, &pg->src_list, node)
2237                 ent->flags |= BR_SGRP_F_SEND;
2238
2239         memset(&src_ip, 0, sizeof(src_ip));
2240         src_ip.proto = pg->key.addr.proto;
2241         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2242                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2243                 ent = br_multicast_find_group_src(pg, &src_ip);
2244                 if (ent) {
2245                         ent->flags &= ~BR_SGRP_F_SEND;
2246                         to_send--;
2247                 } else {
2248                         ent = br_multicast_new_group_src(pg, &src_ip);
2249                         if (ent)
2250                                 changed = true;
2251                 }
2252                 if (ent)
2253                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2254         }
2255
2256         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2257                                     grec_type))
2258                 changed = true;
2259
2260         if (to_send)
2261                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2262
2263         return changed;
2264 }
2265
2266 /* State          Msg type      New state                Actions
2267  * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
2268  *                                                       Send Q(G,X-A)
2269  *                                                       Send Q(G)
2270  */
2271 static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
2272                                 struct net_bridge_mcast_port *pmctx,
2273                                 struct net_bridge_port_group *pg, void *h_addr,
2274                                 void *srcs, u32 nsrcs, size_t addr_size,
2275                                 int grec_type)
2276 {
2277         u32 src_idx, to_send = pg->src_ents;
2278         struct net_bridge_group_src *ent;
2279         unsigned long now = jiffies;
2280         bool changed = false;
2281         struct br_ip src_ip;
2282
2283         hlist_for_each_entry(ent, &pg->src_list, node)
2284                 if (timer_pending(&ent->timer))
2285                         ent->flags |= BR_SGRP_F_SEND;
2286
2287         memset(&src_ip, 0, sizeof(src_ip));
2288         src_ip.proto = pg->key.addr.proto;
2289         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2290                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2291                 ent = br_multicast_find_group_src(pg, &src_ip);
2292                 if (ent) {
2293                         if (timer_pending(&ent->timer)) {
2294                                 ent->flags &= ~BR_SGRP_F_SEND;
2295                                 to_send--;
2296                         }
2297                 } else {
2298                         ent = br_multicast_new_group_src(pg, &src_ip);
2299                         if (ent)
2300                                 changed = true;
2301                 }
2302                 if (ent)
2303                         __grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
2304         }
2305
2306         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2307                                     grec_type))
2308                 changed = true;
2309
2310         if (to_send)
2311                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2312
2313         __grp_send_query_and_rexmit(brmctx, pmctx, pg);
2314
2315         return changed;
2316 }
2317
2318 static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
2319                               struct net_bridge_mcast_port *pmctx,
2320                               struct net_bridge_port_group *pg, void *h_addr,
2321                               void *srcs, u32 nsrcs, size_t addr_size,
2322                               int grec_type)
2323 {
2324         bool changed = false;
2325
2326         switch (pg->filter_mode) {
2327         case MCAST_INCLUDE:
2328                 changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
2329                                               nsrcs, addr_size, grec_type);
2330                 break;
2331         case MCAST_EXCLUDE:
2332                 changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
2333                                               nsrcs, addr_size, grec_type);
2334                 break;
2335         }
2336
2337         if (br_multicast_eht_should_del_pg(pg)) {
2338                 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2339                 br_multicast_find_del_pg(pg->key.port->br, pg);
2340                 /* a notification has already been sent and we shouldn't
2341                  * access pg after the delete, so we have to return false
2342                  */
2343                 changed = false;
2344         }
2345
2346         return changed;
2347 }
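/*
 * Worked example for br_multicast_toin(): in EXCLUDE (X, Y), a TO_IN {A}
 * report refreshes (or creates) the sources in A with a GMI timer and then
 * sends both a group-and-source specific query for the active sources that
 * were not listed (Q(G, X-A)) and a plain group query (Q(G)), matching the
 * table above.  In INCLUDE mode only Q(G, A-B) is sent.  If EHT tracking
 * decides the last host behind the port is gone, the port group is deleted
 * and false is returned because pg must not be touched afterwards.
 */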
2348
2349 /* State          Msg type      New state                Actions
2350  * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2351  *                                                       Delete (A-B)
2352  *                                                       Send Q(G,A*B)
2353  *                                                       Group Timer=GMI
2354  */
2355 static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
2356                                 struct net_bridge_mcast_port *pmctx,
2357                                 struct net_bridge_port_group *pg, void *h_addr,
2358                                 void *srcs, u32 nsrcs, size_t addr_size,
2359                                 int grec_type)
2360 {
2361         struct net_bridge_group_src *ent;
2362         u32 src_idx, to_send = 0;
2363         struct br_ip src_ip;
2364
2365         hlist_for_each_entry(ent, &pg->src_list, node)
2366                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2367
2368         memset(&src_ip, 0, sizeof(src_ip));
2369         src_ip.proto = pg->key.addr.proto;
2370         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2371                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2372                 ent = br_multicast_find_group_src(pg, &src_ip);
2373                 if (ent) {
2374                         ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2375                                      BR_SGRP_F_SEND;
2376                         to_send++;
2377                 } else {
2378                         ent = br_multicast_new_group_src(pg, &src_ip);
2379                 }
2380                 if (ent)
2381                         br_multicast_fwd_src_handle(ent);
2382         }
2383
2384         br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2385                                 grec_type);
2386
2387         __grp_src_delete_marked(pg);
2388         if (to_send)
2389                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2390 }
2391
2392 /* State          Msg type      New state                Actions
2393  * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
2394  *                                                       Delete (X-A)
2395  *                                                       Delete (Y-A)
2396  *                                                       Send Q(G,A-Y)
2397  *                                                       Group Timer=GMI
2398  */
2399 static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
2400                                 struct net_bridge_mcast_port *pmctx,
2401                                 struct net_bridge_port_group *pg, void *h_addr,
2402                                 void *srcs, u32 nsrcs, size_t addr_size,
2403                                 int grec_type)
2404 {
2405         struct net_bridge_group_src *ent;
2406         u32 src_idx, to_send = 0;
2407         bool changed = false;
2408         struct br_ip src_ip;
2409
2410         hlist_for_each_entry(ent, &pg->src_list, node)
2411                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2412
2413         memset(&src_ip, 0, sizeof(src_ip));
2414         src_ip.proto = pg->key.addr.proto;
2415         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2416                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2417                 ent = br_multicast_find_group_src(pg, &src_ip);
2418                 if (ent) {
2419                         ent->flags &= ~BR_SGRP_F_DELETE;
2420                 } else {
2421                         ent = br_multicast_new_group_src(pg, &src_ip);
2422                         if (ent) {
2423                                 __grp_src_mod_timer(ent, pg->timer.expires);
2424                                 changed = true;
2425                         }
2426                 }
2427                 if (ent && timer_pending(&ent->timer)) {
2428                         ent->flags |= BR_SGRP_F_SEND;
2429                         to_send++;
2430                 }
2431         }
2432
2433         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2434                                     grec_type))
2435                 changed = true;
2436
2437         if (__grp_src_delete_marked(pg))
2438                 changed = true;
2439         if (to_send)
2440                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2441
2442         return changed;
2443 }
2444
2445 static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
2446                               struct net_bridge_mcast_port *pmctx,
2447                               struct net_bridge_port_group *pg, void *h_addr,
2448                               void *srcs, u32 nsrcs, size_t addr_size,
2449                               int grec_type)
2450 {
2451         bool changed = false;
2452
2453         switch (pg->filter_mode) {
2454         case MCAST_INCLUDE:
2455                 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
2456                                     addr_size, grec_type);
2457                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2458                 changed = true;
2459                 break;
2460         case MCAST_EXCLUDE:
2461                 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
2462                                               nsrcs, addr_size, grec_type);
2463                 break;
2464         }
2465
2466         pg->filter_mode = MCAST_EXCLUDE;
2467         mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2468
2469         return changed;
2470 }
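/*
 * Worked example for br_multicast_toex(): a group in INCLUDE {S1, S2}
 * receiving TO_EX {S2, S3} moves to EXCLUDE with the group timer at GMI;
 * S1 (A-B) is deleted, S3 (B-A) is added with a zero timer and a
 * group-and-source specific query is sent for S2 (A*B).  Starting from
 * EXCLUDE, the query instead covers A-Y, the reported sources whose
 * timers are still running.
 */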
2471
2472 /* State          Msg type      New state                Actions
2473  * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
2474  */
2475 static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
2476                                  struct net_bridge_mcast_port *pmctx,
2477                                  struct net_bridge_port_group *pg, void *h_addr,
2478                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2479 {
2480         struct net_bridge_group_src *ent;
2481         u32 src_idx, to_send = 0;
2482         bool changed = false;
2483         struct br_ip src_ip;
2484
2485         hlist_for_each_entry(ent, &pg->src_list, node)
2486                 ent->flags &= ~BR_SGRP_F_SEND;
2487
2488         memset(&src_ip, 0, sizeof(src_ip));
2489         src_ip.proto = pg->key.addr.proto;
2490         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2491                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2492                 ent = br_multicast_find_group_src(pg, &src_ip);
2493                 if (ent) {
2494                         ent->flags |= BR_SGRP_F_SEND;
2495                         to_send++;
2496                 }
2497         }
2498
2499         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2500                                     grec_type))
2501                 changed = true;
2502
2503         if (to_send)
2504                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2505
2506         return changed;
2507 }
2508
2509 /* State          Msg type      New state                Actions
2510  * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
2511  *                                                       Send Q(G,A-Y)
2512  */
2513 static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
2514                                  struct net_bridge_mcast_port *pmctx,
2515                                  struct net_bridge_port_group *pg, void *h_addr,
2516                                  void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2517 {
2518         struct net_bridge_group_src *ent;
2519         u32 src_idx, to_send = 0;
2520         bool changed = false;
2521         struct br_ip src_ip;
2522
2523         hlist_for_each_entry(ent, &pg->src_list, node)
2524                 ent->flags &= ~BR_SGRP_F_SEND;
2525
2526         memset(&src_ip, 0, sizeof(src_ip));
2527         src_ip.proto = pg->key.addr.proto;
2528         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2529                 memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
2530                 ent = br_multicast_find_group_src(pg, &src_ip);
2531                 if (!ent) {
2532                         ent = br_multicast_new_group_src(pg, &src_ip);
2533                         if (ent) {
2534                                 __grp_src_mod_timer(ent, pg->timer.expires);
2535                                 changed = true;
2536                         }
2537                 }
2538                 if (ent && timer_pending(&ent->timer)) {
2539                         ent->flags |= BR_SGRP_F_SEND;
2540                         to_send++;
2541                 }
2542         }
2543
2544         if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2545                                     grec_type))
2546                 changed = true;
2547
2548         if (to_send)
2549                 __grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
2550
2551         return changed;
2552 }
2553
2554 static bool br_multicast_block(struct net_bridge_mcast *brmctx,
2555                                struct net_bridge_mcast_port *pmctx,
2556                                struct net_bridge_port_group *pg, void *h_addr,
2557                                void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
2558 {
2559         bool changed = false;
2560
2561         switch (pg->filter_mode) {
2562         case MCAST_INCLUDE:
2563                 changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
2564                                                nsrcs, addr_size, grec_type);
2565                 break;
2566         case MCAST_EXCLUDE:
2567                 changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
2568                                                nsrcs, addr_size, grec_type);
2569                 break;
2570         }
2571
2572         if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
2573             br_multicast_eht_should_del_pg(pg)) {
2574                 if (br_multicast_eht_should_del_pg(pg))
2575                         pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2576                 br_multicast_find_del_pg(pg->key.port->br, pg);
2577                 /* a notification has already been sent and we shouldn't
2578                  * access pg after the delete, so we have to return false
2579                  */
2580                 changed = false;
2581         }
2582
2583         return changed;
2584 }
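/*
 * Worked example for br_multicast_block(): a group in INCLUDE {S1, S2}
 * receiving BLOCK {S2, S3} keeps its state but sends a group-and-source
 * specific query for S2 (A*B); S3 is ignored since it was never requested.
 * In EXCLUDE mode, blocked sources that were not known yet are added with
 * the current group timer and the ones with running timers are queried
 * (Q(G, A-Y)).  If the INCLUDE source list ends up empty or EHT decides
 * the last host left, the whole port group is removed and false is
 * returned.
 */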
2585
2586 static struct net_bridge_port_group *
2587 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2588                        struct net_bridge_port *p,
2589                        const unsigned char *src)
2590 {
2591         struct net_bridge *br __maybe_unused = mp->br;
2592         struct net_bridge_port_group *pg;
2593
2594         for (pg = mlock_dereference(mp->ports, br);
2595              pg;
2596              pg = mlock_dereference(pg->next, br))
2597                 if (br_port_group_equal(pg, p, src))
2598                         return pg;
2599
2600         return NULL;
2601 }
2602
2603 static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
2604                                          struct net_bridge_mcast_port *pmctx,
2605                                          struct sk_buff *skb,
2606                                          u16 vid)
2607 {
2608         bool igmpv2 = brmctx->multicast_igmp_version == 2;
2609         struct net_bridge_mdb_entry *mdst;
2610         struct net_bridge_port_group *pg;
2611         const unsigned char *src;
2612         struct igmpv3_report *ih;
2613         struct igmpv3_grec *grec;
2614         int i, len, num, type;
2615         __be32 group, *h_addr;
2616         bool changed = false;
2617         int err = 0;
2618         u16 nsrcs;
2619
2620         ih = igmpv3_report_hdr(skb);
2621         num = ntohs(ih->ngrec);
2622         len = skb_transport_offset(skb) + sizeof(*ih);
2623
2624         for (i = 0; i < num; i++) {
2625                 len += sizeof(*grec);
2626                 if (!ip_mc_may_pull(skb, len))
2627                         return -EINVAL;
2628
2629                 grec = (void *)(skb->data + len - sizeof(*grec));
2630                 group = grec->grec_mca;
2631                 type = grec->grec_type;
2632                 nsrcs = ntohs(grec->grec_nsrcs);
2633
2634                 len += nsrcs * 4;
2635                 if (!ip_mc_may_pull(skb, len))
2636                         return -EINVAL;
2637
2638                 switch (type) {
2639                 case IGMPV3_MODE_IS_INCLUDE:
2640                 case IGMPV3_MODE_IS_EXCLUDE:
2641                 case IGMPV3_CHANGE_TO_INCLUDE:
2642                 case IGMPV3_CHANGE_TO_EXCLUDE:
2643                 case IGMPV3_ALLOW_NEW_SOURCES:
2644                 case IGMPV3_BLOCK_OLD_SOURCES:
2645                         break;
2646
2647                 default:
2648                         continue;
2649                 }
2650
2651                 src = eth_hdr(skb)->h_source;
2652                 if (nsrcs == 0 &&
2653                     (type == IGMPV3_CHANGE_TO_INCLUDE ||
2654                      type == IGMPV3_MODE_IS_INCLUDE)) {
2655                         if (!pmctx || igmpv2) {
2656                                 br_ip4_multicast_leave_group(brmctx, pmctx,
2657                                                              group, vid, src);
2658                                 continue;
2659                         }
2660                 } else {
2661                         err = br_ip4_multicast_add_group(brmctx, pmctx, group,
2662                                                          vid, src, igmpv2);
2663                         if (err)
2664                                 break;
2665                 }
2666
2667                 if (!pmctx || igmpv2)
2668                         continue;
2669
2670                 spin_lock_bh(&brmctx->br->multicast_lock);
2671                 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2672                         goto unlock_continue;
2673
2674                 mdst = br_mdb_ip4_get(brmctx->br, group, vid);
2675                 if (!mdst)
2676                         goto unlock_continue;
2677                 pg = br_multicast_find_port(mdst, pmctx->port, src);
2678                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2679                         goto unlock_continue;
2680                 /* reload grec and host addr */
2681                 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
2682                 h_addr = &ip_hdr(skb)->saddr;
2683                 switch (type) {
2684                 case IGMPV3_ALLOW_NEW_SOURCES:
2685                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2686                                                            grec->grec_src,
2687                                                            nsrcs, sizeof(__be32), type);
2688                         break;
2689                 case IGMPV3_MODE_IS_INCLUDE:
2690                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2691                                                            grec->grec_src,
2692                                                            nsrcs, sizeof(__be32), type);
2693                         break;
2694                 case IGMPV3_MODE_IS_EXCLUDE:
2695                         changed = br_multicast_isexc(brmctx, pg, h_addr,
2696                                                      grec->grec_src,
2697                                                      nsrcs, sizeof(__be32), type);
2698                         break;
2699                 case IGMPV3_CHANGE_TO_INCLUDE:
2700                         changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2701                                                     grec->grec_src,
2702                                                     nsrcs, sizeof(__be32), type);
2703                         break;
2704                 case IGMPV3_CHANGE_TO_EXCLUDE:
2705                         changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2706                                                     grec->grec_src,
2707                                                     nsrcs, sizeof(__be32), type);
2708                         break;
2709                 case IGMPV3_BLOCK_OLD_SOURCES:
2710                         changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2711                                                      grec->grec_src,
2712                                                      nsrcs, sizeof(__be32), type);
2713                         break;
2714                 }
2715                 if (changed)
2716                         br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2717 unlock_continue:
2718                 spin_unlock_bh(&brmctx->br->multicast_lock);
2719         }
2720
2721         return err;
2722 }
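/*
 * Parsing sketch for the loop above: the cursor advances by
 * sizeof(struct igmpv3_grec) plus nsrcs * 4 bytes of IPv4 sources per
 * record, with ip_mc_may_pull() re-checked before every access.  A TO_IN
 * or IS_IN record with an empty source list is treated as a leave and
 * handed to br_ip4_multicast_leave_group() when the bridge runs IGMPv2
 * snooping (or there is no port context); otherwise the record is
 * dispatched to the per-type handlers under multicast_lock and any state
 * change is announced with br_mdb_notify(RTM_NEWMDB).
 */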
2723
2724 #if IS_ENABLED(CONFIG_IPV6)
2725 static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
2726                                         struct net_bridge_mcast_port *pmctx,
2727                                         struct sk_buff *skb,
2728                                         u16 vid)
2729 {
2730         bool mldv1 = brmctx->multicast_mld_version == 1;
2731         struct net_bridge_mdb_entry *mdst;
2732         struct net_bridge_port_group *pg;
2733         unsigned int nsrcs_offset;
2734         struct mld2_report *mld2r;
2735         const unsigned char *src;
2736         struct in6_addr *h_addr;
2737         struct mld2_grec *grec;
2738         unsigned int grec_len;
2739         bool changed = false;
2740         int i, len, num;
2741         int err = 0;
2742
2743         if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
2744                 return -EINVAL;
2745
2746         mld2r = (struct mld2_report *)icmp6_hdr(skb);
2747         num = ntohs(mld2r->mld2r_ngrec);
2748         len = skb_transport_offset(skb) + sizeof(*mld2r);
2749
2750         for (i = 0; i < num; i++) {
2751                 __be16 *_nsrcs, __nsrcs;
2752                 u16 nsrcs;
2753
2754                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
2755
2756                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
2757                     nsrcs_offset + sizeof(__nsrcs))
2758                         return -EINVAL;
2759
2760                 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
2761                                             sizeof(__nsrcs), &__nsrcs);
2762                 if (!_nsrcs)
2763                         return -EINVAL;
2764
2765                 nsrcs = ntohs(*_nsrcs);
2766                 grec_len = struct_size(grec, grec_src, nsrcs);
2767
2768                 if (!ipv6_mc_may_pull(skb, len + grec_len))
2769                         return -EINVAL;
2770
2771                 grec = (struct mld2_grec *)(skb->data + len);
2772                 len += grec_len;
2773
2774                 switch (grec->grec_type) {
2775                 case MLD2_MODE_IS_INCLUDE:
2776                 case MLD2_MODE_IS_EXCLUDE:
2777                 case MLD2_CHANGE_TO_INCLUDE:
2778                 case MLD2_CHANGE_TO_EXCLUDE:
2779                 case MLD2_ALLOW_NEW_SOURCES:
2780                 case MLD2_BLOCK_OLD_SOURCES:
2781                         break;
2782
2783                 default:
2784                         continue;
2785                 }
2786
2787                 src = eth_hdr(skb)->h_source;
2788                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2789                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2790                     nsrcs == 0) {
2791                         if (!pmctx || mldv1) {
2792                                 br_ip6_multicast_leave_group(brmctx, pmctx,
2793                                                              &grec->grec_mca,
2794                                                              vid, src);
2795                                 continue;
2796                         }
2797                 } else {
2798                         err = br_ip6_multicast_add_group(brmctx, pmctx,
2799                                                          &grec->grec_mca, vid,
2800                                                          src, mldv1);
2801                         if (err)
2802                                 break;
2803                 }
2804
2805                 if (!pmctx || mldv1)
2806                         continue;
2807
2808                 spin_lock_bh(&brmctx->br->multicast_lock);
2809                 if (!br_multicast_ctx_should_use(brmctx, pmctx))
2810                         goto unlock_continue;
2811
2812                 mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
2813                 if (!mdst)
2814                         goto unlock_continue;
2815                 pg = br_multicast_find_port(mdst, pmctx->port, src);
2816                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2817                         goto unlock_continue;
2818                 h_addr = &ipv6_hdr(skb)->saddr;
2819                 switch (grec->grec_type) {
2820                 case MLD2_ALLOW_NEW_SOURCES:
2821                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2822                                                            grec->grec_src, nsrcs,
2823                                                            sizeof(struct in6_addr),
2824                                                            grec->grec_type);
2825                         break;
2826                 case MLD2_MODE_IS_INCLUDE:
2827                         changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
2828                                                            grec->grec_src, nsrcs,
2829                                                            sizeof(struct in6_addr),
2830                                                            grec->grec_type);
2831                         break;
2832                 case MLD2_MODE_IS_EXCLUDE:
2833                         changed = br_multicast_isexc(brmctx, pg, h_addr,
2834                                                      grec->grec_src, nsrcs,
2835                                                      sizeof(struct in6_addr),
2836                                                      grec->grec_type);
2837                         break;
2838                 case MLD2_CHANGE_TO_INCLUDE:
2839                         changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
2840                                                     grec->grec_src, nsrcs,
2841                                                     sizeof(struct in6_addr),
2842                                                     grec->grec_type);
2843                         break;
2844                 case MLD2_CHANGE_TO_EXCLUDE:
2845                         changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
2846                                                     grec->grec_src, nsrcs,
2847                                                     sizeof(struct in6_addr),
2848                                                     grec->grec_type);
2849                         break;
2850                 case MLD2_BLOCK_OLD_SOURCES:
2851                         changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
2852                                                      grec->grec_src, nsrcs,
2853                                                      sizeof(struct in6_addr),
2854                                                      grec->grec_type);
2855                         break;
2856                 }
2857                 if (changed)
2858                         br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
2859 unlock_continue:
2860                 spin_unlock_bh(&brmctx->br->multicast_lock);
2861         }
2862
2863         return err;
2864 }
2865 #endif
2866
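/* Querier election: a query source numerically lower than or equal to the
 * currently selected querier address (or, for IPv4, any source while no
 * querier address has been recorded) takes over immediately; other sources
 * are accepted only once both the own-query and other-querier timers have
 * expired. Runs under br->multicast_lock via the query-received paths below.
 */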
2867 static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
2868                                         struct net_bridge_mcast_port *pmctx,
2869                                         struct br_ip *saddr)
2870 {
2871         int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
2872         struct timer_list *own_timer, *other_timer;
2873         struct bridge_mcast_querier *querier;
2874
2875         switch (saddr->proto) {
2876         case htons(ETH_P_IP):
2877                 querier = &brmctx->ip4_querier;
2878                 own_timer = &brmctx->ip4_own_query.timer;
2879                 other_timer = &brmctx->ip4_other_query.timer;
2880                 if (!querier->addr.src.ip4 ||
2881                     ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
2882                         goto update;
2883                 break;
2884 #if IS_ENABLED(CONFIG_IPV6)
2885         case htons(ETH_P_IPV6):
2886                 querier = &brmctx->ip6_querier;
2887                 own_timer = &brmctx->ip6_own_query.timer;
2888                 other_timer = &brmctx->ip6_other_query.timer;
2889                 if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
2890                         goto update;
2891                 break;
2892 #endif
2893         default:
2894                 return false;
2895         }
2896
2897         if (!timer_pending(own_timer) && !timer_pending(other_timer))
2898                 goto update;
2899
2900         return false;
2901
2902 update:
2903         br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);
2904
2905         return true;
2906 }
2907
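/* Resolve the recorded querier port ifindex back to a bridge port, or NULL
 * if no port was recorded, the ifindex no longer resolves to a device, or
 * the device is not a port of this bridge. Called under RCU (or RTNL), see
 * the dump path below.
 */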
2908 static struct net_bridge_port *
2909 __br_multicast_get_querier_port(struct net_bridge *br,
2910                                 const struct bridge_mcast_querier *querier)
2911 {
2912         int port_ifidx = READ_ONCE(querier->port_ifidx);
2913         struct net_bridge_port *p;
2914         struct net_device *dev;
2915
2916         if (port_ifidx == 0)
2917                 return NULL;
2918
2919         dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
2920         if (!dev)
2921                 return NULL;
2922         p = br_port_get_rtnl_rcu(dev);
2923         if (!p || p->br != br)
2924                 return NULL;
2925
2926         return p;
2927 }
2928
2929 size_t br_multicast_querier_state_size(void)
2930 {
2931         return nla_total_size(0) +              /* nest attribute */
2932                nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
2933                nla_total_size(sizeof(int)) +    /* BRIDGE_QUERIER_IP_PORT */
2934                nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
2935 #if IS_ENABLED(CONFIG_IPV6)
2936                nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
2937                nla_total_size(sizeof(int)) +             /* BRIDGE_QUERIER_IPV6_PORT */
2938                nla_total_size_64bit(sizeof(u64)) +       /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
2939 #endif
2940                0;
2941 }
2942
2943 /* protected by rtnl or rcu */
2944 int br_multicast_dump_querier_state(struct sk_buff *skb,
2945                                     const struct net_bridge_mcast *brmctx,
2946                                     int nest_attr)
2947 {
2948         struct bridge_mcast_querier querier = {};
2949         struct net_bridge_port *p;
2950         struct nlattr *nest;
2951
2952         if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
2953             br_multicast_ctx_vlan_global_disabled(brmctx))
2954                 return 0;
2955
2956         nest = nla_nest_start(skb, nest_attr);
2957         if (!nest)
2958                 return -EMSGSIZE;
2959
2960         rcu_read_lock();
2961         if (!brmctx->multicast_querier &&
2962             !timer_pending(&brmctx->ip4_other_query.timer))
2963                 goto out_v6;
2964
2965         br_multicast_read_querier(&brmctx->ip4_querier, &querier);
2966         if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
2967                             querier.addr.src.ip4)) {
2968                 rcu_read_unlock();
2969                 goto out_err;
2970         }
2971
2972         p = __br_multicast_get_querier_port(brmctx->br, &querier);
2973         if (timer_pending(&brmctx->ip4_other_query.timer) &&
2974             (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
2975                                br_timer_value(&brmctx->ip4_other_query.timer),
2976                                BRIDGE_QUERIER_PAD) ||
2977              (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
2978                 rcu_read_unlock();
2979                 goto out_err;
2980         }
2981
2982 out_v6:
2983 #if IS_ENABLED(CONFIG_IPV6)
2984         if (!brmctx->multicast_querier &&
2985             !timer_pending(&brmctx->ip6_other_query.timer))
2986                 goto out;
2987
2988         br_multicast_read_querier(&brmctx->ip6_querier, &querier);
2989         if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
2990                              &querier.addr.src.ip6)) {
2991                 rcu_read_unlock();
2992                 goto out_err;
2993         }
2994
2995         p = __br_multicast_get_querier_port(brmctx->br, &querier);
2996         if (timer_pending(&brmctx->ip6_other_query.timer) &&
2997             (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
2998                                br_timer_value(&brmctx->ip6_other_query.timer),
2999                                BRIDGE_QUERIER_PAD) ||
3000              (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
3001                                p->dev->ifindex)))) {
3002                 rcu_read_unlock();
3003                 goto out_err;
3004         }
3005 out:
3006 #endif
3007         rcu_read_unlock();
3008         nla_nest_end(skb, nest);
3009         if (!nla_len(nest))
3010                 nla_nest_cancel(skb, nest);
3011
3012         return 0;
3013
3014 out_err:
3015         nla_nest_cancel(skb, nest);
3016         return -EMSGSIZE;
3017 }
3018
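/* Note the other querier's maximum response delay (only if its timer is not
 * already running) and re-arm the "other querier present" timer for a full
 * querier interval.
 */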
3019 static void
3020 br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
3021                                 struct bridge_mcast_other_query *query,
3022                                 unsigned long max_delay)
3023 {
3024         if (!timer_pending(&query->timer))
3025                 query->delay_time = jiffies + max_delay;
3026
3027         mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
3028 }
3029
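/* Notify switchdev drivers (deferred) that this port's multicast router
 * state has changed, via SWITCHDEV_ATTR_ID_PORT_MROUTER.
 */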
3030 static void br_port_mc_router_state_change(struct net_bridge_port *p,
3031                                            bool is_mc_router)
3032 {
3033         struct switchdev_attr attr = {
3034                 .orig_dev = p->dev,
3035                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
3036                 .flags = SWITCHDEV_F_DEFER,
3037                 .u.mrouter = is_mc_router,
3038         };
3039
3040         switchdev_port_attr_set(p->dev, &attr, NULL);
3041 }
3042
3043 static struct net_bridge_port *
3044 br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
3045                              struct hlist_head *mc_router_list,
3046                              struct hlist_node *rlist)
3047 {
3048         struct net_bridge_mcast_port *pmctx;
3049
3050 #if IS_ENABLED(CONFIG_IPV6)
3051         if (mc_router_list == &brmctx->ip6_mc_router_list)
3052                 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3053                                     ip6_rlist);
3054         else
3055 #endif
3056                 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3057                                     ip4_rlist);
3058
3059         return pmctx->port;
3060 }
3061
3062 static struct hlist_node *
3063 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3064                             struct net_bridge_port *port,
3065                             struct hlist_head *mc_router_list)
3066 {
3068         struct hlist_node *slot = NULL;
3069         struct net_bridge_port *p;
3070         struct hlist_node *rlist;
3071
3072         hlist_for_each(rlist, mc_router_list) {
3073                 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3074
3075                 if ((unsigned long)port >= (unsigned long)p)
3076                         break;
3077
3078                 slot = rlist;
3079         }
3080
3081         return slot;
3082 }
3083
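/* True if the port is not on the multicast router list of the other protocol
 * family, i.e. adding/removing rnode changes whether the port is a multicast
 * router at all.
 */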
3084 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3085                                            struct hlist_node *rnode)
3086 {
3087 #if IS_ENABLED(CONFIG_IPV6)
3088         if (rnode != &pmctx->ip6_rlist)
3089                 return hlist_unhashed(&pmctx->ip6_rlist);
3090         else
3091                 return hlist_unhashed(&pmctx->ip4_rlist);
3092 #else
3093         return true;
3094 #endif
3095 }
3096
3097 /* Add port to router_list
3098  *  list is maintained ordered by pointer value
3099  *  and locked by br->multicast_lock and RCU
3100  */
3101 static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
3102                                     struct net_bridge_mcast_port *pmctx,
3103                                     struct hlist_node *rlist,
3104                                     struct hlist_head *mc_router_list)
3105 {
3106         struct hlist_node *slot;
3107
3108         if (!hlist_unhashed(rlist))
3109                 return;
3110
3111         slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
3112
3113         if (slot)
3114                 hlist_add_behind_rcu(rlist, slot);
3115         else
3116                 hlist_add_head_rcu(rlist, mc_router_list);
3117
3118         /* For backwards compatibility for now, only notify if we
3119          * switched from no IPv4/IPv6 multicast router to a new
3120          * IPv4 or IPv6 multicast router.
3121          */
3122         if (br_multicast_no_router_otherpf(pmctx, rlist)) {
3123                 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
3124                 br_port_mc_router_state_change(pmctx->port, true);
3125         }
3126 }
3127
3128 /* Add port to router_list
3129  *  list is maintained ordered by pointer value
3130  *  and locked by br->multicast_lock and RCU
3131  */
3132 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
3133                                         struct net_bridge_mcast_port *pmctx)
3134 {
3135         br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
3136                                 &brmctx->ip4_mc_router_list);
3137 }
3138
3139 /* Add port to router_list
3140  *  list is maintained ordered by pointer value
3141  *  and locked by br->multicast_lock and RCU
3142  */
3143 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
3144                                         struct net_bridge_mcast_port *pmctx)
3145 {
3146 #if IS_ENABLED(CONFIG_IPV6)
3147         br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
3148                                 &brmctx->ip6_mc_router_list);
3149 #endif
3150 }
3151
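/* A querier or multicast router was heard: with no port context this marks
 * the bridge itself as a router (only in temp-query mode) and re-arms the
 * bridge-level router timer; with a port context it adds the port to the
 * router list and re-arms the per-port router timer, unless the port's
 * router type is "disabled" or "permanent".
 */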
3152 static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
3153                                      struct net_bridge_mcast_port *pmctx,
3154                                      struct timer_list *timer,
3155                                      struct hlist_node *rlist,
3156                                      struct hlist_head *mc_router_list)
3157 {
3158         unsigned long now = jiffies;
3159
3160         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3161                 return;
3162
3163         if (!pmctx) {
3164                 if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
3165                         if (!br_ip4_multicast_is_router(brmctx) &&
3166                             !br_ip6_multicast_is_router(brmctx))
3167                                 br_mc_router_state_change(brmctx->br, true);
3168                         mod_timer(timer, now + brmctx->multicast_querier_interval);
3169                 }
3170                 return;
3171         }
3172
3173         if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
3174             pmctx->multicast_router == MDB_RTR_TYPE_PERM)
3175                 return;
3176
3177         br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
3178         mod_timer(timer, now + brmctx->multicast_querier_interval);
3179 }
3180
3181 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3182                                          struct net_bridge_mcast_port *pmctx)
3183 {
3184         struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3185         struct hlist_node *rlist = NULL;
3186
3187         if (pmctx) {
3188                 timer = &pmctx->ip4_mc_router_timer;
3189                 rlist = &pmctx->ip4_rlist;
3190         }
3191
3192         br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3193                                  &brmctx->ip4_mc_router_list);
3194 }
3195
3196 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
3197                                          struct net_bridge_mcast_port *pmctx)
3198 {
3199 #if IS_ENABLED(CONFIG_IPV6)
3200         struct timer_list *timer = &brmctx->ip6_mc_router_timer;
3201         struct hlist_node *rlist = NULL;
3202
3203         if (pmctx) {
3204                 timer = &pmctx->ip6_mc_router_timer;
3205                 rlist = &pmctx->ip6_rlist;
3206         }
3207
3208         br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3209                                  &brmctx->ip6_mc_router_list);
3210 #endif
3211 }
3212
3213 static void
3214 br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
3215                                 struct net_bridge_mcast_port *pmctx,
3216                                 struct bridge_mcast_other_query *query,
3217                                 struct br_ip *saddr,
3218                                 unsigned long max_delay)
3219 {
3220         if (!br_multicast_select_querier(brmctx, pmctx, saddr))
3221                 return;
3222
3223         br_multicast_update_query_timer(brmctx, query, max_delay);
3224         br_ip4_multicast_mark_router(brmctx, pmctx);
3225 }
3226
3227 #if IS_ENABLED(CONFIG_IPV6)
3228 static void
3229 br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
3230                                 struct net_bridge_mcast_port *pmctx,
3231                                 struct bridge_mcast_other_query *query,
3232                                 struct br_ip *saddr,
3233                                 unsigned long max_delay)
3234 {
3235         if (!br_multicast_select_querier(brmctx, pmctx, saddr))
3236                 return;
3237
3238         br_multicast_update_query_timer(brmctx, query, max_delay);
3239         br_ip6_multicast_mark_router(brmctx, pmctx);
3240 }
3241 #endif
3242
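/* Process an incoming IGMP query: derive the maximum response delay from the
 * IGMPv2 code or IGMPv3 MRC field, treat a general query as a candidate
 * querier/router, and for a group-specific query lower the host and per-port
 * membership timers so memberships that are not re-reported expire within
 * last_member_count response intervals.
 */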
3243 static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
3244                                    struct net_bridge_mcast_port *pmctx,
3245                                    struct sk_buff *skb,
3246                                    u16 vid)
3247 {
3248         unsigned int transport_len = ip_transport_len(skb);
3249         const struct iphdr *iph = ip_hdr(skb);
3250         struct igmphdr *ih = igmp_hdr(skb);
3251         struct net_bridge_mdb_entry *mp;
3252         struct igmpv3_query *ih3;
3253         struct net_bridge_port_group *p;
3254         struct net_bridge_port_group __rcu **pp;
3255         struct br_ip saddr = {};
3256         unsigned long max_delay;
3257         unsigned long now = jiffies;
3258         __be32 group;
3259
3260         spin_lock(&brmctx->br->multicast_lock);
3261         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3262                 goto out;
3263
3264         group = ih->group;
3265
3266         if (transport_len == sizeof(*ih)) {
3267                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
3268
3269                 if (!max_delay) {
3270                         max_delay = 10 * HZ;
3271                         group = 0;
3272                 }
3273         } else if (transport_len >= sizeof(*ih3)) {
3274                 ih3 = igmpv3_query_hdr(skb);
3275                 if (ih3->nsrcs ||
3276                     (brmctx->multicast_igmp_version == 3 && group &&
3277                      ih3->suppress))
3278                         goto out;
3279
3280                 max_delay = ih3->code ?
3281                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
3282         } else {
3283                 goto out;
3284         }
3285
3286         if (!group) {
3287                 saddr.proto = htons(ETH_P_IP);
3288                 saddr.src.ip4 = iph->saddr;
3289
3290                 br_ip4_multicast_query_received(brmctx, pmctx,
3291                                                 &brmctx->ip4_other_query,
3292                                                 &saddr, max_delay);
3293                 goto out;
3294         }
3295
3296         mp = br_mdb_ip4_get(brmctx->br, group, vid);
3297         if (!mp)
3298                 goto out;
3299
3300         max_delay *= brmctx->multicast_last_member_count;
3301
3302         if (mp->host_joined &&
3303             (timer_pending(&mp->timer) ?
3304              time_after(mp->timer.expires, now + max_delay) :
3305              try_to_del_timer_sync(&mp->timer) >= 0))
3306                 mod_timer(&mp->timer, now + max_delay);
3307
3308         for (pp = &mp->ports;
3309              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3310              pp = &p->next) {
3311                 if (timer_pending(&p->timer) ?
3312                     time_after(p->timer.expires, now + max_delay) :
3313                     try_to_del_timer_sync(&p->timer) >= 0 &&
3314                     (brmctx->multicast_igmp_version == 2 ||
3315                      p->filter_mode == MCAST_EXCLUDE))
3316                         mod_timer(&p->timer, now + max_delay);
3317         }
3318
3319 out:
3320         spin_unlock(&brmctx->br->multicast_lock);
3321 }
3322
3323 #if IS_ENABLED(CONFIG_IPV6)
3324 static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
3325                                   struct net_bridge_mcast_port *pmctx,
3326                                   struct sk_buff *skb,
3327                                   u16 vid)
3328 {
3329         unsigned int transport_len = ipv6_transport_len(skb);
3330         struct mld_msg *mld;
3331         struct net_bridge_mdb_entry *mp;
3332         struct mld2_query *mld2q;
3333         struct net_bridge_port_group *p;
3334         struct net_bridge_port_group __rcu **pp;
3335         struct br_ip saddr = {};
3336         unsigned long max_delay;
3337         unsigned long now = jiffies;
3338         unsigned int offset = skb_transport_offset(skb);
3339         const struct in6_addr *group = NULL;
3340         bool is_general_query;
3341         int err = 0;
3342
3343         spin_lock(&brmctx->br->multicast_lock);
3344         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3345                 goto out;
3346
3347         if (transport_len == sizeof(*mld)) {
3348                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
3349                         err = -EINVAL;
3350                         goto out;
3351                 }
3352                 mld = (struct mld_msg *) icmp6_hdr(skb);
3353                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
3354                 if (max_delay)
3355                         group = &mld->mld_mca;
3356         } else {
3357                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
3358                         err = -EINVAL;
3359                         goto out;
3360                 }
3361                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
3362                 if (!mld2q->mld2q_nsrcs)
3363                         group = &mld2q->mld2q_mca;
3364                 if (brmctx->multicast_mld_version == 2 &&
3365                     !ipv6_addr_any(&mld2q->mld2q_mca) &&
3366                     mld2q->mld2q_suppress)
3367                         goto out;
3368
3369                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
3370         }
3371
3372         is_general_query = group && ipv6_addr_any(group);
3373
3374         if (is_general_query) {
3375                 saddr.proto = htons(ETH_P_IPV6);
3376                 saddr.src.ip6 = ipv6_hdr(skb)->saddr;
3377
3378                 br_ip6_multicast_query_received(brmctx, pmctx,
3379                                                 &brmctx->ip6_other_query,
3380                                                 &saddr, max_delay);
3381                 goto out;
3382         } else if (!group) {
3383                 goto out;
3384         }
3385
3386         mp = br_mdb_ip6_get(brmctx->br, group, vid);
3387         if (!mp)
3388                 goto out;
3389
3390         max_delay *= brmctx->multicast_last_member_count;
3391         if (mp->host_joined &&
3392             (timer_pending(&mp->timer) ?
3393              time_after(mp->timer.expires, now + max_delay) :
3394              try_to_del_timer_sync(&mp->timer) >= 0))
3395                 mod_timer(&mp->timer, now + max_delay);
3396
3397         for (pp = &mp->ports;
3398              (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3399              pp = &p->next) {
3400                 if (timer_pending(&p->timer) ?
3401                     time_after(p->timer.expires, now + max_delay) :
3402                     try_to_del_timer_sync(&p->timer) >= 0 &&
3403                     (brmctx->multicast_mld_version == 1 ||
3404                      p->filter_mode == MCAST_EXCLUDE))
3405                         mod_timer(&p->timer, now + max_delay);
3406         }
3407
3408 out:
3409         spin_unlock(&brmctx->br->multicast_lock);
3410         return err;
3411 }
3412 #endif
3413
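/* Common IGMP/MLD leave handling: with fast-leave enabled on the port the
 * matching non-permanent port group is deleted immediately. Otherwise, unless
 * another querier is active, the group/port membership timers are lowered to
 * last_member_count * last_member_interval, and if we are the querier a
 * group-specific query is sent first so remaining members can refresh.
 */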
3414 static void
3415 br_multicast_leave_group(struct net_bridge_mcast *brmctx,
3416                          struct net_bridge_mcast_port *pmctx,
3417                          struct br_ip *group,
3418                          struct bridge_mcast_other_query *other_query,
3419                          struct bridge_mcast_own_query *own_query,
3420                          const unsigned char *src)
3421 {
3422         struct net_bridge_mdb_entry *mp;
3423         struct net_bridge_port_group *p;
3424         unsigned long now;
3425         unsigned long time;
3426
3427         spin_lock(&brmctx->br->multicast_lock);
3428         if (!br_multicast_ctx_should_use(brmctx, pmctx))
3429                 goto out;
3430
3431         mp = br_mdb_ip_get(brmctx->br, group);
3432         if (!mp)
3433                 goto out;
3434
3435         if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
3436                 struct net_bridge_port_group __rcu **pp;
3437
3438                 for (pp = &mp->ports;
3439                      (p = mlock_dereference(*pp, brmctx->br)) != NULL;
3440                      pp = &p->next) {
3441                         if (!br_port_group_equal(p, pmctx->port, src))
3442                                 continue;
3443
3444                         if (p->flags & MDB_PG_FLAGS_PERMANENT)
3445                                 break;
3446
3447                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
3448                         br_multicast_del_pg(mp, p, pp);
3449                 }
3450                 goto out;
3451         }
3452
3453         if (timer_pending(&other_query->timer))
3454                 goto out;
3455
3456         if (brmctx->multicast_querier) {
3457                 __br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
3458                                           false, 0, NULL);
3459
3460                 time = jiffies + brmctx->multicast_last_member_count *
3461                                  brmctx->multicast_last_member_interval;
3462
3463                 mod_timer(&own_query->timer, time);
3464
3465                 for (p = mlock_dereference(mp->ports, brmctx->br);
3466                      p != NULL && pmctx != NULL;
3467                      p = mlock_dereference(p->next, brmctx->br)) {
3468                         if (!br_port_group_equal(p, pmctx->port, src))
3469                                 continue;
3470
3471                         if (!hlist_unhashed(&p->mglist) &&
3472                             (timer_pending(&p->timer) ?
3473                              time_after(p->timer.expires, time) :
3474                              try_to_del_timer_sync(&p->timer) >= 0)) {
3475                                 mod_timer(&p->timer, time);
3476                         }
3477
3478                         break;
3479                 }
3480         }
3481
3482         now = jiffies;
3483         time = now + brmctx->multicast_last_member_count *
3484                      brmctx->multicast_last_member_interval;
3485
3486         if (!pmctx) {
3487                 if (mp->host_joined &&
3488                     (timer_pending(&mp->timer) ?
3489                      time_after(mp->timer.expires, time) :
3490                      try_to_del_timer_sync(&mp->timer) >= 0)) {
3491                         mod_timer(&mp->timer, time);
3492                 }
3493
3494                 goto out;
3495         }
3496
3497         for (p = mlock_dereference(mp->ports, brmctx->br);
3498              p != NULL;
3499              p = mlock_dereference(p->next, brmctx->br)) {
3500                 if (p->key.port != pmctx->port)
3501                         continue;
3502
3503                 if (!hlist_unhashed(&p->mglist) &&
3504                     (timer_pending(&p->timer) ?
3505                      time_after(p->timer.expires, time) :
3506                      try_to_del_timer_sync(&p->timer) >= 0)) {
3507                         mod_timer(&p->timer, time);
3508                 }
3509
3510                 break;
3511         }
3512 out:
3513         spin_unlock(&brmctx->br->multicast_lock);
3514 }
3515
3516 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
3517                                          struct net_bridge_mcast_port *pmctx,
3518                                          __be32 group,
3519                                          __u16 vid,
3520                                          const unsigned char *src)
3521 {
3522         struct br_ip br_group;
3523         struct bridge_mcast_own_query *own_query;
3524
3525         if (ipv4_is_local_multicast(group))
3526                 return;
3527
3528         own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
3529
3530         memset(&br_group, 0, sizeof(br_group));
3531         br_group.dst.ip4 = group;
3532         br_group.proto = htons(ETH_P_IP);
3533         br_group.vid = vid;
3534
3535         br_multicast_leave_group(brmctx, pmctx, &br_group,
3536                                  &brmctx->ip4_other_query,
3537                                  own_query, src);
3538 }
3539
3540 #if IS_ENABLED(CONFIG_IPV6)
3541 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
3542                                          struct net_bridge_mcast_port *pmctx,
3543                                          const struct in6_addr *group,
3544                                          __u16 vid,
3545                                          const unsigned char *src)
3546 {
3547         struct br_ip br_group;
3548         struct bridge_mcast_own_query *own_query;
3549
3550         if (ipv6_addr_is_ll_all_nodes(group))
3551                 return;
3552
3553         own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;
3554
3555         memset(&br_group, 0, sizeof(br_group));
3556         br_group.dst.ip6 = *group;
3557         br_group.proto = htons(ETH_P_IPV6);
3558         br_group.vid = vid;
3559
3560         br_multicast_leave_group(brmctx, pmctx, &br_group,
3561                                  &brmctx->ip6_other_query,
3562                                  own_query, src);
3563 }
3564 #endif
3565
3566 static void br_multicast_err_count(const struct net_bridge *br,
3567                                    const struct net_bridge_port *p,
3568                                    __be16 proto)
3569 {
3570         struct bridge_mcast_stats __percpu *stats;
3571         struct bridge_mcast_stats *pstats;
3572
3573         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3574                 return;
3575
3576         if (p)
3577                 stats = p->mcast_stats;
3578         else
3579                 stats = br->mcast_stats;
3580         if (WARN_ON(!stats))
3581                 return;
3582
3583         pstats = this_cpu_ptr(stats);
3584
3585         u64_stats_update_begin(&pstats->syncp);
3586         switch (proto) {
3587         case htons(ETH_P_IP):
3588                 pstats->mstats.igmp_parse_errors++;
3589                 break;
3590 #if IS_ENABLED(CONFIG_IPV6)
3591         case htons(ETH_P_IPV6):
3592                 pstats->mstats.mld_parse_errors++;
3593                 break;
3594 #endif
3595         }
3596         u64_stats_update_end(&pstats->syncp);
3597 }
3598
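/* A PIMv2 hello identifies a multicast router behind this port; mark the
 * port (or the bridge itself) as a router.
 */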
3599 static void br_multicast_pim(struct net_bridge_mcast *brmctx,
3600                              struct net_bridge_mcast_port *pmctx,
3601                              const struct sk_buff *skb)
3602 {
3603         unsigned int offset = skb_transport_offset(skb);
3604         struct pimhdr *pimhdr, _pimhdr;
3605
3606         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3607         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3608             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3609                 return;
3610
3611         spin_lock(&brmctx->br->multicast_lock);
3612         br_ip4_multicast_mark_router(brmctx, pmctx);
3613         spin_unlock(&brmctx->br->multicast_lock);
3614 }
3615
3616 static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3617                                     struct net_bridge_mcast_port *pmctx,
3618                                     struct sk_buff *skb)
3619 {
3620         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3621             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3622                 return -ENOMSG;
3623
3624         spin_lock(&brmctx->br->multicast_lock);
3625         br_ip4_multicast_mark_router(brmctx, pmctx);
3626         spin_unlock(&brmctx->br->multicast_lock);
3627
3628         return 0;
3629 }
3630
3631 static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
3632                                  struct net_bridge_mcast_port *pmctx,
3633                                  struct sk_buff *skb,
3634                                  u16 vid)
3635 {
3636         struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3637         const unsigned char *src;
3638         struct igmphdr *ih;
3639         int err;
3640
3641         err = ip_mc_check_igmp(skb);
3642
3643         if (err == -ENOMSG) {
3644                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
3645                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3646                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
3647                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
3648                                 br_multicast_pim(brmctx, pmctx, skb);
3649                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
3650                         br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
3651                 }
3652
3653                 return 0;
3654         } else if (err < 0) {
3655                 br_multicast_err_count(brmctx->br, p, skb->protocol);
3656                 return err;
3657         }
3658
3659         ih = igmp_hdr(skb);
3660         src = eth_hdr(skb)->h_source;
3661         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
3662
3663         switch (ih->type) {
3664         case IGMP_HOST_MEMBERSHIP_REPORT:
3665         case IGMPV2_HOST_MEMBERSHIP_REPORT:
3666                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3667                 err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
3668                                                  src, true);
3669                 break;
3670         case IGMPV3_HOST_MEMBERSHIP_REPORT:
3671                 err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
3672                 break;
3673         case IGMP_HOST_MEMBERSHIP_QUERY:
3674                 br_ip4_multicast_query(brmctx, pmctx, skb, vid);
3675                 break;
3676         case IGMP_HOST_LEAVE_MESSAGE:
3677                 br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
3678                 break;
3679         }
3680
3681         br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3682                            BR_MCAST_DIR_RX);
3683
3684         return err;
3685 }
3686
3687 #if IS_ENABLED(CONFIG_IPV6)
3688 static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3689                                      struct net_bridge_mcast_port *pmctx,
3690                                      struct sk_buff *skb)
3691 {
3692         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3693                 return;
3694
3695         spin_lock(&brmctx->br->multicast_lock);
3696         br_ip6_multicast_mark_router(brmctx, pmctx);
3697         spin_unlock(&brmctx->br->multicast_lock);
3698 }
3699
3700 static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
3701                                  struct net_bridge_mcast_port *pmctx,
3702                                  struct sk_buff *skb,
3703                                  u16 vid)
3704 {
3705         struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
3706         const unsigned char *src;
3707         struct mld_msg *mld;
3708         int err;
3709
3710         err = ipv6_mc_check_mld(skb);
3711
3712         if (err == -ENOMSG || err == -ENODATA) {
3713                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
3714                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3715                 if (err == -ENODATA &&
3716                     ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
3717                         br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);
3718
3719                 return 0;
3720         } else if (err < 0) {
3721                 br_multicast_err_count(brmctx->br, p, skb->protocol);
3722                 return err;
3723         }
3724
3725         mld = (struct mld_msg *)skb_transport_header(skb);
3726         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
3727
3728         switch (mld->mld_type) {
3729         case ICMPV6_MGM_REPORT:
3730                 src = eth_hdr(skb)->h_source;
3731                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3732                 err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
3733                                                  vid, src, true);
3734                 break;
3735         case ICMPV6_MLD2_REPORT:
3736                 err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
3737                 break;
3738         case ICMPV6_MGM_QUERY:
3739                 err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
3740                 break;
3741         case ICMPV6_MGM_REDUCTION:
3742                 src = eth_hdr(skb)->h_source;
3743                 br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
3744                                              src);
3745                 break;
3746         }
3747
3748         br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
3749                            BR_MCAST_DIR_RX);
3750
3751         return err;
3752 }
3753 #endif
3754
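/* Snooping entry point: with per-vlan snooping enabled the bridge and port
 * multicast contexts are switched to the vlan's own contexts (provided the
 * vlan has global multicast enabled) before the packet is dispatched to the
 * IPv4 or IPv6 handler above.
 */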
3755 int br_multicast_rcv(struct net_bridge_mcast **brmctx,
3756                      struct net_bridge_mcast_port **pmctx,
3757                      struct net_bridge_vlan *vlan,
3758                      struct sk_buff *skb, u16 vid)
3759 {
3760         int ret = 0;
3761
3762         BR_INPUT_SKB_CB(skb)->igmp = 0;
3763         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
3764
3765         if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
3766                 return 0;
3767
3768         if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
3769                 const struct net_bridge_vlan *masterv;
3770
3771                 /* the vlan has the master flag set only when transmitting
3772                  * through the bridge device
3773                  */
3774                 if (br_vlan_is_master(vlan)) {
3775                         masterv = vlan;
3776                         *brmctx = &vlan->br_mcast_ctx;
3777                         *pmctx = NULL;
3778                 } else {
3779                         masterv = vlan->brvlan;
3780                         *brmctx = &vlan->brvlan->br_mcast_ctx;
3781                         *pmctx = &vlan->port_mcast_ctx;
3782                 }
3783
3784                 if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
3785                         return 0;
3786         }
3787
3788         switch (skb->protocol) {
3789         case htons(ETH_P_IP):
3790                 ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
3791                 break;
3792 #if IS_ENABLED(CONFIG_IPV6)
3793         case htons(ETH_P_IPV6):
3794                 ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
3795                 break;
3796 #endif
3797         }
3798
3799         return ret;
3800 }
3801
3802 static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
3803                                        struct bridge_mcast_own_query *query,
3804                                        struct bridge_mcast_querier *querier)
3805 {
3806         spin_lock(&brmctx->br->multicast_lock);
3807         if (br_multicast_ctx_vlan_disabled(brmctx))
3808                 goto out;
3809
3810         if (query->startup_sent < brmctx->multicast_startup_query_count)
3811                 query->startup_sent++;
3812
3813         br_multicast_send_query(brmctx, NULL, query);
3814 out:
3815         spin_unlock(&brmctx->br->multicast_lock);
3816 }
3817
3818 static void br_ip4_multicast_query_expired(struct timer_list *t)
3819 {
3820         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3821                                                      ip4_own_query.timer);
3822
3823         br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
3824                                    &brmctx->ip4_querier);
3825 }
3826
3827 #if IS_ENABLED(CONFIG_IPV6)
3828 static void br_ip6_multicast_query_expired(struct timer_list *t)
3829 {
3830         struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
3831                                                      ip6_own_query.timer);
3832
3833         br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
3834                                    &brmctx->ip6_querier);
3835 }
3836 #endif
3837
3838 static void br_multicast_gc_work(struct work_struct *work)
3839 {
3840         struct net_bridge *br = container_of(work, struct net_bridge,
3841                                              mcast_gc_work);
3842         HLIST_HEAD(deleted_head);
3843
3844         spin_lock_bh(&br->multicast_lock);
3845         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3846         spin_unlock_bh(&br->multicast_lock);
3847
3848         br_multicast_gc(&deleted_head);
3849 }
3850
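/* Initialize a bridge or vlan multicast context with the usual IGMP/MLD
 * defaults: last-member and startup query counts of 2, 125s query interval,
 * 10s query response interval, 255s other-querier-present interval, 260s
 * membership interval, and IGMPv2/MLDv1 operation.
 */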
3851 void br_multicast_ctx_init(struct net_bridge *br,
3852                            struct net_bridge_vlan *vlan,
3853                            struct net_bridge_mcast *brmctx)
3854 {
3855         brmctx->br = br;
3856         brmctx->vlan = vlan;
3857         brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3858         brmctx->multicast_last_member_count = 2;
3859         brmctx->multicast_startup_query_count = 2;
3860
3861         brmctx->multicast_last_member_interval = HZ;
3862         brmctx->multicast_query_response_interval = 10 * HZ;
3863         brmctx->multicast_startup_query_interval = 125 * HZ / 4;
3864         brmctx->multicast_query_interval = 125 * HZ;
3865         brmctx->multicast_querier_interval = 255 * HZ;
3866         brmctx->multicast_membership_interval = 260 * HZ;
3867
3868         brmctx->ip4_other_query.delay_time = 0;
3869         brmctx->ip4_querier.port_ifidx = 0;
3870         seqcount_init(&brmctx->ip4_querier.seq);
3871         brmctx->multicast_igmp_version = 2;
3872 #if IS_ENABLED(CONFIG_IPV6)
3873         brmctx->multicast_mld_version = 1;
3874         brmctx->ip6_other_query.delay_time = 0;
3875         brmctx->ip6_querier.port_ifidx = 0;
3876         seqcount_init(&brmctx->ip6_querier.seq);
3877 #endif
3878
3879         timer_setup(&brmctx->ip4_mc_router_timer,
3880                     br_ip4_multicast_local_router_expired, 0);
3881         timer_setup(&brmctx->ip4_other_query.timer,
3882                     br_ip4_multicast_querier_expired, 0);
3883         timer_setup(&brmctx->ip4_own_query.timer,
3884                     br_ip4_multicast_query_expired, 0);
3885 #if IS_ENABLED(CONFIG_IPV6)
3886         timer_setup(&brmctx->ip6_mc_router_timer,
3887                     br_ip6_multicast_local_router_expired, 0);
3888         timer_setup(&brmctx->ip6_other_query.timer,
3889                     br_ip6_multicast_querier_expired, 0);
3890         timer_setup(&brmctx->ip6_own_query.timer,
3891                     br_ip6_multicast_query_expired, 0);
3892 #endif
3893 }
3894
3895 void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
3896 {
3897         __br_multicast_stop(brmctx);
3898 }
3899
3900 void br_multicast_init(struct net_bridge *br)
3901 {
3902         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
3903
3904         br_multicast_ctx_init(br, NULL, &br->multicast_ctx);
3905
3906         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
3907         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
3908
3909         spin_lock_init(&br->multicast_lock);
3910         INIT_HLIST_HEAD(&br->mdb_list);
3911         INIT_HLIST_HEAD(&br->mcast_gc_list);
3912         INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
3913 }
3914
3915 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3916 {
3917         struct in_device *in_dev = in_dev_get(br->dev);
3918
3919         if (!in_dev)
3920                 return;
3921
3922         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3923         in_dev_put(in_dev);
3924 }
3925
3926 #if IS_ENABLED(CONFIG_IPV6)
3927 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3928 {
3929         struct in6_addr addr;
3930
3931         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3932         ipv6_dev_mc_inc(br->dev, &addr);
3933 }
3934 #else
3935 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3936 {
3937 }
3938 #endif
3939
3940 void br_multicast_join_snoopers(struct net_bridge *br)
3941 {
3942         br_ip4_multicast_join_snoopers(br);
3943         br_ip6_multicast_join_snoopers(br);
3944 }
3945
3946 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
3947 {
3948         struct in_device *in_dev = in_dev_get(br->dev);
3949
3950         if (WARN_ON(!in_dev))
3951                 return;
3952
3953         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3954         in_dev_put(in_dev);
3955 }
3956
3957 #if IS_ENABLED(CONFIG_IPV6)
3958 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3959 {
3960         struct in6_addr addr;
3961
3962         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3963         ipv6_dev_mc_dec(br->dev, &addr);
3964 }
3965 #else
3966 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3967 {
3968 }
3969 #endif
3970
3971 void br_multicast_leave_snoopers(struct net_bridge *br)
3972 {
3973         br_ip4_multicast_leave_snoopers(br);
3974         br_ip6_multicast_leave_snoopers(br);
3975 }
3976
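/* (Re)start own-query transmission for a context: reset the startup counter
 * and, if multicast snooping is enabled, fire the own-query timer right away.
 */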
3977 static void __br_multicast_open_query(struct net_bridge *br,
3978                                       struct bridge_mcast_own_query *query)
3979 {
3980         query->startup_sent = 0;
3981
3982         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3983                 return;
3984
3985         mod_timer(&query->timer, jiffies);
3986 }
3987
3988 static void __br_multicast_open(struct net_bridge_mcast *brmctx)
3989 {
3990         __br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
3991 #if IS_ENABLED(CONFIG_IPV6)
3992         __br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
3993 #endif
3994 }
3995
3996 void br_multicast_open(struct net_bridge *br)
3997 {
3998         ASSERT_RTNL();
3999
4000         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4001                 struct net_bridge_vlan_group *vg;
4002                 struct net_bridge_vlan *vlan;
4003
4004                 vg = br_vlan_group(br);
4005                 if (vg) {
4006                         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4007                                 struct net_bridge_mcast *brmctx;
4008
4009                                 brmctx = &vlan->br_mcast_ctx;
4010                                 if (br_vlan_is_brentry(vlan) &&
4011                                     !br_multicast_ctx_vlan_disabled(brmctx))
4012                                         __br_multicast_open(&vlan->br_mcast_ctx);
4013                         }
4014                 }
4015         } else {
4016                 __br_multicast_open(&br->multicast_ctx);
4017         }
4018 }
4019
4020 static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
4021 {
4022         del_timer_sync(&brmctx->ip4_mc_router_timer);
4023         del_timer_sync(&brmctx->ip4_other_query.timer);
4024         del_timer_sync(&brmctx->ip4_own_query.timer);
4025 #if IS_ENABLED(CONFIG_IPV6)
4026         del_timer_sync(&brmctx->ip6_mc_router_timer);
4027         del_timer_sync(&brmctx->ip6_other_query.timer);
4028         del_timer_sync(&brmctx->ip6_own_query.timer);
4029 #endif
4030 }
4031
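/* Enable or disable multicast processing for a single vlan: a master (bridge)
 * vlan starts or stops its own query machinery, while a port vlan enables or
 * disables its port context; both flip BR_VLFLAG_MCAST_ENABLED under
 * br->multicast_lock.
 */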
4032 void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
4033 {
4034         struct net_bridge *br;
4035
4036         /* it's okay to check for the flag without the multicast lock because it
4037          * can only change under RTNL -> multicast_lock; we need the latter to
4038          * sync with timers and packets
4039          */
4040         if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
4041                 return;
4042
4043         if (br_vlan_is_master(vlan)) {
4044                 br = vlan->br;
4045
4046                 if (!br_vlan_is_brentry(vlan) ||
4047                     (on &&
4048                      br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
4049                         return;
4050
4051                 spin_lock_bh(&br->multicast_lock);
4052                 vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
4053                 spin_unlock_bh(&br->multicast_lock);
4054
4055                 if (on)
4056                         __br_multicast_open(&vlan->br_mcast_ctx);
4057                 else
4058                         __br_multicast_stop(&vlan->br_mcast_ctx);
4059         } else {
4060                 struct net_bridge_mcast *brmctx;
4061
4062                 brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
4063                 if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
4064                         return;
4065
4066                 br = vlan->port->br;
4067                 spin_lock_bh(&br->multicast_lock);
4068                 vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
4069                 if (on)
4070                         __br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
4071                 else
4072                         __br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
4073                 spin_unlock_bh(&br->multicast_lock);
4074         }
4075 }
4076
4077 static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
4078 {
4079         struct net_bridge_port *p;
4080
4081         if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
4082                 return;
4083
4084         list_for_each_entry(p, &vlan->br->port_list, list) {
4085                 struct net_bridge_vlan *vport;
4086
4087                 vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
4088                 if (!vport)
4089                         continue;
4090                 br_multicast_toggle_one_vlan(vport, on);
4091         }
4092
4093         if (br_vlan_is_brentry(vlan))
4094                 br_multicast_toggle_one_vlan(vlan, on);
4095 }
4096
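/* Switch the bridge between vlan-based and bridge-wide multicast snooping.
 * Enabling it requires vlan filtering; on a transition the non-vlan contexts
 * (bridge and ports) are stopped or restarted and every existing vlan is
 * toggled to match.
 */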
4097 int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
4098                                       struct netlink_ext_ack *extack)
4099 {
4100         struct net_bridge_vlan_group *vg;
4101         struct net_bridge_vlan *vlan;
4102         struct net_bridge_port *p;
4103
4104         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
4105                 return 0;
4106
4107         if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
4108                 NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
4109                 return -EINVAL;
4110         }
4111
4112         vg = br_vlan_group(br);
4113         if (!vg)
4114                 return 0;
4115
4116         br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);
4117
4118         /* disable/enable non-vlan mcast contexts based on vlan snooping */
4119         if (on)
4120                 __br_multicast_stop(&br->multicast_ctx);
4121         else
4122                 __br_multicast_open(&br->multicast_ctx);
4123         list_for_each_entry(p, &br->port_list, list) {
4124                 if (on)
4125                         br_multicast_disable_port(p);
4126                 else
4127                         br_multicast_enable_port(p);
4128         }
4129
4130         list_for_each_entry(vlan, &vg->vlan_list, vlist)
4131                 br_multicast_toggle_vlan(vlan, on);
4132
4133         return 0;
4134 }
4135
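/* Toggle the global (user-controlled) mcast enabled flag of a master vlan and
 * propagate the change; returns true only if the flag actually changed.
 */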
4136 bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
4137 {
4138         ASSERT_RTNL();
4139
4140         /* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
4141          * requires only RTNL to change
4142          */
4143         if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
4144                 return false;
4145
4146         vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
4147         br_multicast_toggle_vlan(vlan, on);
4148
4149         return true;
4150 }
4151
4152 void br_multicast_stop(struct net_bridge *br)
4153 {
4154         ASSERT_RTNL();
4155
4156         if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4157                 struct net_bridge_vlan_group *vg;
4158                 struct net_bridge_vlan *vlan;
4159
4160                 vg = br_vlan_group(br);
4161                 if (vg) {
4162                         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4163                                 struct net_bridge_mcast *brmctx;
4164
4165                                 brmctx = &vlan->br_mcast_ctx;
4166                                 if (br_vlan_is_brentry(vlan) &&
4167                                     !br_multicast_ctx_vlan_disabled(brmctx))
4168                                         __br_multicast_stop(&vlan->br_mcast_ctx);
4169                         }
4170                 }
4171         } else {
4172                 __br_multicast_stop(&br->multicast_ctx);
4173         }
4174 }
4175
4176 void br_multicast_dev_del(struct net_bridge *br)
4177 {
4178         struct net_bridge_mdb_entry *mp;
4179         HLIST_HEAD(deleted_head);
4180         struct hlist_node *tmp;
4181
4182         spin_lock_bh(&br->multicast_lock);
4183         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
4184                 br_multicast_del_mdb_entry(mp);
4185         hlist_move_list(&br->mcast_gc_list, &deleted_head);
4186         spin_unlock_bh(&br->multicast_lock);
4187
4188         br_multicast_ctx_deinit(&br->multicast_ctx);
4189         br_multicast_gc(&deleted_head);
4190         cancel_work_sync(&br->mcast_gc_work);
4191
4192         rcu_barrier();
4193 }
4194
4195 int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
4196 {
4197         int err = -EINVAL;
4198
4199         spin_lock_bh(&brmctx->br->multicast_lock);
4200
4201         switch (val) {
4202         case MDB_RTR_TYPE_DISABLED:
4203         case MDB_RTR_TYPE_PERM:
4204                 br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
4205                 del_timer(&brmctx->ip4_mc_router_timer);
4206 #if IS_ENABLED(CONFIG_IPV6)
4207                 del_timer(&brmctx->ip6_mc_router_timer);
4208 #endif
4209                 brmctx->multicast_router = val;
4210                 err = 0;
4211                 break;
4212         case MDB_RTR_TYPE_TEMP_QUERY:
4213                 if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
4214                         br_mc_router_state_change(brmctx->br, false);
4215                 brmctx->multicast_router = val;
4216                 err = 0;
4217                 break;
4218         }
4219
4220         spin_unlock_bh(&brmctx->br->multicast_lock);
4221
4222         return err;
4223 }
4224
4225 static void
4226 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
4227 {
4228         if (!deleted)
4229                 return;
4230
4231         /* For backwards compatibility, for now only notify once there is
4232          * no multicast router left for either IPv4 or IPv6.
4233          */
4234         if (!hlist_unhashed(&pmctx->ip4_rlist))
4235                 return;
4236 #if IS_ENABLED(CONFIG_IPV6)
4237         if (!hlist_unhashed(&pmctx->ip6_rlist))
4238                 return;
4239 #endif
4240
4241         br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
4242         br_port_mc_router_state_change(pmctx->port, false);
4243
4244         /* don't allow timer refresh */
4245         if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
4246                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
4247 }
4248
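/* Port router modes as handled below: MDB_RTR_TYPE_DISABLED never treats the
 * port as a router port, MDB_RTR_TYPE_TEMP_QUERY (the default) learns router
 * presence dynamically from queries, MDB_RTR_TYPE_PERM marks the port as a
 * router port permanently and MDB_RTR_TYPE_TEMP marks it temporarily, subject
 * to the router timer, which is refreshed when the same value is set again.
 */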
4249 int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
4250                                  unsigned long val)
4251 {
4252         struct net_bridge_mcast *brmctx;
4253         unsigned long now = jiffies;
4254         int err = -EINVAL;
4255         bool del = false;
4256
4257         brmctx = br_multicast_port_ctx_get_global(pmctx);
4258         spin_lock_bh(&brmctx->br->multicast_lock);
4259         if (pmctx->multicast_router == val) {
4260                 /* Refresh the temp router port timer */
4261                 if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
4262                         mod_timer(&pmctx->ip4_mc_router_timer,
4263                                   now + brmctx->multicast_querier_interval);
4264 #if IS_ENABLED(CONFIG_IPV6)
4265                         mod_timer(&pmctx->ip6_mc_router_timer,
4266                                   now + brmctx->multicast_querier_interval);
4267 #endif
4268                 }
4269                 err = 0;
4270                 goto unlock;
4271         }
4272         switch (val) {
4273         case MDB_RTR_TYPE_DISABLED:
4274                 pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
4275                 del |= br_ip4_multicast_rport_del(pmctx);
4276                 del_timer(&pmctx->ip4_mc_router_timer);
4277                 del |= br_ip6_multicast_rport_del(pmctx);
4278 #if IS_ENABLED(CONFIG_IPV6)
4279                 del_timer(&pmctx->ip6_mc_router_timer);
4280 #endif
4281                 br_multicast_rport_del_notify(pmctx, del);
4282                 break;
4283         case MDB_RTR_TYPE_TEMP_QUERY:
4284                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
4285                 del |= br_ip4_multicast_rport_del(pmctx);
4286                 del |= br_ip6_multicast_rport_del(pmctx);
4287                 br_multicast_rport_del_notify(pmctx, del);
4288                 break;
4289         case MDB_RTR_TYPE_PERM:
4290                 pmctx->multicast_router = MDB_RTR_TYPE_PERM;
4291                 del_timer(&pmctx->ip4_mc_router_timer);
4292                 br_ip4_multicast_add_router(brmctx, pmctx);
4293 #if IS_ENABLED(CONFIG_IPV6)
4294                 del_timer(&pmctx->ip6_mc_router_timer);
4295 #endif
4296                 br_ip6_multicast_add_router(brmctx, pmctx);
4297                 break;
4298         case MDB_RTR_TYPE_TEMP:
4299                 pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
4300                 br_ip4_multicast_mark_router(brmctx, pmctx);
4301                 br_ip6_multicast_mark_router(brmctx, pmctx);
4302                 break;
4303         default:
4304                 goto unlock;
4305         }
4306         err = 0;
4307 unlock:
4308         spin_unlock_bh(&brmctx->br->multicast_lock);
4309
4310         return err;
4311 }
4312
4313 int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
4314 {
4315         int err;
4316
4317         if (br_vlan_is_master(v))
4318                 err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
4319         else
4320                 err = br_multicast_set_port_router(&v->port_mcast_ctx,
4321                                                    mcast_router);
4322
4323         return err;
4324 }
4325
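/* (Re)arm our own-query machinery: open the query timer of the given bridge
 * context and enable the matching per-port own queries, using the per-vlan
 * port contexts when the bridge context is a vlan one. This is done only if
 * the context matches the current vlan snooping mode.
 */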
4326 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
4327                                        struct bridge_mcast_own_query *query)
4328 {
4329         struct net_bridge_port *port;
4330
4331         if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
4332                 return;
4333
4334         __br_multicast_open_query(brmctx->br, query);
4335
4336         rcu_read_lock();
4337         list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
4338                 struct bridge_mcast_own_query *ip4_own_query;
4339 #if IS_ENABLED(CONFIG_IPV6)
4340                 struct bridge_mcast_own_query *ip6_own_query;
4341 #endif
4342
4343                 if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
4344                         continue;
4345
4346                 if (br_multicast_ctx_is_vlan(brmctx)) {
4347                         struct net_bridge_vlan *vlan;
4348
4349                         vlan = br_vlan_find(nbp_vlan_group_rcu(port),
4350                                             brmctx->vlan->vid);
4351                         if (!vlan ||
4352                             br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
4353                                 continue;
4354
4355                         ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
4356 #if IS_ENABLED(CONFIG_IPV6)
4357                         ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
4358 #endif
4359                 } else {
4360                         ip4_own_query = &port->multicast_ctx.ip4_own_query;
4361 #if IS_ENABLED(CONFIG_IPV6)
4362                         ip6_own_query = &port->multicast_ctx.ip6_own_query;
4363 #endif
4364                 }
4365
4366                 if (query == &brmctx->ip4_own_query)
4367                         br_multicast_enable(ip4_own_query);
4368 #if IS_ENABLED(CONFIG_IPV6)
4369                 else
4370                         br_multicast_enable(ip6_own_query);
4371 #endif
4372         }
4373         rcu_read_unlock();
4374 }
4375
4376 int br_multicast_toggle(struct net_bridge *br, unsigned long val,
4377                         struct netlink_ext_ack *extack)
4378 {
4379         struct net_bridge_port *port;
4380         bool change_snoopers = false;
4381         int err = 0;
4382
4383         spin_lock_bh(&br->multicast_lock);
4384         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
4385                 goto unlock;
4386
4387         err = br_mc_disabled_update(br->dev, val, extack);
4388         if (err == -EOPNOTSUPP)
4389                 err = 0;
4390         if (err)
4391                 goto unlock;
4392
4393         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
4394         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
4395                 change_snoopers = true;
4396                 goto unlock;
4397         }
4398
4399         if (!netif_running(br->dev))
4400                 goto unlock;
4401
4402         br_multicast_open(br);
4403         list_for_each_entry(port, &br->port_list, list)
4404                 __br_multicast_enable_port_ctx(&port->multicast_ctx);
4405
4406         change_snoopers = true;
4407
4408 unlock:
4409         spin_unlock_bh(&br->multicast_lock);
4410
4411         /* br_multicast_join_snoopers has the potential to cause
4412          * an MLD Report/Leave to be delivered to br_multicast_rcv,
4413          * which would in turn call br_multicast_add_group, which would
4414          * attempt to acquire multicast_lock. To avoid a deadlock on
4415          * multicast_lock, br_multicast_join_snoopers must therefore be
4416          * called after the lock has been released.
4417          *
4418          * br_multicast_leave_snoopers does not have this problem since
4419          * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED and
4420          * returns without calling br_multicast_ipv4/6_rcv if it's not
4421          * enabled. Both calls are made outside the lock just for symmetry.
4422          */
4423         if (change_snoopers) {
4424                 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
4425                         br_multicast_join_snoopers(br);
4426                 else
4427                         br_multicast_leave_snoopers(br);
4428         }
4429
4430         return err;
4431 }
4432
4433 bool br_multicast_enabled(const struct net_device *dev)
4434 {
4435         struct net_bridge *br = netdev_priv(dev);
4436
4437         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4438 }
4439 EXPORT_SYMBOL_GPL(br_multicast_enabled);
4440
4441 bool br_multicast_router(const struct net_device *dev)
4442 {
4443         struct net_bridge *br = netdev_priv(dev);
4444         bool is_router;
4445
4446         spin_lock_bh(&br->multicast_lock);
4447         is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
4448         spin_unlock_bh(&br->multicast_lock);
4449         return is_router;
4450 }
4451 EXPORT_SYMBOL_GPL(br_multicast_router);
4452
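/* Enable/disable acting as IGMP/MLD querier for this multicast context. When
 * enabling, (re)arm the other-querier delay windows and kick off our own
 * queries.
 */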
4453 int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
4454 {
4455         unsigned long max_delay;
4456
4457         val = !!val;
4458
4459         spin_lock_bh(&brmctx->br->multicast_lock);
4460         if (brmctx->multicast_querier == val)
4461                 goto unlock;
4462
4463         WRITE_ONCE(brmctx->multicast_querier, val);
4464         if (!val)
4465                 goto unlock;
4466
4467         max_delay = brmctx->multicast_query_response_interval;
4468
4469         if (!timer_pending(&brmctx->ip4_other_query.timer))
4470                 brmctx->ip4_other_query.delay_time = jiffies + max_delay;
4471
4472         br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);
4473
4474 #if IS_ENABLED(CONFIG_IPV6)
4475         if (!timer_pending(&brmctx->ip6_other_query.timer))
4476                 brmctx->ip6_other_query.delay_time = jiffies + max_delay;
4477
4478         br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
4479 #endif
4480
4481 unlock:
4482         spin_unlock_bh(&brmctx->br->multicast_lock);
4483
4484         return 0;
4485 }
4486
4487 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4488                                   unsigned long val)
4489 {
4490         /* Currently we support only versions 2 and 3 */
4491         switch (val) {
4492         case 2:
4493         case 3:
4494                 break;
4495         default:
4496                 return -EINVAL;
4497         }
4498
4499         spin_lock_bh(&brmctx->br->multicast_lock);
4500         brmctx->multicast_igmp_version = val;
4501         spin_unlock_bh(&brmctx->br->multicast_lock);
4502
4503         return 0;
4504 }
4505
4506 #if IS_ENABLED(CONFIG_IPV6)
4507 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
4508                                  unsigned long val)
4509 {
4510         /* Currently we support only versions 1 and 2 */
4511         switch (val) {
4512         case 1:
4513         case 2:
4514                 break;
4515         default:
4516                 return -EINVAL;
4517         }
4518
4519         spin_lock_bh(&brmctx->br->multicast_lock);
4520         brmctx->multicast_mld_version = val;
4521         spin_unlock_bh(&brmctx->br->multicast_lock);
4522
4523         return 0;
4524 }
4525 #endif
4526
4527 /**
4528  * br_multicast_list_adjacent - Returns snooped multicast addresses
4529  * @dev:        The bridge port adjacent to which to retrieve addresses
4530  * @br_ip_list: The list to store found, snooped multicast IP addresses in
4531  *
4532  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
4533  * snooping feature on all bridge ports of dev's bridge device, excluding
4534  * the addresses from dev itself.
4535  *
4536  * Returns the number of items added to br_ip_list.
4537  *
4538  * Notes:
4539  * - br_ip_list needs to be initialized by the caller
4540  * - br_ip_list might contain duplicate entries in the end
4541  *   (deduplication is up to the caller)
4542  * - the entries added to br_ip_list need to be freed by the caller
4543  */
4544 int br_multicast_list_adjacent(struct net_device *dev,
4545                                struct list_head *br_ip_list)
4546 {
4547         struct net_bridge *br;
4548         struct net_bridge_port *port;
4549         struct net_bridge_port_group *group;
4550         struct br_ip_list *entry;
4551         int count = 0;
4552
4553         rcu_read_lock();
4554         if (!br_ip_list || !netif_is_bridge_port(dev))
4555                 goto unlock;
4556
4557         port = br_port_get_rcu(dev);
4558         if (!port || !port->br)
4559                 goto unlock;
4560
4561         br = port->br;
4562
4563         list_for_each_entry_rcu(port, &br->port_list, list) {
4564                 if (!port->dev || port->dev == dev)
4565                         continue;
4566
4567                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
4568                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
4569                         if (!entry)
4570                                 goto unlock;
4571
4572                         entry->addr = group->key.addr;
4573                         list_add(&entry->list, br_ip_list);
4574                         count++;
4575                 }
4576         }
4577
4578 unlock:
4579         rcu_read_unlock();
4580         return count;
4581 }
4582 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
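
/* Usage sketch (illustrative only; "port_dev" is an assumed net_device that
 * is enslaved to a bridge, and the kmalloc'ed entries are released by the
 * caller as noted above):
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n;
 *
 *	n = br_multicast_list_adjacent(port_dev, &mc_list);
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		... inspect entry->addr ...
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */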
4583
4584 /**
4585  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
4586  * @dev: The bridge port providing the bridge on which to check for a querier
4587  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4588  *
4589  * Checks whether the given interface has a bridge on top and if so returns
4590  * true if a valid querier exists anywhere on the bridged link layer.
4591  * Otherwise returns false.
4592  */
4593 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
4594 {
4595         struct net_bridge *br;
4596         struct net_bridge_port *port;
4597         struct ethhdr eth;
4598         bool ret = false;
4599
4600         rcu_read_lock();
4601         if (!netif_is_bridge_port(dev))
4602                 goto unlock;
4603
4604         port = br_port_get_rcu(dev);
4605         if (!port || !port->br)
4606                 goto unlock;
4607
4608         br = port->br;
4609
4610         memset(&eth, 0, sizeof(eth));
4611         eth.h_proto = htons(proto);
4612
4613         ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);
4614
4615 unlock:
4616         rcu_read_unlock();
4617         return ret;
4618 }
4619 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
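
/* Usage sketch (illustrative only; "port_dev" is an assumed net_device that
 * is enslaved to a bridge):
 *
 *	if (!br_multicast_has_querier_anywhere(port_dev, ETH_P_IP))
 *		... e.g. fall back to flooding IPv4 multicast ...
 */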
4620
4621 /**
4622  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
4623  * @dev: The bridge port adjacent to which to check for a querier
4624  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4625  *
4626  * Checks whether the given interface has a bridge on top and if so returns
4627  * true if a selected querier is behind one of the other ports of this
4628  * bridge. Otherwise returns false.
4629  */
4630 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
4631 {
4632         struct net_bridge_mcast *brmctx;
4633         struct net_bridge *br;
4634         struct net_bridge_port *port;
4635         bool ret = false;
4636         int port_ifidx;
4637
4638         rcu_read_lock();
4639         if (!netif_is_bridge_port(dev))
4640                 goto unlock;
4641
4642         port = br_port_get_rcu(dev);
4643         if (!port || !port->br)
4644                 goto unlock;
4645
4646         br = port->br;
4647         brmctx = &br->multicast_ctx;
4648
4649         switch (proto) {
4650         case ETH_P_IP:
4651                 port_ifidx = brmctx->ip4_querier.port_ifidx;
4652                 if (!timer_pending(&brmctx->ip4_other_query.timer) ||
4653                     port_ifidx == port->dev->ifindex)
4654                         goto unlock;
4655                 break;
4656 #if IS_ENABLED(CONFIG_IPV6)
4657         case ETH_P_IPV6:
4658                 port_ifidx = brmctx->ip6_querier.port_ifidx;
4659                 if (!timer_pending(&brmctx->ip6_other_query.timer) ||
4660                     port_ifidx == port->dev->ifindex)
4661                         goto unlock;
4662                 break;
4663 #endif
4664         default:
4665                 goto unlock;
4666         }
4667
4668         ret = true;
4669 unlock:
4670         rcu_read_unlock();
4671         return ret;
4672 }
4673 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
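
/* Note that unlike br_multicast_has_querier_anywhere(), this only reports a
 * querier elected behind one of the *other* ports of the bridge; a querier
 * behind the given port itself does not count. Illustrative check, with the
 * same assumed "port_dev":
 *
 *	if (br_multicast_has_querier_adjacent(port_dev, ETH_P_IPV6))
 *		... some other port already sees an MLD querier ...
 */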
4674
4675 /**
4676  * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
4677  * @dev: The bridge port adjacent to which to check for a multicast router
4678  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
4679  *
4680  * Checks whether the given interface has a bridge on top and if so returns
4681  * true if a multicast router is behind one of the other ports of this
4682  * bridge. Otherwise returns false.
4683  */
4684 bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
4685 {
4686         struct net_bridge_mcast_port *pmctx;
4687         struct net_bridge_mcast *brmctx;
4688         struct net_bridge_port *port;
4689         bool ret = false;
4690
4691         rcu_read_lock();
4692         port = br_port_get_check_rcu(dev);
4693         if (!port)
4694                 goto unlock;
4695
4696         brmctx = &port->br->multicast_ctx;
4697         switch (proto) {
4698         case ETH_P_IP:
4699                 hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
4700                                          ip4_rlist) {
4701                         if (pmctx->port == port)
4702                                 continue;
4703
4704                         ret = true;
4705                         goto unlock;
4706                 }
4707                 break;
4708 #if IS_ENABLED(CONFIG_IPV6)
4709         case ETH_P_IPV6:
4710                 hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
4711                                          ip6_rlist) {
4712                         if (pmctx->port == port)
4713                                 continue;
4714
4715                         ret = true;
4716                         goto unlock;
4717                 }
4718                 break;
4719 #endif
4720         default:
4721                 /* when compiled without IPv6 support, be conservative and
4722                  * always assume presence of an IPv6 multicast router
4723                  */
4724                 ret = true;
4725         }
4726
4727 unlock:
4728         rcu_read_unlock();
4729         return ret;
4730 }
4731 EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
4732
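/* Per-packet IGMP/MLD statistics. The query version is inferred from the
 * transport payload length: IGMPv3 and MLDv2 queries are larger than the
 * basic igmphdr/mld_msg, while IGMPv1 is told apart from IGMPv2 by its zero
 * max response time (code) field.
 */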
4733 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
4734                                const struct sk_buff *skb, u8 type, u8 dir)
4735 {
4736         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
4737         __be16 proto = skb->protocol;
4738         unsigned int t_len;
4739
4740         u64_stats_update_begin(&pstats->syncp);
4741         switch (proto) {
4742         case htons(ETH_P_IP):
4743                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
4744                 switch (type) {
4745                 case IGMP_HOST_MEMBERSHIP_REPORT:
4746                         pstats->mstats.igmp_v1reports[dir]++;
4747                         break;
4748                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
4749                         pstats->mstats.igmp_v2reports[dir]++;
4750                         break;
4751                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
4752                         pstats->mstats.igmp_v3reports[dir]++;
4753                         break;
4754                 case IGMP_HOST_MEMBERSHIP_QUERY:
4755                         if (t_len != sizeof(struct igmphdr)) {
4756                                 pstats->mstats.igmp_v3queries[dir]++;
4757                         } else {
4758                                 unsigned int offset = skb_transport_offset(skb);
4759                                 struct igmphdr *ih, _ihdr;
4760
4761                                 ih = skb_header_pointer(skb, offset,
4762                                                         sizeof(_ihdr), &_ihdr);
4763                                 if (!ih)
4764                                         break;
4765                                 if (!ih->code)
4766                                         pstats->mstats.igmp_v1queries[dir]++;
4767                                 else
4768                                         pstats->mstats.igmp_v2queries[dir]++;
4769                         }
4770                         break;
4771                 case IGMP_HOST_LEAVE_MESSAGE:
4772                         pstats->mstats.igmp_leaves[dir]++;
4773                         break;
4774                 }
4775                 break;
4776 #if IS_ENABLED(CONFIG_IPV6)
4777         case htons(ETH_P_IPV6):
4778                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
4779                         sizeof(struct ipv6hdr);
4780                 t_len -= skb_network_header_len(skb);
4781                 switch (type) {
4782                 case ICMPV6_MGM_REPORT:
4783                         pstats->mstats.mld_v1reports[dir]++;
4784                         break;
4785                 case ICMPV6_MLD2_REPORT:
4786                         pstats->mstats.mld_v2reports[dir]++;
4787                         break;
4788                 case ICMPV6_MGM_QUERY:
4789                         if (t_len != sizeof(struct mld_msg))
4790                                 pstats->mstats.mld_v2queries[dir]++;
4791                         else
4792                                 pstats->mstats.mld_v1queries[dir]++;
4793                         break;
4794                 case ICMPV6_MGM_REDUCTION:
4795                         pstats->mstats.mld_leaves[dir]++;
4796                         break;
4797                 }
4798                 break;
4799 #endif /* CONFIG_IPV6 */
4800         }
4801         u64_stats_update_end(&pstats->syncp);
4802 }
4803
4804 void br_multicast_count(struct net_bridge *br,
4805                         const struct net_bridge_port *p,
4806                         const struct sk_buff *skb, u8 type, u8 dir)
4807 {
4808         struct bridge_mcast_stats __percpu *stats;
4809
4810         /* if multicast_disabled is true then igmp type can't be set */
4811         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
4812                 return;
4813
4814         if (p)
4815                 stats = p->mcast_stats;
4816         else
4817                 stats = br->mcast_stats;
4818         if (WARN_ON(!stats))
4819                 return;
4820
4821         br_mcast_stats_add(stats, skb, type, dir);
4822 }
4823
4824 int br_multicast_init_stats(struct net_bridge *br)
4825 {
4826         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4827         if (!br->mcast_stats)
4828                 return -ENOMEM;
4829
4830         return 0;
4831 }
4832
4833 void br_multicast_uninit_stats(struct net_bridge *br)
4834 {
4835         free_percpu(br->mcast_stats);
4836 }
4837
4838 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
4839 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
4840 {
4841         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
4842         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
4843 }
4844
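/* Sum the per-cpu counters into @dest. Each per-cpu snapshot is taken under
 * the u64_stats seqcount and retried if a writer raced with us, so the totals
 * are consistent without taking the multicast lock.
 */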
4845 void br_multicast_get_stats(const struct net_bridge *br,
4846                             const struct net_bridge_port *p,
4847                             struct br_mcast_stats *dest)
4848 {
4849         struct bridge_mcast_stats __percpu *stats;
4850         struct br_mcast_stats tdst;
4851         int i;
4852
4853         memset(dest, 0, sizeof(*dest));
4854         if (p)
4855                 stats = p->mcast_stats;
4856         else
4857                 stats = br->mcast_stats;
4858         if (WARN_ON(!stats))
4859                 return;
4860
4861         memset(&tdst, 0, sizeof(tdst));
4862         for_each_possible_cpu(i) {
4863                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
4864                 struct br_mcast_stats temp;
4865                 unsigned int start;
4866
4867                 do {
4868                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4869                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
4870                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4871
4872                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
4873                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
4874                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
4875                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
4876                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
4877                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
4878                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
4879                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
4880
4881                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
4882                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
4883                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
4884                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
4885                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
4886                 tdst.mld_parse_errors += temp.mld_parse_errors;
4887         }
4888         memcpy(dest, &tdst, sizeof(*dest));
4889 }
4890
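/* The bridge keeps two rhashtables: sg_port_tbl for per-port (S, G) entries
 * and mdb_hash_tbl for the mdb itself; if creating the second one fails, the
 * first is torn down again.
 */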
4891 int br_mdb_hash_init(struct net_bridge *br)
4892 {
4893         int err;
4894
4895         err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4896         if (err)
4897                 return err;
4898
4899         err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4900         if (err) {
4901                 rhashtable_destroy(&br->sg_port_tbl);
4902                 return err;
4903         }
4904
4905         return 0;
4906 }
4907
4908 void br_mdb_hash_fini(struct net_bridge *br)
4909 {
4910         rhashtable_destroy(&br->sg_port_tbl);
4911         rhashtable_destroy(&br->mdb_hash_tbl);
4912 }