net/bridge/br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36
37 static const struct rhashtable_params br_mdb_rht_params = {
38         .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
39         .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
40         .key_len = sizeof(struct br_ip),
41         .automatic_shrinking = true,
42 };
43
44 static const struct rhashtable_params br_sg_port_rht_params = {
45         .head_offset = offsetof(struct net_bridge_port_group, rhnode),
46         .key_offset = offsetof(struct net_bridge_port_group, key),
47         .key_len = sizeof(struct net_bridge_port_group_sg_key),
48         .automatic_shrinking = true,
49 };
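/* Note on the two hash tables above: the mdb hash table is keyed on the
 * whole struct br_ip (protocol, vid, source and destination), so *,G and
 * S,G groups are distinct entries, while the per-port S,G table is keyed
 * on the {port, address} pair (struct net_bridge_port_group_sg_key).
 */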
50
51 static void br_multicast_start_querier(struct net_bridge *br,
52                                        struct bridge_mcast_own_query *query);
53 static void br_multicast_add_router(struct net_bridge *br,
54                                     struct net_bridge_port *port);
55 static void br_ip4_multicast_leave_group(struct net_bridge *br,
56                                          struct net_bridge_port *port,
57                                          __be32 group,
58                                          __u16 vid,
59                                          const unsigned char *src);
60 static void br_multicast_port_group_rexmit(struct timer_list *t);
61
62 static void __del_port_router(struct net_bridge_port *p);
63 #if IS_ENABLED(CONFIG_IPV6)
64 static void br_ip6_multicast_leave_group(struct net_bridge *br,
65                                          struct net_bridge_port *port,
66                                          const struct in6_addr *group,
67                                          __u16 vid, const unsigned char *src);
68 #endif
69 static struct net_bridge_port_group *
70 __br_multicast_add_group(struct net_bridge *br,
71                          struct net_bridge_port *port,
72                          struct br_ip *group,
73                          const unsigned char *src,
74                          u8 filter_mode,
75                          bool igmpv2_mldv1,
76                          bool blocked);
77 static void br_multicast_find_del_pg(struct net_bridge *br,
78                                      struct net_bridge_port_group *pg);
79
80 static struct net_bridge_port_group *
81 br_sg_port_find(struct net_bridge *br,
82                 struct net_bridge_port_group_sg_key *sg_p)
83 {
84         lockdep_assert_held_once(&br->multicast_lock);
85
86         return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
87                                       br_sg_port_rht_params);
88 }
89
90 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
91                                                       struct br_ip *dst)
92 {
93         return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
94 }
95
96 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
97                                            struct br_ip *dst)
98 {
99         struct net_bridge_mdb_entry *ent;
100
101         lockdep_assert_held_once(&br->multicast_lock);
102
103         rcu_read_lock();
104         ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
105         rcu_read_unlock();
106
107         return ent;
108 }
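/* br_mdb_ip_get() is the variant for callers holding br->multicast_lock;
 * the rcu_read_lock() here only satisfies rhashtable_lookup()'s RCU
 * requirement, and the returned entry stays valid because writers are
 * excluded by the multicast lock. br_mdb_ip_get_rcu() above is for callers
 * already in an RCU read-side section (e.g. the forwarding path via
 * br_mdb_get() below).
 */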
109
110 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
111                                                    __be32 dst, __u16 vid)
112 {
113         struct br_ip br_dst;
114
115         memset(&br_dst, 0, sizeof(br_dst));
116         br_dst.dst.ip4 = dst;
117         br_dst.proto = htons(ETH_P_IP);
118         br_dst.vid = vid;
119
120         return br_mdb_ip_get(br, &br_dst);
121 }
122
123 #if IS_ENABLED(CONFIG_IPV6)
124 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
125                                                    const struct in6_addr *dst,
126                                                    __u16 vid)
127 {
128         struct br_ip br_dst;
129
130         memset(&br_dst, 0, sizeof(br_dst));
131         br_dst.dst.ip6 = *dst;
132         br_dst.proto = htons(ETH_P_IPV6);
133         br_dst.vid = vid;
134
135         return br_mdb_ip_get(br, &br_dst);
136 }
137 #endif
138
139 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
140                                         struct sk_buff *skb, u16 vid)
141 {
142         struct br_ip ip;
143
144         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
145                 return NULL;
146
147         if (BR_INPUT_SKB_CB(skb)->igmp)
148                 return NULL;
149
150         memset(&ip, 0, sizeof(ip));
151         ip.proto = skb->protocol;
152         ip.vid = vid;
153
154         switch (skb->protocol) {
155         case htons(ETH_P_IP):
156                 ip.dst.ip4 = ip_hdr(skb)->daddr;
157                 if (br->multicast_igmp_version == 3) {
158                         struct net_bridge_mdb_entry *mdb;
159
160                         ip.src.ip4 = ip_hdr(skb)->saddr;
161                         mdb = br_mdb_ip_get_rcu(br, &ip);
162                         if (mdb)
163                                 return mdb;
164                         ip.src.ip4 = 0;
165                 }
166                 break;
167 #if IS_ENABLED(CONFIG_IPV6)
168         case htons(ETH_P_IPV6):
169                 ip.dst.ip6 = ipv6_hdr(skb)->daddr;
170                 if (br->multicast_mld_version == 2) {
171                         struct net_bridge_mdb_entry *mdb;
172
173                         ip.src.ip6 = ipv6_hdr(skb)->saddr;
174                         mdb = br_mdb_ip_get_rcu(br, &ip);
175                         if (mdb)
176                                 return mdb;
177                         memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
178                 }
179                 break;
180 #endif
181         default:
182                 ip.proto = 0;
183                 ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
184         }
185
186         return br_mdb_ip_get_rcu(br, &ip);
187 }
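/* Lookup order used by br_mdb_get(): snooping must be enabled and the skb
 * must not itself be IGMP/MLD control traffic; with IGMPv3/MLDv2 an S,G
 * match using the packet's source address is tried first, falling back to
 * *,G, and non-IP protocols fall back to an L2 lookup keyed by the
 * destination MAC (proto == 0).
 */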
188
189 static bool br_port_group_equal(struct net_bridge_port_group *p,
190                                 struct net_bridge_port *port,
191                                 const unsigned char *src)
192 {
193         if (p->key.port != port)
194                 return false;
195
196         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
197                 return true;
198
199         return ether_addr_equal(src, p->eth_addr);
200 }
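/* With BR_MULTICAST_TO_UNICAST set on the port, group entries are also
 * distinguished by the subscriber's MAC address (eth_addr), so a port can
 * hold one entry per joined host instead of a single shared entry.
 */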
201
202 static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
203                                 struct br_ip *sg_ip)
204 {
205         struct net_bridge_port_group_sg_key sg_key;
206         struct net_bridge *br = pg->key.port->br;
207         struct net_bridge_port_group *src_pg;
208
209         memset(&sg_key, 0, sizeof(sg_key));
210         sg_key.port = pg->key.port;
211         sg_key.addr = *sg_ip;
212         if (br_sg_port_find(br, &sg_key))
213                 return;
214
215         src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
216                                           MCAST_INCLUDE, false, false);
217         if (IS_ERR_OR_NULL(src_pg) ||
218             src_pg->rt_protocol != RTPROT_KERNEL)
219                 return;
220
221         src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
222 }
223
224 static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
225                                 struct br_ip *sg_ip)
226 {
227         struct net_bridge_port_group_sg_key sg_key;
228         struct net_bridge *br = pg->key.port->br;
229         struct net_bridge_port_group *src_pg;
230
231         memset(&sg_key, 0, sizeof(sg_key));
232         sg_key.port = pg->key.port;
233         sg_key.addr = *sg_ip;
234         src_pg = br_sg_port_find(br, &sg_key);
235         if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
236             src_pg->rt_protocol != RTPROT_KERNEL)
237                 return;
238
239         br_multicast_find_del_pg(br, src_pg);
240 }
241
242 /* When a port group transitions to (or is added as) EXCLUDE we need to add it
243  * to all other ports' S,G entries which are not blocked by the current group,
244  * so that replication works properly. The assumption is that any blocked S,G
245  * entries have already been added, so the S,G,port lookup should skip them.
246  * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
247  * deleted we need to remove it from all ports' S,G entries where it was
248  * automatically installed before (i.e. where MDB_PG_FLAGS_STAR_EXCL is set).
249  */
250 void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
251                                      u8 filter_mode)
252 {
253         struct net_bridge *br = pg->key.port->br;
254         struct net_bridge_port_group *pg_lst;
255         struct net_bridge_mdb_entry *mp;
256         struct br_ip sg_ip;
257
258         if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
259                 return;
260
261         mp = br_mdb_ip_get(br, &pg->key.addr);
262         if (!mp)
263                 return;
264
265         memset(&sg_ip, 0, sizeof(sg_ip));
266         sg_ip = pg->key.addr;
267         for (pg_lst = mlock_dereference(mp->ports, br);
268              pg_lst;
269              pg_lst = mlock_dereference(pg_lst->next, br)) {
270                 struct net_bridge_group_src *src_ent;
271
272                 if (pg_lst == pg)
273                         continue;
274                 hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
275                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
276                                 continue;
277                         sg_ip.src = src_ent->addr.src;
278                         switch (filter_mode) {
279                         case MCAST_INCLUDE:
280                                 __fwd_del_star_excl(pg, &sg_ip);
281                                 break;
282                         case MCAST_EXCLUDE:
283                                 __fwd_add_star_excl(pg, &sg_ip);
284                                 break;
285                         }
286                 }
287         }
288 }
289
290 /* called when adding a new S,G with host_joined == false by default */
291 static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
292                                        struct net_bridge_port_group *sg)
293 {
294         struct net_bridge_mdb_entry *sg_mp;
295
296         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
297                 return;
298         if (!star_mp->host_joined)
299                 return;
300
301         sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
302         if (!sg_mp)
303                 return;
304         sg_mp->host_joined = true;
305 }
306
307 /* set the host_joined state of all of *,G's S,G entries */
308 static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
309 {
310         struct net_bridge *br = star_mp->br;
311         struct net_bridge_mdb_entry *sg_mp;
312         struct net_bridge_port_group *pg;
313         struct br_ip sg_ip;
314
315         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
316                 return;
317
318         memset(&sg_ip, 0, sizeof(sg_ip));
319         sg_ip = star_mp->addr;
320         for (pg = mlock_dereference(star_mp->ports, br);
321              pg;
322              pg = mlock_dereference(pg->next, br)) {
323                 struct net_bridge_group_src *src_ent;
324
325                 hlist_for_each_entry(src_ent, &pg->src_list, node) {
326                         if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
327                                 continue;
328                         sg_ip.src = src_ent->addr.src;
329                         sg_mp = br_mdb_ip_get(br, &sg_ip);
330                         if (!sg_mp)
331                                 continue;
332                         sg_mp->host_joined = star_mp->host_joined;
333                 }
334         }
335 }
336
337 static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
338 {
339         struct net_bridge_port_group __rcu **pp;
340         struct net_bridge_port_group *p;
341
342         /* *,G exclude ports are only added to S,G entries */
343         if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
344                 return;
345
346         /* keep the STAR_EXCLUDE ports only if there are non-STAR_EXCLUDE ports;
347          * permanent entries are ignored since they're managed by user-space
348          */
349         for (pp = &sgmp->ports;
350              (p = mlock_dereference(*pp, sgmp->br)) != NULL;
351              pp = &p->next)
352                 if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
353                                   MDB_PG_FLAGS_PERMANENT)))
354                         return;
355
356         /* currently the host can only have joined the *,G which means
357          * we treat it as EXCLUDE {}, so for an S,G it's considered a
358          * STAR_EXCLUDE entry and we can safely leave it
359          */
360         sgmp->host_joined = false;
361
362         for (pp = &sgmp->ports;
363              (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
364                 if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
365                         br_multicast_del_pg(sgmp, p, pp);
366                 else
367                         pp = &p->next;
368         }
369 }
370
371 void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
372                                        struct net_bridge_port_group *sg)
373 {
374         struct net_bridge_port_group_sg_key sg_key;
375         struct net_bridge *br = star_mp->br;
376         struct net_bridge_port_group *pg;
377
378         if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
379                 return;
380         if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
381                 return;
382
383         br_multicast_sg_host_state(star_mp, sg);
384         memset(&sg_key, 0, sizeof(sg_key));
385         sg_key.addr = sg->key.addr;
386         /* we need to add all exclude ports to the S,G */
387         for (pg = mlock_dereference(star_mp->ports, br);
388              pg;
389              pg = mlock_dereference(pg->next, br)) {
390                 struct net_bridge_port_group *src_pg;
391
392                 if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
393                         continue;
394
395                 sg_key.port = pg->key.port;
396                 if (br_sg_port_find(br, &sg_key))
397                         continue;
398
399                 src_pg = __br_multicast_add_group(br, pg->key.port,
400                                                   &sg->key.addr,
401                                                   sg->eth_addr,
402                                                   MCAST_INCLUDE, false, false);
403                 if (IS_ERR_OR_NULL(src_pg) ||
404                     src_pg->rt_protocol != RTPROT_KERNEL)
405                         continue;
406                 src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
407         }
408 }
409
410 static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
411 {
412         struct net_bridge_mdb_entry *star_mp;
413         struct net_bridge_port_group *sg;
414         struct br_ip sg_ip;
415
416         if (src->flags & BR_SGRP_F_INSTALLED)
417                 return;
418
419         memset(&sg_ip, 0, sizeof(sg_ip));
420         sg_ip = src->pg->key.addr;
421         sg_ip.src = src->addr.src;
422         sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
423                                       src->pg->eth_addr, MCAST_INCLUDE, false,
424                                       !timer_pending(&src->timer));
425         if (IS_ERR_OR_NULL(sg))
426                 return;
427         src->flags |= BR_SGRP_F_INSTALLED;
428         sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
429
430         /* if it was added by user-space as perm we can skip next steps */
431         if (sg->rt_protocol != RTPROT_KERNEL &&
432             (sg->flags & MDB_PG_FLAGS_PERMANENT))
433                 return;
434
435         /* the kernel is now responsible for removing this S,G */
436         del_timer(&sg->timer);
437         star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
438         if (!star_mp)
439                 return;
440
441         br_multicast_sg_add_exclude_ports(star_mp, sg);
442 }
443
444 static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
445 {
446         struct net_bridge_port_group *p, *pg = src->pg;
447         struct net_bridge_port_group __rcu **pp;
448         struct net_bridge_mdb_entry *mp;
449         struct br_ip sg_ip;
450
451         memset(&sg_ip, 0, sizeof(sg_ip));
452         sg_ip = pg->key.addr;
453         sg_ip.src = src->addr.src;
454
455         mp = br_mdb_ip_get(src->br, &sg_ip);
456         if (!mp)
457                 return;
458
459         for (pp = &mp->ports;
460              (p = mlock_dereference(*pp, src->br)) != NULL;
461              pp = &p->next) {
462                 if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
463                         continue;
464
465                 if (p->rt_protocol != RTPROT_KERNEL &&
466                     (p->flags & MDB_PG_FLAGS_PERMANENT))
467                         break;
468
469                 br_multicast_del_pg(mp, p, pp);
470                 break;
471         }
472         src->flags &= ~BR_SGRP_F_INSTALLED;
473 }
474
475 /* install the S,G entry and enable or disable forwarding based on src's timer */
476 static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
477 {
478         struct net_bridge_port_group_sg_key sg_key;
479         struct net_bridge_port_group *sg;
480         u8 old_flags;
481
482         br_multicast_fwd_src_add(src);
483
484         memset(&sg_key, 0, sizeof(sg_key));
485         sg_key.addr = src->pg->key.addr;
486         sg_key.addr.src = src->addr.src;
487         sg_key.port = src->pg->key.port;
488
489         sg = br_sg_port_find(src->br, &sg_key);
490         if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
491                 return;
492
493         old_flags = sg->flags;
494         if (timer_pending(&src->timer))
495                 sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
496         else
497                 sg->flags |= MDB_PG_FLAGS_BLOCKED;
498
499         if (old_flags != sg->flags) {
500                 struct net_bridge_mdb_entry *sg_mp;
501
502                 sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
503                 if (!sg_mp)
504                         return;
505                 br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
506         }
507 }
508
509 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
510 {
511         struct net_bridge_mdb_entry *mp;
512
513         mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
514         WARN_ON(!hlist_unhashed(&mp->mdb_node));
515         WARN_ON(mp->ports);
516
517         del_timer_sync(&mp->timer);
518         kfree_rcu(mp, rcu);
519 }
520
521 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
522 {
523         struct net_bridge *br = mp->br;
524
525         rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
526                                br_mdb_rht_params);
527         hlist_del_init_rcu(&mp->mdb_node);
528         hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
529         queue_work(system_long_wq, &br->mcast_gc_work);
530 }
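/* Freeing is deferred: entries are unlinked under the multicast lock, put on
 * br->mcast_gc_list and destroyed from the mcast_gc_work workqueue, so the
 * destroy callbacks (e.g. br_multicast_destroy_mdb_entry() above) can
 * del_timer_sync() and kfree_rcu() in process context, outside the spinlock.
 */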
531
532 static void br_multicast_group_expired(struct timer_list *t)
533 {
534         struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
535         struct net_bridge *br = mp->br;
536
537         spin_lock(&br->multicast_lock);
538         if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
539             timer_pending(&mp->timer))
540                 goto out;
541
542         br_multicast_host_leave(mp, true);
543
544         if (mp->ports)
545                 goto out;
546         br_multicast_del_mdb_entry(mp);
547 out:
548         spin_unlock(&br->multicast_lock);
549 }
550
551 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
552 {
553         struct net_bridge_group_src *src;
554
555         src = container_of(gc, struct net_bridge_group_src, mcast_gc);
556         WARN_ON(!hlist_unhashed(&src->node));
557
558         del_timer_sync(&src->timer);
559         kfree_rcu(src, rcu);
560 }
561
562 static void br_multicast_del_group_src(struct net_bridge_group_src *src)
563 {
564         struct net_bridge *br = src->pg->key.port->br;
565
566         br_multicast_fwd_src_remove(src);
567         hlist_del_init_rcu(&src->node);
568         src->pg->src_ents--;
569         hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
570         queue_work(system_long_wq, &br->mcast_gc_work);
571 }
572
573 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
574 {
575         struct net_bridge_port_group *pg;
576
577         pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
578         WARN_ON(!hlist_unhashed(&pg->mglist));
579         WARN_ON(!hlist_empty(&pg->src_list));
580
581         del_timer_sync(&pg->rexmit_timer);
582         del_timer_sync(&pg->timer);
583         kfree_rcu(pg, rcu);
584 }
585
586 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
587                          struct net_bridge_port_group *pg,
588                          struct net_bridge_port_group __rcu **pp)
589 {
590         struct net_bridge *br = pg->key.port->br;
591         struct net_bridge_group_src *ent;
592         struct hlist_node *tmp;
593
594         rcu_assign_pointer(*pp, pg->next);
595         hlist_del_init(&pg->mglist);
596         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
597                 br_multicast_del_group_src(ent);
598         br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
599         if (!br_multicast_is_star_g(&mp->addr)) {
600                 rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
601                                        br_sg_port_rht_params);
602                 br_multicast_sg_del_exclude_ports(mp);
603         } else {
604                 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
605         }
606         hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
607         queue_work(system_long_wq, &br->mcast_gc_work);
608
609         if (!mp->ports && !mp->host_joined && netif_running(br->dev))
610                 mod_timer(&mp->timer, jiffies);
611 }
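/* Note: if the deleted port group was the last one and the host has not
 * joined, the group timer is armed to fire immediately (mod_timer with
 * jiffies) so br_multicast_group_expired() removes the now-empty mdb entry.
 */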
612
613 static void br_multicast_find_del_pg(struct net_bridge *br,
614                                      struct net_bridge_port_group *pg)
615 {
616         struct net_bridge_port_group __rcu **pp;
617         struct net_bridge_mdb_entry *mp;
618         struct net_bridge_port_group *p;
619
620         mp = br_mdb_ip_get(br, &pg->key.addr);
621         if (WARN_ON(!mp))
622                 return;
623
624         for (pp = &mp->ports;
625              (p = mlock_dereference(*pp, br)) != NULL;
626              pp = &p->next) {
627                 if (p != pg)
628                         continue;
629
630                 br_multicast_del_pg(mp, pg, pp);
631                 return;
632         }
633
634         WARN_ON(1);
635 }
636
637 static void br_multicast_port_group_expired(struct timer_list *t)
638 {
639         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
640         struct net_bridge_group_src *src_ent;
641         struct net_bridge *br = pg->key.port->br;
642         struct hlist_node *tmp;
643         bool changed;
644
645         spin_lock(&br->multicast_lock);
646         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
647             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
648                 goto out;
649
650         changed = !!(pg->filter_mode == MCAST_EXCLUDE);
651         pg->filter_mode = MCAST_INCLUDE;
652         hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
653                 if (!timer_pending(&src_ent->timer)) {
654                         br_multicast_del_group_src(src_ent);
655                         changed = true;
656                 }
657         }
658
659         if (hlist_empty(&pg->src_list)) {
660                 br_multicast_find_del_pg(br, pg);
661         } else if (changed) {
662                 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
663
664                 if (br_multicast_is_star_g(&pg->key.addr))
665                         br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
666
667                 if (WARN_ON(!mp))
668                         goto out;
669                 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
670         }
671 out:
672         spin_unlock(&br->multicast_lock);
673 }
674
675 static void br_multicast_gc(struct hlist_head *head)
676 {
677         struct net_bridge_mcast_gc *gcent;
678         struct hlist_node *tmp;
679
680         hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
681                 hlist_del_init(&gcent->gc_node);
682                 gcent->destroy(gcent);
683         }
684 }
685
686 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
687                                                     struct net_bridge_port_group *pg,
688                                                     __be32 ip_dst, __be32 group,
689                                                     bool with_srcs, bool over_lmqt,
690                                                     u8 sflag, u8 *igmp_type,
691                                                     bool *need_rexmit)
692 {
693         struct net_bridge_port *p = pg ? pg->key.port : NULL;
694         struct net_bridge_group_src *ent;
695         size_t pkt_size, igmp_hdr_size;
696         unsigned long now = jiffies;
697         struct igmpv3_query *ihv3;
698         void *csum_start = NULL;
699         __sum16 *csum = NULL;
700         struct sk_buff *skb;
701         struct igmphdr *ih;
702         struct ethhdr *eth;
703         unsigned long lmqt;
704         struct iphdr *iph;
705         u16 lmqt_srcs = 0;
706
707         igmp_hdr_size = sizeof(*ih);
708         if (br->multicast_igmp_version == 3) {
709                 igmp_hdr_size = sizeof(*ihv3);
710                 if (pg && with_srcs) {
711                         lmqt = now + (br->multicast_last_member_interval *
712                                       br->multicast_last_member_count);
713                         hlist_for_each_entry(ent, &pg->src_list, node) {
714                                 if (over_lmqt == time_after(ent->timer.expires,
715                                                             lmqt) &&
716                                     ent->src_query_rexmit_cnt > 0)
717                                         lmqt_srcs++;
718                         }
719
720                         if (!lmqt_srcs)
721                                 return NULL;
722                         igmp_hdr_size += lmqt_srcs * sizeof(__be32);
723                 }
724         }
725
726         pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
727         if ((p && pkt_size > p->dev->mtu) ||
728             pkt_size > br->dev->mtu)
729                 return NULL;
730
731         skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
732         if (!skb)
733                 goto out;
734
735         skb->protocol = htons(ETH_P_IP);
736
737         skb_reset_mac_header(skb);
738         eth = eth_hdr(skb);
739
740         ether_addr_copy(eth->h_source, br->dev->dev_addr);
741         ip_eth_mc_map(ip_dst, eth->h_dest);
742         eth->h_proto = htons(ETH_P_IP);
743         skb_put(skb, sizeof(*eth));
744
745         skb_set_network_header(skb, skb->len);
746         iph = ip_hdr(skb);
747         iph->tot_len = htons(pkt_size - sizeof(*eth));
748
749         iph->version = 4;
750         iph->ihl = 6;
751         iph->tos = 0xc0;
752         iph->id = 0;
753         iph->frag_off = htons(IP_DF);
754         iph->ttl = 1;
755         iph->protocol = IPPROTO_IGMP;
756         iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
757                      inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
758         iph->daddr = ip_dst;
759         ((u8 *)&iph[1])[0] = IPOPT_RA;
760         ((u8 *)&iph[1])[1] = 4;
761         ((u8 *)&iph[1])[2] = 0;
762         ((u8 *)&iph[1])[3] = 0;
763         ip_send_check(iph);
764         skb_put(skb, 24);
765
766         skb_set_transport_header(skb, skb->len);
767         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
768
769         switch (br->multicast_igmp_version) {
770         case 2:
771                 ih = igmp_hdr(skb);
772                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
773                 ih->code = (group ? br->multicast_last_member_interval :
774                                     br->multicast_query_response_interval) /
775                            (HZ / IGMP_TIMER_SCALE);
776                 ih->group = group;
777                 ih->csum = 0;
778                 csum = &ih->csum;
779                 csum_start = (void *)ih;
780                 break;
781         case 3:
782                 ihv3 = igmpv3_query_hdr(skb);
783                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
784                 ihv3->code = (group ? br->multicast_last_member_interval :
785                                       br->multicast_query_response_interval) /
786                              (HZ / IGMP_TIMER_SCALE);
787                 ihv3->group = group;
788                 ihv3->qqic = br->multicast_query_interval / HZ;
789                 ihv3->nsrcs = htons(lmqt_srcs);
790                 ihv3->resv = 0;
791                 ihv3->suppress = sflag;
792                 ihv3->qrv = 2;
793                 ihv3->csum = 0;
794                 csum = &ihv3->csum;
795                 csum_start = (void *)ihv3;
796                 if (!pg || !with_srcs)
797                         break;
798
799                 lmqt_srcs = 0;
800                 hlist_for_each_entry(ent, &pg->src_list, node) {
801                         if (over_lmqt == time_after(ent->timer.expires,
802                                                     lmqt) &&
803                             ent->src_query_rexmit_cnt > 0) {
804                                 ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
805                                 ent->src_query_rexmit_cnt--;
806                                 if (need_rexmit && ent->src_query_rexmit_cnt)
807                                         *need_rexmit = true;
808                         }
809                 }
810                 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
811                         kfree_skb(skb);
812                         return NULL;
813                 }
814                 break;
815         }
816
817         if (WARN_ON(!csum || !csum_start)) {
818                 kfree_skb(skb);
819                 return NULL;
820         }
821
822         *csum = ip_compute_csum(csum_start, igmp_hdr_size);
823         skb_put(skb, igmp_hdr_size);
824         __skb_pull(skb, sizeof(*eth));
825
826 out:
827         return skb;
828 }
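/* The IGMP query built above is: Ethernet header, a 24-byte IPv4 header
 * (ihl = 6, i.e. 20 bytes plus the 4-byte Router Alert option), then an
 * IGMPv2 or IGMPv3 query header. The Ethernet header is pulled at the end
 * so skb->data points at the IP header, as for a received frame.
 */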
829
830 #if IS_ENABLED(CONFIG_IPV6)
831 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
832                                                     struct net_bridge_port_group *pg,
833                                                     const struct in6_addr *ip6_dst,
834                                                     const struct in6_addr *group,
835                                                     bool with_srcs, bool over_llqt,
836                                                     u8 sflag, u8 *igmp_type,
837                                                     bool *need_rexmit)
838 {
839         struct net_bridge_port *p = pg ? pg->key.port : NULL;
840         struct net_bridge_group_src *ent;
841         size_t pkt_size, mld_hdr_size;
842         unsigned long now = jiffies;
843         struct mld2_query *mld2q;
844         void *csum_start = NULL;
845         unsigned long interval;
846         __sum16 *csum = NULL;
847         struct ipv6hdr *ip6h;
848         struct mld_msg *mldq;
849         struct sk_buff *skb;
850         unsigned long llqt;
851         struct ethhdr *eth;
852         u16 llqt_srcs = 0;
853         u8 *hopopt;
854
855         mld_hdr_size = sizeof(*mldq);
856         if (br->multicast_mld_version == 2) {
857                 mld_hdr_size = sizeof(*mld2q);
858                 if (pg && with_srcs) {
859                         llqt = now + (br->multicast_last_member_interval *
860                                       br->multicast_last_member_count);
861                         hlist_for_each_entry(ent, &pg->src_list, node) {
862                                 if (over_llqt == time_after(ent->timer.expires,
863                                                             llqt) &&
864                                     ent->src_query_rexmit_cnt > 0)
865                                         llqt_srcs++;
866                         }
867
868                         if (!llqt_srcs)
869                                 return NULL;
870                         mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
871                 }
872         }
873
874         pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
875         if ((p && pkt_size > p->dev->mtu) ||
876             pkt_size > br->dev->mtu)
877                 return NULL;
878
879         skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
880         if (!skb)
881                 goto out;
882
883         skb->protocol = htons(ETH_P_IPV6);
884
885         /* Ethernet header */
886         skb_reset_mac_header(skb);
887         eth = eth_hdr(skb);
888
889         ether_addr_copy(eth->h_source, br->dev->dev_addr);
890         eth->h_proto = htons(ETH_P_IPV6);
891         skb_put(skb, sizeof(*eth));
892
893         /* IPv6 header + HbH option */
894         skb_set_network_header(skb, skb->len);
895         ip6h = ipv6_hdr(skb);
896
897         *(__force __be32 *)ip6h = htonl(0x60000000);
898         ip6h->payload_len = htons(8 + mld_hdr_size);
899         ip6h->nexthdr = IPPROTO_HOPOPTS;
900         ip6h->hop_limit = 1;
901         ip6h->daddr = *ip6_dst;
902         if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
903                                &ip6h->saddr)) {
904                 kfree_skb(skb);
905                 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
906                 return NULL;
907         }
908
909         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
910         ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
911
912         hopopt = (u8 *)(ip6h + 1);
913         hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
914         hopopt[1] = 0;                          /* length of HbH */
915         hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
916         hopopt[3] = 2;                          /* Length of RA Option */
917         hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
918         hopopt[5] = 0;
919         hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
920         hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */
921
922         skb_put(skb, sizeof(*ip6h) + 8);
923
924         /* ICMPv6 */
925         skb_set_transport_header(skb, skb->len);
926         interval = ipv6_addr_any(group) ?
927                         br->multicast_query_response_interval :
928                         br->multicast_last_member_interval;
929         *igmp_type = ICMPV6_MGM_QUERY;
930         switch (br->multicast_mld_version) {
931         case 1:
932                 mldq = (struct mld_msg *)icmp6_hdr(skb);
933                 mldq->mld_type = ICMPV6_MGM_QUERY;
934                 mldq->mld_code = 0;
935                 mldq->mld_cksum = 0;
936                 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
937                 mldq->mld_reserved = 0;
938                 mldq->mld_mca = *group;
939                 csum = &mldq->mld_cksum;
940                 csum_start = (void *)mldq;
941                 break;
942         case 2:
943                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
944                 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
945                 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
946                 mld2q->mld2q_code = 0;
947                 mld2q->mld2q_cksum = 0;
948                 mld2q->mld2q_resv1 = 0;
949                 mld2q->mld2q_resv2 = 0;
950                 mld2q->mld2q_suppress = sflag;
951                 mld2q->mld2q_qrv = 2;
952                 mld2q->mld2q_nsrcs = htons(llqt_srcs);
953                 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
954                 mld2q->mld2q_mca = *group;
955                 csum = &mld2q->mld2q_cksum;
956                 csum_start = (void *)mld2q;
957                 if (!pg || !with_srcs)
958                         break;
959
960                 llqt_srcs = 0;
961                 hlist_for_each_entry(ent, &pg->src_list, node) {
962                         if (over_llqt == time_after(ent->timer.expires,
963                                                     llqt) &&
964                             ent->src_query_rexmit_cnt > 0) {
965                                 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
966                                 ent->src_query_rexmit_cnt--;
967                                 if (need_rexmit && ent->src_query_rexmit_cnt)
968                                         *need_rexmit = true;
969                         }
970                 }
971                 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
972                         kfree_skb(skb);
973                         return NULL;
974                 }
975                 break;
976         }
977
978         if (WARN_ON(!csum || !csum_start)) {
979                 kfree_skb(skb);
980                 return NULL;
981         }
982
983         *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
984                                 IPPROTO_ICMPV6,
985                                 csum_partial(csum_start, mld_hdr_size, 0));
986         skb_put(skb, mld_hdr_size);
987         __skb_pull(skb, sizeof(*eth));
988
989 out:
990         return skb;
991 }
992 #endif
993
994 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
995                                                 struct net_bridge_port_group *pg,
996                                                 struct br_ip *ip_dst,
997                                                 struct br_ip *group,
998                                                 bool with_srcs, bool over_lmqt,
999                                                 u8 sflag, u8 *igmp_type,
1000                                                 bool *need_rexmit)
1001 {
1002         __be32 ip4_dst;
1003
1004         switch (group->proto) {
1005         case htons(ETH_P_IP):
1006                 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1007                 return br_ip4_multicast_alloc_query(br, pg,
1008                                                     ip4_dst, group->dst.ip4,
1009                                                     with_srcs, over_lmqt,
1010                                                     sflag, igmp_type,
1011                                                     need_rexmit);
1012 #if IS_ENABLED(CONFIG_IPV6)
1013         case htons(ETH_P_IPV6): {
1014                 struct in6_addr ip6_dst;
1015
1016                 if (ip_dst)
1017                         ip6_dst = ip_dst->dst.ip6;
1018                 else
1019                         ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1020                                       htonl(1));
1021
1022                 return br_ip6_multicast_alloc_query(br, pg,
1023                                                     &ip6_dst, &group->dst.ip6,
1024                                                     with_srcs, over_lmqt,
1025                                                     sflag, igmp_type,
1026                                                     need_rexmit);
1027         }
1028 #endif
1029         }
1030         return NULL;
1031 }
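/* When no explicit destination is given, the general query addresses are
 * used: 224.0.0.1 (all-hosts) for IGMP and ff02::1 (all-nodes) for MLD.
 */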
1032
1033 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
1034                                                     struct br_ip *group)
1035 {
1036         struct net_bridge_mdb_entry *mp;
1037         int err;
1038
1039         mp = br_mdb_ip_get(br, group);
1040         if (mp)
1041                 return mp;
1042
1043         if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
1044                 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
1045                 return ERR_PTR(-E2BIG);
1046         }
1047
1048         mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
1049         if (unlikely(!mp))
1050                 return ERR_PTR(-ENOMEM);
1051
1052         mp->br = br;
1053         mp->addr = *group;
1054         mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
1055         timer_setup(&mp->timer, br_multicast_group_expired, 0);
1056         err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
1057                                             br_mdb_rht_params);
1058         if (err) {
1059                 kfree(mp);
1060                 mp = ERR_PTR(err);
1061         } else {
1062                 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
1063         }
1064
1065         return mp;
1066 }
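/* Note: once the mdb table reaches hash_max entries, multicast snooping is
 * turned off (BROPT_MULTICAST_ENABLED cleared) and -E2BIG is returned; the
 * allocation itself uses GFP_ATOMIC since callers hold br->multicast_lock.
 */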
1067
1068 static void br_multicast_group_src_expired(struct timer_list *t)
1069 {
1070         struct net_bridge_group_src *src = from_timer(src, t, timer);
1071         struct net_bridge_port_group *pg;
1072         struct net_bridge *br = src->br;
1073
1074         spin_lock(&br->multicast_lock);
1075         if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
1076             timer_pending(&src->timer))
1077                 goto out;
1078
1079         pg = src->pg;
1080         if (pg->filter_mode == MCAST_INCLUDE) {
1081                 br_multicast_del_group_src(src);
1082                 if (!hlist_empty(&pg->src_list))
1083                         goto out;
1084                 br_multicast_find_del_pg(br, pg);
1085         } else {
1086                 br_multicast_fwd_src_handle(src);
1087         }
1088
1089 out:
1090         spin_unlock(&br->multicast_lock);
1091 }
1092
1093 static struct net_bridge_group_src *
1094 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1095 {
1096         struct net_bridge_group_src *ent;
1097
1098         switch (ip->proto) {
1099         case htons(ETH_P_IP):
1100                 hlist_for_each_entry(ent, &pg->src_list, node)
1101                         if (ip->src.ip4 == ent->addr.src.ip4)
1102                                 return ent;
1103                 break;
1104 #if IS_ENABLED(CONFIG_IPV6)
1105         case htons(ETH_P_IPV6):
1106                 hlist_for_each_entry(ent, &pg->src_list, node)
1107                         if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1108                                 return ent;
1109                 break;
1110 #endif
1111         }
1112
1113         return NULL;
1114 }
1115
1116 static struct net_bridge_group_src *
1117 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
1118 {
1119         struct net_bridge_group_src *grp_src;
1120
1121         if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
1122                 return NULL;
1123
1124         switch (src_ip->proto) {
1125         case htons(ETH_P_IP):
1126                 if (ipv4_is_zeronet(src_ip->src.ip4) ||
1127                     ipv4_is_multicast(src_ip->src.ip4))
1128                         return NULL;
1129                 break;
1130 #if IS_ENABLED(CONFIG_IPV6)
1131         case htons(ETH_P_IPV6):
1132                 if (ipv6_addr_any(&src_ip->src.ip6) ||
1133                     ipv6_addr_is_multicast(&src_ip->src.ip6))
1134                         return NULL;
1135                 break;
1136 #endif
1137         }
1138
1139         grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
1140         if (unlikely(!grp_src))
1141                 return NULL;
1142
1143         grp_src->pg = pg;
1144         grp_src->br = pg->key.port->br;
1145         grp_src->addr = *src_ip;
1146         grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
1147         timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
1148
1149         hlist_add_head_rcu(&grp_src->node, &pg->src_list);
1150         pg->src_ents++;
1151
1152         return grp_src;
1153 }
1154
1155 struct net_bridge_port_group *br_multicast_new_port_group(
1156                         struct net_bridge_port *port,
1157                         struct br_ip *group,
1158                         struct net_bridge_port_group __rcu *next,
1159                         unsigned char flags,
1160                         const unsigned char *src,
1161                         u8 filter_mode,
1162                         u8 rt_protocol)
1163 {
1164         struct net_bridge_port_group *p;
1165
1166         p = kzalloc(sizeof(*p), GFP_ATOMIC);
1167         if (unlikely(!p))
1168                 return NULL;
1169
1170         p->key.addr = *group;
1171         p->key.port = port;
1172         p->flags = flags;
1173         p->filter_mode = filter_mode;
1174         p->rt_protocol = rt_protocol;
1175         p->mcast_gc.destroy = br_multicast_destroy_port_group;
1176         INIT_HLIST_HEAD(&p->src_list);
1177
1178         if (!br_multicast_is_star_g(group) &&
1179             rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
1180                                           br_sg_port_rht_params)) {
1181                 kfree(p);
1182                 return NULL;
1183         }
1184
1185         rcu_assign_pointer(p->next, next);
1186         timer_setup(&p->timer, br_multicast_port_group_expired, 0);
1187         timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
1188         hlist_add_head(&p->mglist, &port->mglist);
1189
1190         if (src)
1191                 memcpy(p->eth_addr, src, ETH_ALEN);
1192         else
1193                 eth_broadcast_addr(p->eth_addr);
1194
1195         return p;
1196 }
1197
1198 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
1199 {
1200         if (!mp->host_joined) {
1201                 mp->host_joined = true;
1202                 if (br_multicast_is_star_g(&mp->addr))
1203                         br_multicast_star_g_host_state(mp);
1204                 if (notify)
1205                         br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
1206         }
1207
1208         if (br_group_is_l2(&mp->addr))
1209                 return;
1210
1211         mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
1212 }
1213
1214 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
1215 {
1216         if (!mp->host_joined)
1217                 return;
1218
1219         mp->host_joined = false;
1220         if (br_multicast_is_star_g(&mp->addr))
1221                 br_multicast_star_g_host_state(mp);
1222         if (notify)
1223                 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
1224 }
1225
1226 static struct net_bridge_port_group *
1227 __br_multicast_add_group(struct net_bridge *br,
1228                          struct net_bridge_port *port,
1229                          struct br_ip *group,
1230                          const unsigned char *src,
1231                          u8 filter_mode,
1232                          bool igmpv2_mldv1,
1233                          bool blocked)
1234 {
1235         struct net_bridge_port_group __rcu **pp;
1236         struct net_bridge_port_group *p = NULL;
1237         struct net_bridge_mdb_entry *mp;
1238         unsigned long now = jiffies;
1239
1240         if (!netif_running(br->dev) ||
1241             (port && port->state == BR_STATE_DISABLED))
1242                 goto out;
1243
1244         mp = br_multicast_new_group(br, group);
1245         if (IS_ERR(mp))
1246                 return ERR_CAST(mp);
1247
1248         if (!port) {
1249                 br_multicast_host_join(mp, true);
1250                 goto out;
1251         }
1252
1253         for (pp = &mp->ports;
1254              (p = mlock_dereference(*pp, br)) != NULL;
1255              pp = &p->next) {
1256                 if (br_port_group_equal(p, port, src))
1257                         goto found;
1258                 if ((unsigned long)p->key.port < (unsigned long)port)
1259                         break;
1260         }
1261
1262         p = br_multicast_new_port_group(port, group, *pp, 0, src,
1263                                         filter_mode, RTPROT_KERNEL);
1264         if (unlikely(!p)) {
1265                 p = ERR_PTR(-ENOMEM);
1266                 goto out;
1267         }
1268         rcu_assign_pointer(*pp, p);
1269         if (blocked)
1270                 p->flags |= MDB_PG_FLAGS_BLOCKED;
1271         br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
1272
1273 found:
1274         if (igmpv2_mldv1)
1275                 mod_timer(&p->timer, now + br->multicast_membership_interval);
1276
1277 out:
1278         return p;
1279 }
1280
1281 static int br_multicast_add_group(struct net_bridge *br,
1282                                   struct net_bridge_port *port,
1283                                   struct br_ip *group,
1284                                   const unsigned char *src,
1285                                   u8 filter_mode,
1286                                   bool igmpv2_mldv1)
1287 {
1288         struct net_bridge_port_group *pg;
1289         int err;
1290
1291         spin_lock(&br->multicast_lock);
1292         pg = __br_multicast_add_group(br, port, group, src, filter_mode,
1293                                       igmpv2_mldv1, false);
1294         /* NULL is considered valid for host joined groups */
1295         err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
1296         spin_unlock(&br->multicast_lock);
1297
1298         return err;
1299 }
1300
1301 static int br_ip4_multicast_add_group(struct net_bridge *br,
1302                                       struct net_bridge_port *port,
1303                                       __be32 group,
1304                                       __u16 vid,
1305                                       const unsigned char *src,
1306                                       bool igmpv2)
1307 {
1308         struct br_ip br_group;
1309         u8 filter_mode;
1310
1311         if (ipv4_is_local_multicast(group))
1312                 return 0;
1313
1314         memset(&br_group, 0, sizeof(br_group));
1315         br_group.dst.ip4 = group;
1316         br_group.proto = htons(ETH_P_IP);
1317         br_group.vid = vid;
1318         filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1319
1320         return br_multicast_add_group(br, port, &br_group, src, filter_mode,
1321                                       igmpv2);
1322 }
1323
1324 #if IS_ENABLED(CONFIG_IPV6)
1325 static int br_ip6_multicast_add_group(struct net_bridge *br,
1326                                       struct net_bridge_port *port,
1327                                       const struct in6_addr *group,
1328                                       __u16 vid,
1329                                       const unsigned char *src,
1330                                       bool mldv1)
1331 {
1332         struct br_ip br_group;
1333         u8 filter_mode;
1334
1335         if (ipv6_addr_is_ll_all_nodes(group))
1336                 return 0;
1337
1338         memset(&br_group, 0, sizeof(br_group));
1339         br_group.dst.ip6 = *group;
1340         br_group.proto = htons(ETH_P_IPV6);
1341         br_group.vid = vid;
1342         filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1343
1344         return br_multicast_add_group(br, port, &br_group, src, filter_mode,
1345                                       mldv1);
1346 }
1347 #endif
1348
1349 static void br_multicast_router_expired(struct timer_list *t)
1350 {
1351         struct net_bridge_port *port =
1352                         from_timer(port, t, multicast_router_timer);
1353         struct net_bridge *br = port->br;
1354
1355         spin_lock(&br->multicast_lock);
1356         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1357             port->multicast_router == MDB_RTR_TYPE_PERM ||
1358             timer_pending(&port->multicast_router_timer))
1359                 goto out;
1360
1361         __del_port_router(port);
1362 out:
1363         spin_unlock(&br->multicast_lock);
1364 }
1365
1366 static void br_mc_router_state_change(struct net_bridge *p,
1367                                       bool is_mc_router)
1368 {
1369         struct switchdev_attr attr = {
1370                 .orig_dev = p->dev,
1371                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
1372                 .flags = SWITCHDEV_F_DEFER,
1373                 .u.mrouter = is_mc_router,
1374         };
1375
1376         switchdev_port_attr_set(p->dev, &attr);
1377 }
1378
1379 static void br_multicast_local_router_expired(struct timer_list *t)
1380 {
1381         struct net_bridge *br = from_timer(br, t, multicast_router_timer);
1382
1383         spin_lock(&br->multicast_lock);
1384         if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
1385             br->multicast_router == MDB_RTR_TYPE_PERM ||
1386             timer_pending(&br->multicast_router_timer))
1387                 goto out;
1388
1389         br_mc_router_state_change(br, false);
1390 out:
1391         spin_unlock(&br->multicast_lock);
1392 }
1393
1394 static void br_multicast_querier_expired(struct net_bridge *br,
1395                                          struct bridge_mcast_own_query *query)
1396 {
1397         spin_lock(&br->multicast_lock);
1398         if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1399                 goto out;
1400
1401         br_multicast_start_querier(br, query);
1402
1403 out:
1404         spin_unlock(&br->multicast_lock);
1405 }
1406
1407 static void br_ip4_multicast_querier_expired(struct timer_list *t)
1408 {
1409         struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
1410
1411         br_multicast_querier_expired(br, &br->ip4_own_query);
1412 }
1413
1414 #if IS_ENABLED(CONFIG_IPV6)
1415 static void br_ip6_multicast_querier_expired(struct timer_list *t)
1416 {
1417         struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
1418
1419         br_multicast_querier_expired(br, &br->ip6_own_query);
1420 }
1421 #endif
1422
1423 static void br_multicast_select_own_querier(struct net_bridge *br,
1424                                             struct br_ip *ip,
1425                                             struct sk_buff *skb)
1426 {
1427         if (ip->proto == htons(ETH_P_IP))
1428                 br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
1429 #if IS_ENABLED(CONFIG_IPV6)
1430         else
1431                 br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
1432 #endif
1433 }
1434
1435 static void __br_multicast_send_query(struct net_bridge *br,
1436                                       struct net_bridge_port *port,
1437                                       struct net_bridge_port_group *pg,
1438                                       struct br_ip *ip_dst,
1439                                       struct br_ip *group,
1440                                       bool with_srcs,
1441                                       u8 sflag,
1442                                       bool *need_rexmit)
1443 {
1444         bool over_lmqt = !!sflag;
1445         struct sk_buff *skb;
1446         u8 igmp_type;
1447
1448 again_under_lmqt:
1449         skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
1450                                        over_lmqt, sflag, &igmp_type,
1451                                        need_rexmit);
1452         if (!skb)
1453                 return;
1454
1455         if (port) {
1456                 skb->dev = port->dev;
1457                 br_multicast_count(br, port, skb, igmp_type,
1458                                    BR_MCAST_DIR_TX);
1459                 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
1460                         dev_net(port->dev), NULL, skb, NULL, skb->dev,
1461                         br_dev_queue_push_xmit);
1462
1463                 if (over_lmqt && with_srcs && sflag) {
1464                         over_lmqt = false;
1465                         goto again_under_lmqt;
1466                 }
1467         } else {
1468                 br_multicast_select_own_querier(br, group, skb);
1469                 br_multicast_count(br, port, skb, igmp_type,
1470                                    BR_MCAST_DIR_RX);
1471                 netif_rx(skb);
1472         }
1473 }
1474
1475 static void br_multicast_send_query(struct net_bridge *br,
1476                                     struct net_bridge_port *port,
1477                                     struct bridge_mcast_own_query *own_query)
1478 {
1479         struct bridge_mcast_other_query *other_query = NULL;
1480         struct br_ip br_group;
1481         unsigned long time;
1482
1483         if (!netif_running(br->dev) ||
1484             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1485             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1486                 return;
1487
1488         memset(&br_group.dst, 0, sizeof(br_group.dst));
1489
1490         if (port ? (own_query == &port->ip4_own_query) :
1491                    (own_query == &br->ip4_own_query)) {
1492                 other_query = &br->ip4_other_query;
1493                 br_group.proto = htons(ETH_P_IP);
1494 #if IS_ENABLED(CONFIG_IPV6)
1495         } else {
1496                 other_query = &br->ip6_other_query;
1497                 br_group.proto = htons(ETH_P_IPV6);
1498 #endif
1499         }
1500
1501         if (!other_query || timer_pending(&other_query->timer))
1502                 return;
1503
1504         __br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
1505                                   NULL);
1506
1507         time = jiffies;
1508         time += own_query->startup_sent < br->multicast_startup_query_count ?
1509                 br->multicast_startup_query_interval :
1510                 br->multicast_query_interval;
1511         mod_timer(&own_query->timer, time);
1512 }
1513
1514 static void
1515 br_multicast_port_query_expired(struct net_bridge_port *port,
1516                                 struct bridge_mcast_own_query *query)
1517 {
1518         struct net_bridge *br = port->br;
1519
1520         spin_lock(&br->multicast_lock);
1521         if (port->state == BR_STATE_DISABLED ||
1522             port->state == BR_STATE_BLOCKING)
1523                 goto out;
1524
1525         if (query->startup_sent < br->multicast_startup_query_count)
1526                 query->startup_sent++;
1527
1528         br_multicast_send_query(port->br, port, query);
1529
1530 out:
1531         spin_unlock(&br->multicast_lock);
1532 }
1533
1534 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1535 {
1536         struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
1537
1538         br_multicast_port_query_expired(port, &port->ip4_own_query);
1539 }
1540
1541 #if IS_ENABLED(CONFIG_IPV6)
1542 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1543 {
1544         struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
1545
1546         br_multicast_port_query_expired(port, &port->ip6_own_query);
1547 }
1548 #endif
1549
1550 static void br_multicast_port_group_rexmit(struct timer_list *t)
1551 {
1552         struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
1553         struct bridge_mcast_other_query *other_query = NULL;
1554         struct net_bridge *br = pg->key.port->br;
1555         bool need_rexmit = false;
1556
1557         spin_lock(&br->multicast_lock);
1558         if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
1559             !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1560             !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1561                 goto out;
1562
1563         if (pg->key.addr.proto == htons(ETH_P_IP))
1564                 other_query = &br->ip4_other_query;
1565 #if IS_ENABLED(CONFIG_IPV6)
1566         else
1567                 other_query = &br->ip6_other_query;
1568 #endif
1569
1570         if (!other_query || timer_pending(&other_query->timer))
1571                 goto out;
1572
1573         if (pg->grp_query_rexmit_cnt) {
1574                 pg->grp_query_rexmit_cnt--;
1575                 __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1576                                           &pg->key.addr, false, 1, NULL);
1577         }
1578         __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1579                                   &pg->key.addr, true, 0, &need_rexmit);
1580
1581         if (pg->grp_query_rexmit_cnt || need_rexmit)
1582                 mod_timer(&pg->rexmit_timer, jiffies +
1583                                              br->multicast_last_member_interval);
1584 out:
1585         spin_unlock(&br->multicast_lock);
1586 }
1587
1588 static void br_mc_disabled_update(struct net_device *dev, bool value)
1589 {
1590         struct switchdev_attr attr = {
1591                 .orig_dev = dev,
1592                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1593                 .flags = SWITCHDEV_F_DEFER,
1594                 .u.mc_disabled = !value,
1595         };
1596
1597         switchdev_port_attr_set(dev, &attr);
1598 }
1599
1600 int br_multicast_add_port(struct net_bridge_port *port)
1601 {
1602         port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1603
1604         timer_setup(&port->multicast_router_timer,
1605                     br_multicast_router_expired, 0);
1606         timer_setup(&port->ip4_own_query.timer,
1607                     br_ip4_multicast_port_query_expired, 0);
1608 #if IS_ENABLED(CONFIG_IPV6)
1609         timer_setup(&port->ip6_own_query.timer,
1610                     br_ip6_multicast_port_query_expired, 0);
1611 #endif
1612         br_mc_disabled_update(port->dev,
1613                               br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
1614
1615         port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1616         if (!port->mcast_stats)
1617                 return -ENOMEM;
1618
1619         return 0;
1620 }
1621
1622 void br_multicast_del_port(struct net_bridge_port *port)
1623 {
1624         struct net_bridge *br = port->br;
1625         struct net_bridge_port_group *pg;
1626         HLIST_HEAD(deleted_head);
1627         struct hlist_node *n;
1628
1629         /* Take care of the remaining groups; only permanent ones should be left */
1630         spin_lock_bh(&br->multicast_lock);
1631         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1632                 br_multicast_find_del_pg(br, pg);
1633         hlist_move_list(&br->mcast_gc_list, &deleted_head);
1634         spin_unlock_bh(&br->multicast_lock);
1635         br_multicast_gc(&deleted_head);
1636         del_timer_sync(&port->multicast_router_timer);
1637         free_percpu(port->mcast_stats);
1638 }
1639
1640 static void br_multicast_enable(struct bridge_mcast_own_query *query)
1641 {
1642         query->startup_sent = 0;
1643
1644         if (try_to_del_timer_sync(&query->timer) >= 0 ||
1645             del_timer(&query->timer))
1646                 mod_timer(&query->timer, jiffies);
1647 }
1648
1649 static void __br_multicast_enable_port(struct net_bridge_port *port)
1650 {
1651         struct net_bridge *br = port->br;
1652
1653         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
1654                 return;
1655
1656         br_multicast_enable(&port->ip4_own_query);
1657 #if IS_ENABLED(CONFIG_IPV6)
1658         br_multicast_enable(&port->ip6_own_query);
1659 #endif
1660         if (port->multicast_router == MDB_RTR_TYPE_PERM &&
1661             hlist_unhashed(&port->rlist))
1662                 br_multicast_add_router(br, port);
1663 }
1664
1665 void br_multicast_enable_port(struct net_bridge_port *port)
1666 {
1667         struct net_bridge *br = port->br;
1668
1669         spin_lock(&br->multicast_lock);
1670         __br_multicast_enable_port(port);
1671         spin_unlock(&br->multicast_lock);
1672 }
1673
1674 void br_multicast_disable_port(struct net_bridge_port *port)
1675 {
1676         struct net_bridge *br = port->br;
1677         struct net_bridge_port_group *pg;
1678         struct hlist_node *n;
1679
1680         spin_lock(&br->multicast_lock);
1681         hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1682                 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
1683                         br_multicast_find_del_pg(br, pg);
1684
1685         __del_port_router(port);
1686
1687         del_timer(&port->multicast_router_timer);
1688         del_timer(&port->ip4_own_query.timer);
1689 #if IS_ENABLED(CONFIG_IPV6)
1690         del_timer(&port->ip6_own_query.timer);
1691 #endif
1692         spin_unlock(&br->multicast_lock);
1693 }
1694
1695 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1696 {
1697         struct net_bridge_group_src *ent;
1698         struct hlist_node *tmp;
1699         int deleted = 0;
1700
1701         hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1702                 if (ent->flags & BR_SGRP_F_DELETE) {
1703                         br_multicast_del_group_src(ent);
1704                         deleted++;
1705                 }
1706
1707         return deleted;
1708 }
1709
1710 static void __grp_src_mod_timer(struct net_bridge_group_src *src,
1711                                 unsigned long expires)
1712 {
1713         mod_timer(&src->timer, expires);
1714         br_multicast_fwd_src_handle(src);
1715 }
1716
1717 static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
1718 {
1719         struct bridge_mcast_other_query *other_query = NULL;
1720         struct net_bridge *br = pg->key.port->br;
1721         u32 lmqc = br->multicast_last_member_count;
1722         unsigned long lmqt, lmi, now = jiffies;
1723         struct net_bridge_group_src *ent;
1724
1725         if (!netif_running(br->dev) ||
1726             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1727                 return;
1728
1729         if (pg->key.addr.proto == htons(ETH_P_IP))
1730                 other_query = &br->ip4_other_query;
1731 #if IS_ENABLED(CONFIG_IPV6)
1732         else
1733                 other_query = &br->ip6_other_query;
1734 #endif
1735
1736         lmqt = now + br_multicast_lmqt(br);
1737         hlist_for_each_entry(ent, &pg->src_list, node) {
1738                 if (ent->flags & BR_SGRP_F_SEND) {
1739                         ent->flags &= ~BR_SGRP_F_SEND;
1740                         if (ent->timer.expires > lmqt) {
1741                                 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1742                                     other_query &&
1743                                     !timer_pending(&other_query->timer))
1744                                         ent->src_query_rexmit_cnt = lmqc;
1745                                 __grp_src_mod_timer(ent, lmqt);
1746                         }
1747                 }
1748         }
1749
1750         if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
1751             !other_query || timer_pending(&other_query->timer))
1752                 return;
1753
1754         __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1755                                   &pg->key.addr, true, 1, NULL);
1756
1757         lmi = now + br->multicast_last_member_interval;
1758         if (!timer_pending(&pg->rexmit_timer) ||
1759             time_after(pg->rexmit_timer.expires, lmi))
1760                 mod_timer(&pg->rexmit_timer, lmi);
1761 }
1762
1763 static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
1764 {
1765         struct bridge_mcast_other_query *other_query = NULL;
1766         struct net_bridge *br = pg->key.port->br;
1767         unsigned long now = jiffies, lmi;
1768
1769         if (!netif_running(br->dev) ||
1770             !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1771                 return;
1772
1773         if (pg->key.addr.proto == htons(ETH_P_IP))
1774                 other_query = &br->ip4_other_query;
1775 #if IS_ENABLED(CONFIG_IPV6)
1776         else
1777                 other_query = &br->ip6_other_query;
1778 #endif
1779
1780         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1781             other_query && !timer_pending(&other_query->timer)) {
1782                 lmi = now + br->multicast_last_member_interval;
1783                 pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
1784                 __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
1785                                           &pg->key.addr, false, 0, NULL);
1786                 if (!timer_pending(&pg->rexmit_timer) ||
1787                     time_after(pg->rexmit_timer.expires, lmi))
1788                         mod_timer(&pg->rexmit_timer, lmi);
1789         }
1790
1791         if (pg->filter_mode == MCAST_EXCLUDE &&
1792             (!timer_pending(&pg->timer) ||
1793              time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
1794                 mod_timer(&pg->timer, now + br_multicast_lmqt(br));
1795 }
1796
1797 /* State          Msg type      New state                Actions
1798  * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)            (B)=GMI
1799  * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)            (B)=GMI
1800  * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1801  */
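/* Worked example for the table above (illustrative, with hypothetical
 * sources S1/S2): a port group in INCLUDE {S1} receiving IS_IN/ALLOW
 * {S2} ends up in INCLUDE {S1, S2}; every source listed in the report
 * gets its source timer (re)armed to the group membership interval
 * (GMI), while unlisted sources such as S1 are left untouched.
 */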
1802 static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
1803                                      void *srcs, u32 nsrcs, size_t src_size)
1804 {
1805         struct net_bridge *br = pg->key.port->br;
1806         struct net_bridge_group_src *ent;
1807         unsigned long now = jiffies;
1808         bool changed = false;
1809         struct br_ip src_ip;
1810         u32 src_idx;
1811
1812         memset(&src_ip, 0, sizeof(src_ip));
1813         src_ip.proto = pg->key.addr.proto;
1814         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1815                 memcpy(&src_ip.src, srcs, src_size);
1816                 ent = br_multicast_find_group_src(pg, &src_ip);
1817                 if (!ent) {
1818                         ent = br_multicast_new_group_src(pg, &src_ip);
1819                         if (ent)
1820                                 changed = true;
1821                 }
1822
1823                 if (ent)
1824                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
1825                 srcs += src_size;
1826         }
1827
1828         return changed;
1829 }
1830
1831 /* State          Msg type      New state                Actions
1832  * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
1833  *                                                       Delete (A-B)
1834  *                                                       Group Timer=GMI
1835  */
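/* Illustration (hypothetical sources): a group in INCLUDE {S1, S2}
 * receiving IS_EX {S2, S3} keeps S2, creates S3 without starting its
 * source timer (the "(B-A)=0" action) and deletes S1 via
 * __grp_src_delete_marked(); the caller, br_multicast_isexc(), then
 * switches the group to EXCLUDE mode and arms the group timer to GMI.
 */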
1836 static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
1837                                  void *srcs, u32 nsrcs, size_t src_size)
1838 {
1839         struct net_bridge_group_src *ent;
1840         struct br_ip src_ip;
1841         u32 src_idx;
1842
1843         hlist_for_each_entry(ent, &pg->src_list, node)
1844                 ent->flags |= BR_SGRP_F_DELETE;
1845
1846         memset(&src_ip, 0, sizeof(src_ip));
1847         src_ip.proto = pg->key.addr.proto;
1848         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1849                 memcpy(&src_ip.src, srcs, src_size);
1850                 ent = br_multicast_find_group_src(pg, &src_ip);
1851                 if (ent)
1852                         ent->flags &= ~BR_SGRP_F_DELETE;
1853                 else
1854                         ent = br_multicast_new_group_src(pg, &src_ip);
1855                 if (ent)
1856                         br_multicast_fwd_src_handle(ent);
1857                 srcs += src_size;
1858         }
1859
1860         __grp_src_delete_marked(pg);
1861 }
1862
1863 /* State          Msg type      New state                Actions
1864  * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=GMI
1865  *                                                       Delete (X-A)
1866  *                                                       Delete (Y-A)
1867  *                                                       Group Timer=GMI
1868  */
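/* Illustration (hypothetical sources, EXCLUDE mode with X = {S1},
 * Y = {S2}): receiving IS_EX {S2, S3} creates S3 with its timer armed
 * to GMI (it falls in A-X-Y), keeps S2, and deletes S1 because it was
 * not listed (X-A); the group timer is reset to GMI by the caller.
 */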
1869 static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
1870                                  void *srcs, u32 nsrcs, size_t src_size)
1871 {
1872         struct net_bridge *br = pg->key.port->br;
1873         struct net_bridge_group_src *ent;
1874         unsigned long now = jiffies;
1875         bool changed = false;
1876         struct br_ip src_ip;
1877         u32 src_idx;
1878
1879         hlist_for_each_entry(ent, &pg->src_list, node)
1880                 ent->flags |= BR_SGRP_F_DELETE;
1881
1882         memset(&src_ip, 0, sizeof(src_ip));
1883         src_ip.proto = pg->key.addr.proto;
1884         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1885                 memcpy(&src_ip.src, srcs, src_size);
1886                 ent = br_multicast_find_group_src(pg, &src_ip);
1887                 if (ent) {
1888                         ent->flags &= ~BR_SGRP_F_DELETE;
1889                 } else {
1890                         ent = br_multicast_new_group_src(pg, &src_ip);
1891                         if (ent) {
1892                                 __grp_src_mod_timer(ent,
1893                                                     now + br_multicast_gmi(br));
1894                                 changed = true;
1895                         }
1896                 }
1897                 srcs += src_size;
1898         }
1899
1900         if (__grp_src_delete_marked(pg))
1901                 changed = true;
1902
1903         return changed;
1904 }
1905
1906 static bool br_multicast_isexc(struct net_bridge_port_group *pg,
1907                                void *srcs, u32 nsrcs, size_t src_size)
1908 {
1909         struct net_bridge *br = pg->key.port->br;
1910         bool changed = false;
1911
1912         switch (pg->filter_mode) {
1913         case MCAST_INCLUDE:
1914                 __grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
1915                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
1916                 changed = true;
1917                 break;
1918         case MCAST_EXCLUDE:
1919                 changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
1920                 break;
1921         }
1922
1923         pg->filter_mode = MCAST_EXCLUDE;
1924         mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1925
1926         return changed;
1927 }
1928
1929 /* State          Msg type      New state                Actions
1930  * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)            (B)=GMI
1931  *                                                       Send Q(G,A-B)
1932  */
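/* Illustration (hypothetical sources): a group in INCLUDE {S1, S2}
 * receiving TO_IN {S2, S3} becomes INCLUDE {S1, S2, S3} with the
 * timers of S2 and S3 set to GMI; S1 keeps its BR_SGRP_F_SEND mark, so
 * a group-and-source specific query Q(G, {S1}) is scheduled through
 * __grp_src_query_marked_and_rexmit().
 */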
1933 static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
1934                                 void *srcs, u32 nsrcs, size_t src_size)
1935 {
1936         struct net_bridge *br = pg->key.port->br;
1937         u32 src_idx, to_send = pg->src_ents;
1938         struct net_bridge_group_src *ent;
1939         unsigned long now = jiffies;
1940         bool changed = false;
1941         struct br_ip src_ip;
1942
1943         hlist_for_each_entry(ent, &pg->src_list, node)
1944                 ent->flags |= BR_SGRP_F_SEND;
1945
1946         memset(&src_ip, 0, sizeof(src_ip));
1947         src_ip.proto = pg->key.addr.proto;
1948         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1949                 memcpy(&src_ip.src, srcs, src_size);
1950                 ent = br_multicast_find_group_src(pg, &src_ip);
1951                 if (ent) {
1952                         ent->flags &= ~BR_SGRP_F_SEND;
1953                         to_send--;
1954                 } else {
1955                         ent = br_multicast_new_group_src(pg, &src_ip);
1956                         if (ent)
1957                                 changed = true;
1958                 }
1959                 if (ent)
1960                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
1961                 srcs += src_size;
1962         }
1963
1964         if (to_send)
1965                 __grp_src_query_marked_and_rexmit(pg);
1966
1967         return changed;
1968 }
1969
1970 /* State          Msg type      New state                Actions
1971  * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A)        (A)=GMI
1972  *                                                       Send Q(G,X-A)
1973  *                                                       Send Q(G)
1974  */
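/* Illustration: in EXCLUDE mode only sources with a running timer (the
 * requested set X) are candidates for the Q(G, X-A) query; every
 * source listed in the TO_IN report has its timer set to GMI, missing
 * ones are created, and a group specific query Q(G) is sent afterwards
 * via __grp_send_query_and_rexmit().
 */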
1975 static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
1976                                 void *srcs, u32 nsrcs, size_t src_size)
1977 {
1978         struct net_bridge *br = pg->key.port->br;
1979         u32 src_idx, to_send = pg->src_ents;
1980         struct net_bridge_group_src *ent;
1981         unsigned long now = jiffies;
1982         bool changed = false;
1983         struct br_ip src_ip;
1984
1985         hlist_for_each_entry(ent, &pg->src_list, node)
1986                 if (timer_pending(&ent->timer))
1987                         ent->flags |= BR_SGRP_F_SEND;
1988
1989         memset(&src_ip, 0, sizeof(src_ip));
1990         src_ip.proto = pg->key.addr.proto;
1991         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1992                 memcpy(&src_ip.src, srcs, src_size);
1993                 ent = br_multicast_find_group_src(pg, &src_ip);
1994                 if (ent) {
1995                         if (timer_pending(&ent->timer)) {
1996                                 ent->flags &= ~BR_SGRP_F_SEND;
1997                                 to_send--;
1998                         }
1999                 } else {
2000                         ent = br_multicast_new_group_src(pg, &src_ip);
2001                         if (ent)
2002                                 changed = true;
2003                 }
2004                 if (ent)
2005                         __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
2006                 srcs += src_size;
2007         }
2008
2009         if (to_send)
2010                 __grp_src_query_marked_and_rexmit(pg);
2011
2012         __grp_send_query_and_rexmit(pg);
2013
2014         return changed;
2015 }
2016
2017 static bool br_multicast_toin(struct net_bridge_port_group *pg,
2018                               void *srcs, u32 nsrcs, size_t src_size)
2019 {
2020         bool changed = false;
2021
2022         switch (pg->filter_mode) {
2023         case MCAST_INCLUDE:
2024                 changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
2025                 break;
2026         case MCAST_EXCLUDE:
2027                 changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
2028                 break;
2029         }
2030
2031         return changed;
2032 }
2033
2034 /* State          Msg type      New state                Actions
2035  * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A)        (B-A)=0
2036  *                                                       Delete (A-B)
2037  *                                                       Send Q(G,A*B)
2038  *                                                       Group Timer=GMI
2039  */
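/* Illustration (hypothetical sources): a group in INCLUDE {S1, S2}
 * receiving TO_EX {S2, S3} deletes S1 (A-B), keeps S2 and schedules
 * Q(G, {S2}) for it, and creates S3 without starting its timer
 * ("(B-A)=0"); the caller then switches the group to EXCLUDE mode and
 * arms the group timer to GMI.
 */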
2040 static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
2041                                 void *srcs, u32 nsrcs, size_t src_size)
2042 {
2043         struct net_bridge_group_src *ent;
2044         u32 src_idx, to_send = 0;
2045         struct br_ip src_ip;
2046
2047         hlist_for_each_entry(ent, &pg->src_list, node)
2048                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2049
2050         memset(&src_ip, 0, sizeof(src_ip));
2051         src_ip.proto = pg->key.addr.proto;
2052         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2053                 memcpy(&src_ip.src, srcs, src_size);
2054                 ent = br_multicast_find_group_src(pg, &src_ip);
2055                 if (ent) {
2056                         ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
2057                                      BR_SGRP_F_SEND;
2058                         to_send++;
2059                 } else {
2060                         ent = br_multicast_new_group_src(pg, &src_ip);
2061                 }
2062                 if (ent)
2063                         br_multicast_fwd_src_handle(ent);
2064                 srcs += src_size;
2065         }
2066
2067         __grp_src_delete_marked(pg);
2068         if (to_send)
2069                 __grp_src_query_marked_and_rexmit(pg);
2070 }
2071
2072 /* State          Msg type      New state                Actions
2073  * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A)        (A-X-Y)=Group Timer
2074  *                                                       Delete (X-A)
2075  *                                                       Delete (Y-A)
2076  *                                                       Send Q(G,A-Y)
2077  *                                                       Group Timer=GMI
2078  */
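/* Illustration (hypothetical sources, EXCLUDE mode with X = {S1},
 * Y = {S2}): receiving TO_EX {S2, S3} deletes S1 (X-A), keeps S2, and
 * creates S3 with its timer inheriting the current group timer
 * ("(A-X-Y)=Group Timer"); reported sources whose timers are still
 * running are then queried with Q(G, A-Y).
 */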
2079 static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
2080                                 void *srcs, u32 nsrcs, size_t src_size)
2081 {
2082         struct net_bridge_group_src *ent;
2083         u32 src_idx, to_send = 0;
2084         bool changed = false;
2085         struct br_ip src_ip;
2086
2087         hlist_for_each_entry(ent, &pg->src_list, node)
2088                 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
2089
2090         memset(&src_ip, 0, sizeof(src_ip));
2091         src_ip.proto = pg->key.addr.proto;
2092         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2093                 memcpy(&src_ip.src, srcs, src_size);
2094                 ent = br_multicast_find_group_src(pg, &src_ip);
2095                 if (ent) {
2096                         ent->flags &= ~BR_SGRP_F_DELETE;
2097                 } else {
2098                         ent = br_multicast_new_group_src(pg, &src_ip);
2099                         if (ent) {
2100                                 __grp_src_mod_timer(ent, pg->timer.expires);
2101                                 changed = true;
2102                         }
2103                 }
2104                 if (ent && timer_pending(&ent->timer)) {
2105                         ent->flags |= BR_SGRP_F_SEND;
2106                         to_send++;
2107                 }
2108                 srcs += src_size;
2109         }
2110
2111         if (__grp_src_delete_marked(pg))
2112                 changed = true;
2113         if (to_send)
2114                 __grp_src_query_marked_and_rexmit(pg);
2115
2116         return changed;
2117 }
2118
2119 static bool br_multicast_toex(struct net_bridge_port_group *pg,
2120                               void *srcs, u32 nsrcs, size_t src_size)
2121 {
2122         struct net_bridge *br = pg->key.port->br;
2123         bool changed = false;
2124
2125         switch (pg->filter_mode) {
2126         case MCAST_INCLUDE:
2127                 __grp_src_toex_incl(pg, srcs, nsrcs, src_size);
2128                 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2129                 changed = true;
2130                 break;
2131         case MCAST_EXCLUDE:
2132                 changed = __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
2133                 break;
2134         }
2135
2136         pg->filter_mode = MCAST_EXCLUDE;
2137         mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
2138
2139         return changed;
2140 }
2141
2142 /* State          Msg type      New state                Actions
2143  * INCLUDE (A)    BLOCK (B)     INCLUDE (A)              Send Q(G,A*B)
2144  */
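/* Illustration (hypothetical sources): a group in INCLUDE {S1, S2}
 * receiving BLOCK {S2, S3} keeps its source list unchanged and only
 * sends Q(G, {S2}) for the blocked source that is actually present
 * (A*B); if the include source list is empty, the whole port group is
 * removed.
 */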
2145 static void __grp_src_block_incl(struct net_bridge_port_group *pg,
2146                                  void *srcs, u32 nsrcs, size_t src_size)
2147 {
2148         struct net_bridge_group_src *ent;
2149         u32 src_idx, to_send = 0;
2150         struct br_ip src_ip;
2151
2152         hlist_for_each_entry(ent, &pg->src_list, node)
2153                 ent->flags &= ~BR_SGRP_F_SEND;
2154
2155         memset(&src_ip, 0, sizeof(src_ip));
2156         src_ip.proto = pg->key.addr.proto;
2157         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2158                 memcpy(&src_ip.src, srcs, src_size);
2159                 ent = br_multicast_find_group_src(pg, &src_ip);
2160                 if (ent) {
2161                         ent->flags |= BR_SGRP_F_SEND;
2162                         to_send++;
2163                 }
2164                 srcs += src_size;
2165         }
2166
2167         if (to_send)
2168                 __grp_src_query_marked_and_rexmit(pg);
2169
2170         if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
2171                 br_multicast_find_del_pg(pg->key.port->br, pg);
2172 }
2173
2174 /* State          Msg type      New state                Actions
2175  * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y)      (A-X-Y)=Group Timer
2176  *                                                       Send Q(G,A-Y)
2177  */
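/* Illustration (hypothetical sources, EXCLUDE mode with X = {S1},
 * Y = {S2}): receiving BLOCK {S2, S3} creates S3 with its timer
 * inheriting the group timer ("(A-X-Y)=Group Timer") and queries the
 * reported sources that have a running timer with Q(G, A-Y); nothing
 * is deleted.
 */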
2178 static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
2179                                  void *srcs, u32 nsrcs, size_t src_size)
2180 {
2181         struct net_bridge_group_src *ent;
2182         u32 src_idx, to_send = 0;
2183         bool changed = false;
2184         struct br_ip src_ip;
2185
2186         hlist_for_each_entry(ent, &pg->src_list, node)
2187                 ent->flags &= ~BR_SGRP_F_SEND;
2188
2189         memset(&src_ip, 0, sizeof(src_ip));
2190         src_ip.proto = pg->key.addr.proto;
2191         for (src_idx = 0; src_idx < nsrcs; src_idx++) {
2192                 memcpy(&src_ip.src, srcs, src_size);
2193                 ent = br_multicast_find_group_src(pg, &src_ip);
2194                 if (!ent) {
2195                         ent = br_multicast_new_group_src(pg, &src_ip);
2196                         if (ent) {
2197                                 __grp_src_mod_timer(ent, pg->timer.expires);
2198                                 changed = true;
2199                         }
2200                 }
2201                 if (ent && timer_pending(&ent->timer)) {
2202                         ent->flags |= BR_SGRP_F_SEND;
2203                         to_send++;
2204                 }
2205                 srcs += src_size;
2206         }
2207
2208         if (to_send)
2209                 __grp_src_query_marked_and_rexmit(pg);
2210
2211         return changed;
2212 }
2213
2214 static bool br_multicast_block(struct net_bridge_port_group *pg,
2215                                void *srcs, u32 nsrcs, size_t src_size)
2216 {
2217         bool changed = false;
2218
2219         switch (pg->filter_mode) {
2220         case MCAST_INCLUDE:
2221                 __grp_src_block_incl(pg, srcs, nsrcs, src_size);
2222                 break;
2223         case MCAST_EXCLUDE:
2224                 changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
2225                 break;
2226         }
2227
2228         return changed;
2229 }
2230
2231 static struct net_bridge_port_group *
2232 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2233                        struct net_bridge_port *p,
2234                        const unsigned char *src)
2235 {
2236         struct net_bridge *br __maybe_unused = mp->br;
2237         struct net_bridge_port_group *pg;
2238
2239         for (pg = mlock_dereference(mp->ports, br);
2240              pg;
2241              pg = mlock_dereference(pg->next, br))
2242                 if (br_port_group_equal(pg, p, src))
2243                         return pg;
2244
2245         return NULL;
2246 }
2247
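/* Process an IGMPv3 membership report. When the report was received by
 * the bridge itself (!port) or the bridge is forced to IGMPv2 via
 * multicast_igmp_version, group records are reduced to plain
 * join/leave handling; otherwise each record goes through the
 * per-source state machine above and an RTM_NEWMDB notification is
 * sent whenever the port group changed.
 */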
2248 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
2249                                          struct net_bridge_port *port,
2250                                          struct sk_buff *skb,
2251                                          u16 vid)
2252 {
2253         bool igmpv2 = br->multicast_igmp_version == 2;
2254         struct net_bridge_mdb_entry *mdst;
2255         struct net_bridge_port_group *pg;
2256         const unsigned char *src;
2257         struct igmpv3_report *ih;
2258         struct igmpv3_grec *grec;
2259         int i, len, num, type;
2260         bool changed = false;
2261         __be32 group;
2262         int err = 0;
2263         u16 nsrcs;
2264
2265         ih = igmpv3_report_hdr(skb);
2266         num = ntohs(ih->ngrec);
2267         len = skb_transport_offset(skb) + sizeof(*ih);
2268
2269         for (i = 0; i < num; i++) {
2270                 len += sizeof(*grec);
2271                 if (!ip_mc_may_pull(skb, len))
2272                         return -EINVAL;
2273
2274                 grec = (void *)(skb->data + len - sizeof(*grec));
2275                 group = grec->grec_mca;
2276                 type = grec->grec_type;
2277                 nsrcs = ntohs(grec->grec_nsrcs);
2278
2279                 len += nsrcs * 4;
2280                 if (!ip_mc_may_pull(skb, len))
2281                         return -EINVAL;
2282
2283                 switch (type) {
2284                 case IGMPV3_MODE_IS_INCLUDE:
2285                 case IGMPV3_MODE_IS_EXCLUDE:
2286                 case IGMPV3_CHANGE_TO_INCLUDE:
2287                 case IGMPV3_CHANGE_TO_EXCLUDE:
2288                 case IGMPV3_ALLOW_NEW_SOURCES:
2289                 case IGMPV3_BLOCK_OLD_SOURCES:
2290                         break;
2291
2292                 default:
2293                         continue;
2294                 }
2295
2296                 src = eth_hdr(skb)->h_source;
2297                 if (nsrcs == 0 &&
2298                     (type == IGMPV3_CHANGE_TO_INCLUDE ||
2299                      type == IGMPV3_MODE_IS_INCLUDE)) {
2300                         if (!port || igmpv2) {
2301                                 br_ip4_multicast_leave_group(br, port, group, vid, src);
2302                                 continue;
2303                         }
2304                 } else {
2305                         err = br_ip4_multicast_add_group(br, port, group, vid,
2306                                                          src, igmpv2);
2307                         if (err)
2308                                 break;
2309                 }
2310
2311                 if (!port || igmpv2)
2312                         continue;
2313
2314                 spin_lock_bh(&br->multicast_lock);
2315                 mdst = br_mdb_ip4_get(br, group, vid);
2316                 if (!mdst)
2317                         goto unlock_continue;
2318                 pg = br_multicast_find_port(mdst, port, src);
2319                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2320                         goto unlock_continue;
2321                 /* reload grec */
2322                 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
2323                 switch (type) {
2324                 case IGMPV3_ALLOW_NEW_SOURCES:
2325                         changed = br_multicast_isinc_allow(pg, grec->grec_src,
2326                                                            nsrcs, sizeof(__be32));
2327                         break;
2328                 case IGMPV3_MODE_IS_INCLUDE:
2329                         changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
2330                                                            sizeof(__be32));
2331                         break;
2332                 case IGMPV3_MODE_IS_EXCLUDE:
2333                         changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
2334                                                      sizeof(__be32));
2335                         break;
2336                 case IGMPV3_CHANGE_TO_INCLUDE:
2337                         changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
2338                                                     sizeof(__be32));
2339                         break;
2340                 case IGMPV3_CHANGE_TO_EXCLUDE:
2341                         changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
2342                                                     sizeof(__be32));
2343                         break;
2344                 case IGMPV3_BLOCK_OLD_SOURCES:
2345                         changed = br_multicast_block(pg, grec->grec_src, nsrcs,
2346                                                      sizeof(__be32));
2347                         break;
2348                 }
2349                 if (changed)
2350                         br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
2351 unlock_continue:
2352                 spin_unlock_bh(&br->multicast_lock);
2353         }
2354
2355         return err;
2356 }
2357
2358 #if IS_ENABLED(CONFIG_IPV6)
2359 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
2360                                         struct net_bridge_port *port,
2361                                         struct sk_buff *skb,
2362                                         u16 vid)
2363 {
2364         bool mldv1 = br->multicast_mld_version == 1;
2365         struct net_bridge_mdb_entry *mdst;
2366         struct net_bridge_port_group *pg;
2367         unsigned int nsrcs_offset;
2368         const unsigned char *src;
2369         struct icmp6hdr *icmp6h;
2370         struct mld2_grec *grec;
2371         unsigned int grec_len;
2372         bool changed = false;
2373         int i, len, num;
2374         int err = 0;
2375
2376         if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
2377                 return -EINVAL;
2378
2379         icmp6h = icmp6_hdr(skb);
2380         num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
2381         len = skb_transport_offset(skb) + sizeof(*icmp6h);
2382
2383         for (i = 0; i < num; i++) {
2384                 __be16 *_nsrcs, __nsrcs;
2385                 u16 nsrcs;
2386
2387                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
2388
2389                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
2390                     nsrcs_offset + sizeof(__nsrcs))
2391                         return -EINVAL;
2392
2393                 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
2394                                             sizeof(__nsrcs), &__nsrcs);
2395                 if (!_nsrcs)
2396                         return -EINVAL;
2397
2398                 nsrcs = ntohs(*_nsrcs);
2399                 grec_len = struct_size(grec, grec_src, nsrcs);
2400
2401                 if (!ipv6_mc_may_pull(skb, len + grec_len))
2402                         return -EINVAL;
2403
2404                 grec = (struct mld2_grec *)(skb->data + len);
2405                 len += grec_len;
2406
2407                 switch (grec->grec_type) {
2408                 case MLD2_MODE_IS_INCLUDE:
2409                 case MLD2_MODE_IS_EXCLUDE:
2410                 case MLD2_CHANGE_TO_INCLUDE:
2411                 case MLD2_CHANGE_TO_EXCLUDE:
2412                 case MLD2_ALLOW_NEW_SOURCES:
2413                 case MLD2_BLOCK_OLD_SOURCES:
2414                         break;
2415
2416                 default:
2417                         continue;
2418                 }
2419
2420                 src = eth_hdr(skb)->h_source;
2421                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2422                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2423                     nsrcs == 0) {
2424                         if (!port || mldv1) {
2425                                 br_ip6_multicast_leave_group(br, port,
2426                                                              &grec->grec_mca,
2427                                                              vid, src);
2428                                 continue;
2429                         }
2430                 } else {
2431                         err = br_ip6_multicast_add_group(br, port,
2432                                                          &grec->grec_mca, vid,
2433                                                          src, mldv1);
2434                         if (err)
2435                                 break;
2436                 }
2437
2438                 if (!port || mldv1)
2439                         continue;
2440
2441                 spin_lock_bh(&br->multicast_lock);
2442                 mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
2443                 if (!mdst)
2444                         goto unlock_continue;
2445                 pg = br_multicast_find_port(mdst, port, src);
2446                 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2447                         goto unlock_continue;
2448                 switch (grec->grec_type) {
2449                 case MLD2_ALLOW_NEW_SOURCES:
2450                         changed = br_multicast_isinc_allow(pg, grec->grec_src,
2451                                                            nsrcs,
2452                                                            sizeof(struct in6_addr));
2453                         break;
2454                 case MLD2_MODE_IS_INCLUDE:
2455                         changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
2456                                                            sizeof(struct in6_addr));
2457                         break;
2458                 case MLD2_MODE_IS_EXCLUDE:
2459                         changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
2460                                                      sizeof(struct in6_addr));
2461                         break;
2462                 case MLD2_CHANGE_TO_INCLUDE:
2463                         changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
2464                                                     sizeof(struct in6_addr));
2465                         break;
2466                 case MLD2_CHANGE_TO_EXCLUDE:
2467                         changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
2468                                                     sizeof(struct in6_addr));
2469                         break;
2470                 case MLD2_BLOCK_OLD_SOURCES:
2471                         changed = br_multicast_block(pg, grec->grec_src, nsrcs,
2472                                                      sizeof(struct in6_addr));
2473                         break;
2474                 }
2475                 if (changed)
2476                         br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
2477 unlock_continue:
2478                 spin_unlock_bh(&br->multicast_lock);
2479         }
2480
2481         return err;
2482 }
2483 #endif
2484
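/* IGMP querier election: the lowest source address wins. Returns true,
 * recording @saddr and @port as the selected querier, when no election
 * is currently running, when no querier address has been seen yet, or
 * when @saddr is not larger than the current querier's address.
 */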
2485 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
2486                                             struct net_bridge_port *port,
2487                                             __be32 saddr)
2488 {
2489         if (!timer_pending(&br->ip4_own_query.timer) &&
2490             !timer_pending(&br->ip4_other_query.timer))
2491                 goto update;
2492
2493         if (!br->ip4_querier.addr.src.ip4)
2494                 goto update;
2495
2496         if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
2497                 goto update;
2498
2499         return false;
2500
2501 update:
2502         br->ip4_querier.addr.src.ip4 = saddr;
2503
2504         /* update is protected by the caller holding br->multicast_lock */
2505         rcu_assign_pointer(br->ip4_querier.port, port);
2506
2507         return true;
2508 }
2509
2510 #if IS_ENABLED(CONFIG_IPV6)
2511 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
2512                                             struct net_bridge_port *port,
2513                                             struct in6_addr *saddr)
2514 {
2515         if (!timer_pending(&br->ip6_own_query.timer) &&
2516             !timer_pending(&br->ip6_other_query.timer))
2517                 goto update;
2518
2519         if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
2520                 goto update;
2521
2522         return false;
2523
2524 update:
2525         br->ip6_querier.addr.src.ip6 = *saddr;
2526
2527         /* update is protected by the caller holding br->multicast_lock */
2528         rcu_assign_pointer(br->ip6_querier.port, port);
2529
2530         return true;
2531 }
2532 #endif
2533
2534 static bool br_multicast_select_querier(struct net_bridge *br,
2535                                         struct net_bridge_port *port,
2536                                         struct br_ip *saddr)
2537 {
2538         switch (saddr->proto) {
2539         case htons(ETH_P_IP):
2540                 return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
2541 #if IS_ENABLED(CONFIG_IPV6)
2542         case htons(ETH_P_IPV6):
2543                 return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
2544 #endif
2545         }
2546
2547         return false;
2548 }
2549
2550 static void
2551 br_multicast_update_query_timer(struct net_bridge *br,
2552                                 struct bridge_mcast_other_query *query,
2553                                 unsigned long max_delay)
2554 {
2555         if (!timer_pending(&query->timer))
2556                 query->delay_time = jiffies + max_delay;
2557
2558         mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
2559 }
2560
2561 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2562                                            bool is_mc_router)
2563 {
2564         struct switchdev_attr attr = {
2565                 .orig_dev = p->dev,
2566                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2567                 .flags = SWITCHDEV_F_DEFER,
2568                 .u.mrouter = is_mc_router,
2569         };
2570
2571         switchdev_port_attr_set(p->dev, &attr);
2572 }
2573
2574 /*
2575  * Add port to router_list.
2576  * The list is kept ordered by pointer value and is protected by
2577  * br->multicast_lock and RCU.
2578  */
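/* Note: the loop below keeps the list in descending order of port
 * pointer value; it walks past all entries larger than the new port
 * and inserts the port behind the last of them, or at the head when
 * there is none.
 */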
2579 static void br_multicast_add_router(struct net_bridge *br,
2580                                     struct net_bridge_port *port)
2581 {
2582         struct net_bridge_port *p;
2583         struct hlist_node *slot = NULL;
2584
2585         if (!hlist_unhashed(&port->rlist))
2586                 return;
2587
2588         hlist_for_each_entry(p, &br->router_list, rlist) {
2589                 if ((unsigned long) port >= (unsigned long) p)
2590                         break;
2591                 slot = &p->rlist;
2592         }
2593
2594         if (slot)
2595                 hlist_add_behind_rcu(&port->rlist, slot);
2596         else
2597                 hlist_add_head_rcu(&port->rlist, &br->router_list);
2598         br_rtr_notify(br->dev, port, RTM_NEWMDB);
2599         br_port_mc_router_state_change(port, true);
2600 }
2601
2602 static void br_multicast_mark_router(struct net_bridge *br,
2603                                      struct net_bridge_port *port)
2604 {
2605         unsigned long now = jiffies;
2606
2607         if (!port) {
2608                 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
2609                         if (!timer_pending(&br->multicast_router_timer))
2610                                 br_mc_router_state_change(br, true);
2611                         mod_timer(&br->multicast_router_timer,
2612                                   now + br->multicast_querier_interval);
2613                 }
2614                 return;
2615         }
2616
2617         if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
2618             port->multicast_router == MDB_RTR_TYPE_PERM)
2619                 return;
2620
2621         br_multicast_add_router(br, port);
2622
2623         mod_timer(&port->multicast_router_timer,
2624                   now + br->multicast_querier_interval);
2625 }
2626
2627 static void br_multicast_query_received(struct net_bridge *br,
2628                                         struct net_bridge_port *port,
2629                                         struct bridge_mcast_other_query *query,
2630                                         struct br_ip *saddr,
2631                                         unsigned long max_delay)
2632 {
2633         if (!br_multicast_select_querier(br, port, saddr))
2634                 return;
2635
2636         br_multicast_update_query_timer(br, query, max_delay);
2637         br_multicast_mark_router(br, port);
2638 }
2639
2640 static void br_ip4_multicast_query(struct net_bridge *br,
2641                                    struct net_bridge_port *port,
2642                                    struct sk_buff *skb,
2643                                    u16 vid)
2644 {
2645         unsigned int transport_len = ip_transport_len(skb);
2646         const struct iphdr *iph = ip_hdr(skb);
2647         struct igmphdr *ih = igmp_hdr(skb);
2648         struct net_bridge_mdb_entry *mp;
2649         struct igmpv3_query *ih3;
2650         struct net_bridge_port_group *p;
2651         struct net_bridge_port_group __rcu **pp;
2652         struct br_ip saddr;
2653         unsigned long max_delay;
2654         unsigned long now = jiffies;
2655         __be32 group;
2656
2657         spin_lock(&br->multicast_lock);
2658         if (!netif_running(br->dev) ||
2659             (port && port->state == BR_STATE_DISABLED))
2660                 goto out;
2661
2662         group = ih->group;
2663
2664         if (transport_len == sizeof(*ih)) {
2665                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
2666
2667                 if (!max_delay) {
2668                         max_delay = 10 * HZ;
2669                         group = 0;
2670                 }
2671         } else if (transport_len >= sizeof(*ih3)) {
2672                 ih3 = igmpv3_query_hdr(skb);
2673                 if (ih3->nsrcs ||
2674                     (br->multicast_igmp_version == 3 && group && ih3->suppress))
2675                         goto out;
2676
2677                 max_delay = ih3->code ?
2678                             IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
2679         } else {
2680                 goto out;
2681         }
2682
2683         if (!group) {
2684                 saddr.proto = htons(ETH_P_IP);
2685                 saddr.src.ip4 = iph->saddr;
2686
2687                 br_multicast_query_received(br, port, &br->ip4_other_query,
2688                                             &saddr, max_delay);
2689                 goto out;
2690         }
2691
2692         mp = br_mdb_ip4_get(br, group, vid);
2693         if (!mp)
2694                 goto out;
2695
2696         max_delay *= br->multicast_last_member_count;
2697
2698         if (mp->host_joined &&
2699             (timer_pending(&mp->timer) ?
2700              time_after(mp->timer.expires, now + max_delay) :
2701              try_to_del_timer_sync(&mp->timer) >= 0))
2702                 mod_timer(&mp->timer, now + max_delay);
2703
2704         for (pp = &mp->ports;
2705              (p = mlock_dereference(*pp, br)) != NULL;
2706              pp = &p->next) {
2707                 if (timer_pending(&p->timer) ?
2708                     time_after(p->timer.expires, now + max_delay) :
2709                     try_to_del_timer_sync(&p->timer) >= 0 &&
2710                     (br->multicast_igmp_version == 2 ||
2711                      p->filter_mode == MCAST_EXCLUDE))
2712                         mod_timer(&p->timer, now + max_delay);
2713         }
2714
2715 out:
2716         spin_unlock(&br->multicast_lock);
2717 }
2718
2719 #if IS_ENABLED(CONFIG_IPV6)
2720 static int br_ip6_multicast_query(struct net_bridge *br,
2721                                   struct net_bridge_port *port,
2722                                   struct sk_buff *skb,
2723                                   u16 vid)
2724 {
2725         unsigned int transport_len = ipv6_transport_len(skb);
2726         struct mld_msg *mld;
2727         struct net_bridge_mdb_entry *mp;
2728         struct mld2_query *mld2q;
2729         struct net_bridge_port_group *p;
2730         struct net_bridge_port_group __rcu **pp;
2731         struct br_ip saddr;
2732         unsigned long max_delay;
2733         unsigned long now = jiffies;
2734         unsigned int offset = skb_transport_offset(skb);
2735         const struct in6_addr *group = NULL;
2736         bool is_general_query;
2737         int err = 0;
2738
2739         spin_lock(&br->multicast_lock);
2740         if (!netif_running(br->dev) ||
2741             (port && port->state == BR_STATE_DISABLED))
2742                 goto out;
2743
2744         if (transport_len == sizeof(*mld)) {
2745                 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
2746                         err = -EINVAL;
2747                         goto out;
2748                 }
2749                 mld = (struct mld_msg *) icmp6_hdr(skb);
2750                 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
2751                 if (max_delay)
2752                         group = &mld->mld_mca;
2753         } else {
2754                 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
2755                         err = -EINVAL;
2756                         goto out;
2757                 }
2758                 mld2q = (struct mld2_query *)icmp6_hdr(skb);
2759                 if (!mld2q->mld2q_nsrcs)
2760                         group = &mld2q->mld2q_mca;
2761                 if (br->multicast_mld_version == 2 &&
2762                     !ipv6_addr_any(&mld2q->mld2q_mca) &&
2763                     mld2q->mld2q_suppress)
2764                         goto out;
2765
2766                 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
2767         }
2768
2769         is_general_query = group && ipv6_addr_any(group);
2770
2771         if (is_general_query) {
2772                 saddr.proto = htons(ETH_P_IPV6);
2773                 saddr.src.ip6 = ipv6_hdr(skb)->saddr;
2774
2775                 br_multicast_query_received(br, port, &br->ip6_other_query,
2776                                             &saddr, max_delay);
2777                 goto out;
2778         } else if (!group) {
2779                 goto out;
2780         }
2781
2782         mp = br_mdb_ip6_get(br, group, vid);
2783         if (!mp)
2784                 goto out;
2785
2786         max_delay *= br->multicast_last_member_count;
2787         if (mp->host_joined &&
2788             (timer_pending(&mp->timer) ?
2789              time_after(mp->timer.expires, now + max_delay) :
2790              try_to_del_timer_sync(&mp->timer) >= 0))
2791                 mod_timer(&mp->timer, now + max_delay);
2792
2793         for (pp = &mp->ports;
2794              (p = mlock_dereference(*pp, br)) != NULL;
2795              pp = &p->next) {
2796                 if (timer_pending(&p->timer) ?
2797                     time_after(p->timer.expires, now + max_delay) :
2798                     try_to_del_timer_sync(&p->timer) >= 0 &&
2799                     (br->multicast_mld_version == 1 ||
2800                      p->filter_mode == MCAST_EXCLUDE))
2801                         mod_timer(&p->timer, now + max_delay);
2802         }
2803
2804 out:
2805         spin_unlock(&br->multicast_lock);
2806         return err;
2807 }
2808 #endif
2809
2810 static void
2811 br_multicast_leave_group(struct net_bridge *br,
2812                          struct net_bridge_port *port,
2813                          struct br_ip *group,
2814                          struct bridge_mcast_other_query *other_query,
2815                          struct bridge_mcast_own_query *own_query,
2816                          const unsigned char *src)
2817 {
2818         struct net_bridge_mdb_entry *mp;
2819         struct net_bridge_port_group *p;
2820         unsigned long now;
2821         unsigned long time;
2822
2823         spin_lock(&br->multicast_lock);
2824         if (!netif_running(br->dev) ||
2825             (port && port->state == BR_STATE_DISABLED))
2826                 goto out;
2827
2828         mp = br_mdb_ip_get(br, group);
2829         if (!mp)
2830                 goto out;
2831
2832         if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
2833                 struct net_bridge_port_group __rcu **pp;
2834
2835                 for (pp = &mp->ports;
2836                      (p = mlock_dereference(*pp, br)) != NULL;
2837                      pp = &p->next) {
2838                         if (!br_port_group_equal(p, port, src))
2839                                 continue;
2840
2841                         if (p->flags & MDB_PG_FLAGS_PERMANENT)
2842                                 break;
2843
2844                         p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2845                         br_multicast_del_pg(mp, p, pp);
2846                 }
2847                 goto out;
2848         }
2849
2850         if (timer_pending(&other_query->timer))
2851                 goto out;
2852
2853         if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
2854                 __br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
2855                                           false, 0, NULL);
2856
2857                 time = jiffies + br->multicast_last_member_count *
2858                                  br->multicast_last_member_interval;
2859
2860                 mod_timer(&own_query->timer, time);
2861
2862                 for (p = mlock_dereference(mp->ports, br);
2863                      p != NULL;
2864                      p = mlock_dereference(p->next, br)) {
2865                         if (!br_port_group_equal(p, port, src))
2866                                 continue;
2867
2868                         if (!hlist_unhashed(&p->mglist) &&
2869                             (timer_pending(&p->timer) ?
2870                              time_after(p->timer.expires, time) :
2871                              try_to_del_timer_sync(&p->timer) >= 0)) {
2872                                 mod_timer(&p->timer, time);
2873                         }
2874
2875                         break;
2876                 }
2877         }
2878
2879         now = jiffies;
2880         time = now + br->multicast_last_member_count *
2881                      br->multicast_last_member_interval;
2882
2883         if (!port) {
2884                 if (mp->host_joined &&
2885                     (timer_pending(&mp->timer) ?
2886                      time_after(mp->timer.expires, time) :
2887                      try_to_del_timer_sync(&mp->timer) >= 0)) {
2888                         mod_timer(&mp->timer, time);
2889                 }
2890
2891                 goto out;
2892         }
2893
2894         for (p = mlock_dereference(mp->ports, br);
2895              p != NULL;
2896              p = mlock_dereference(p->next, br)) {
2897                 if (p->key.port != port)
2898                         continue;
2899
2900                 if (!hlist_unhashed(&p->mglist) &&
2901                     (timer_pending(&p->timer) ?
2902                      time_after(p->timer.expires, time) :
2903                      try_to_del_timer_sync(&p->timer) >= 0)) {
2904                         mod_timer(&p->timer, time);
2905                 }
2906
2907                 break;
2908         }
2909 out:
2910         spin_unlock(&br->multicast_lock);
2911 }
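
/* A short sketch of the leave handling above, with illustrative numbers
 * only: when the port has BR_MULTICAST_FAST_LEAVE set, the matching
 * non-permanent port group is torn down immediately; otherwise the group
 * and port timers are shortened to the last-member window, which with the
 * defaults from br_multicast_init() works out to
 *
 *	time = jiffies + 2 * (1 * HZ);		// ~2 seconds
 *
 * Fast leave is typically enabled per port from userspace, e.g. (assuming
 * a reasonably recent iproute2; "swp1" is just a placeholder port name):
 *
 *	bridge link set dev swp1 fastleave on
 */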
2912
2913 static void br_ip4_multicast_leave_group(struct net_bridge *br,
2914                                          struct net_bridge_port *port,
2915                                          __be32 group,
2916                                          __u16 vid,
2917                                          const unsigned char *src)
2918 {
2919         struct br_ip br_group;
2920         struct bridge_mcast_own_query *own_query;
2921
2922         if (ipv4_is_local_multicast(group))
2923                 return;
2924
2925         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
2926
2927         memset(&br_group, 0, sizeof(br_group));
2928         br_group.dst.ip4 = group;
2929         br_group.proto = htons(ETH_P_IP);
2930         br_group.vid = vid;
2931
2932         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
2933                                  own_query, src);
2934 }
2935
2936 #if IS_ENABLED(CONFIG_IPV6)
2937 static void br_ip6_multicast_leave_group(struct net_bridge *br,
2938                                          struct net_bridge_port *port,
2939                                          const struct in6_addr *group,
2940                                          __u16 vid,
2941                                          const unsigned char *src)
2942 {
2943         struct br_ip br_group;
2944         struct bridge_mcast_own_query *own_query;
2945
2946         if (ipv6_addr_is_ll_all_nodes(group))
2947                 return;
2948
2949         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
2950
2951         memset(&br_group, 0, sizeof(br_group));
2952         br_group.dst.ip6 = *group;
2953         br_group.proto = htons(ETH_P_IPV6);
2954         br_group.vid = vid;
2955
2956         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
2957                                  own_query, src);
2958 }
2959 #endif
2960
2961 static void br_multicast_err_count(const struct net_bridge *br,
2962                                    const struct net_bridge_port *p,
2963                                    __be16 proto)
2964 {
2965         struct bridge_mcast_stats __percpu *stats;
2966         struct bridge_mcast_stats *pstats;
2967
2968         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2969                 return;
2970
2971         if (p)
2972                 stats = p->mcast_stats;
2973         else
2974                 stats = br->mcast_stats;
2975         if (WARN_ON(!stats))
2976                 return;
2977
2978         pstats = this_cpu_ptr(stats);
2979
2980         u64_stats_update_begin(&pstats->syncp);
2981         switch (proto) {
2982         case htons(ETH_P_IP):
2983                 pstats->mstats.igmp_parse_errors++;
2984                 break;
2985 #if IS_ENABLED(CONFIG_IPV6)
2986         case htons(ETH_P_IPV6):
2987                 pstats->mstats.mld_parse_errors++;
2988                 break;
2989 #endif
2990         }
2991         u64_stats_update_end(&pstats->syncp);
2992 }
2993
2994 static void br_multicast_pim(struct net_bridge *br,
2995                              struct net_bridge_port *port,
2996                              const struct sk_buff *skb)
2997 {
2998         unsigned int offset = skb_transport_offset(skb);
2999         struct pimhdr *pimhdr, _pimhdr;
3000
3001         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3002         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3003             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3004                 return;
3005
3006         br_multicast_mark_router(br, port);
3007 }
3008
3009 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
3010                                     struct net_bridge_port *port,
3011                                     struct sk_buff *skb)
3012 {
3013         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3014             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3015                 return -ENOMSG;
3016
3017         br_multicast_mark_router(br, port);
3018
3019         return 0;
3020 }
3021
3022 static int br_multicast_ipv4_rcv(struct net_bridge *br,
3023                                  struct net_bridge_port *port,
3024                                  struct sk_buff *skb,
3025                                  u16 vid)
3026 {
3027         const unsigned char *src;
3028         struct igmphdr *ih;
3029         int err;
3030
3031         err = ip_mc_check_igmp(skb);
3032
3033         if (err == -ENOMSG) {
3034                 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
3035                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3036                 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
3037                         if (ip_hdr(skb)->protocol == IPPROTO_PIM)
3038                                 br_multicast_pim(br, port, skb);
3039                 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
3040                         br_ip4_multicast_mrd_rcv(br, port, skb);
3041                 }
3042
3043                 return 0;
3044         } else if (err < 0) {
3045                 br_multicast_err_count(br, port, skb->protocol);
3046                 return err;
3047         }
3048
3049         ih = igmp_hdr(skb);
3050         src = eth_hdr(skb)->h_source;
3051         BR_INPUT_SKB_CB(skb)->igmp = ih->type;
3052
3053         switch (ih->type) {
3054         case IGMP_HOST_MEMBERSHIP_REPORT:
3055         case IGMPV2_HOST_MEMBERSHIP_REPORT:
3056                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3057                 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
3058                                                  true);
3059                 break;
3060         case IGMPV3_HOST_MEMBERSHIP_REPORT:
3061                 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
3062                 break;
3063         case IGMP_HOST_MEMBERSHIP_QUERY:
3064                 br_ip4_multicast_query(br, port, skb, vid);
3065                 break;
3066         case IGMP_HOST_LEAVE_MESSAGE:
3067                 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
3068                 break;
3069         }
3070
3071         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
3072                            BR_MCAST_DIR_RX);
3073
3074         return err;
3075 }
3076
3077 #if IS_ENABLED(CONFIG_IPV6)
3078 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
3079                                     struct net_bridge_port *port,
3080                                     struct sk_buff *skb)
3081 {
3082         int ret;
3083
3084         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
3085                 return -ENOMSG;
3086
3087         ret = ipv6_mc_check_icmpv6(skb);
3088         if (ret < 0)
3089                 return ret;
3090
3091         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3092                 return -ENOMSG;
3093
3094         br_multicast_mark_router(br, port);
3095
3096         return 0;
3097 }
3098
3099 static int br_multicast_ipv6_rcv(struct net_bridge *br,
3100                                  struct net_bridge_port *port,
3101                                  struct sk_buff *skb,
3102                                  u16 vid)
3103 {
3104         const unsigned char *src;
3105         struct mld_msg *mld;
3106         int err;
3107
3108         err = ipv6_mc_check_mld(skb);
3109
3110         if (err == -ENOMSG) {
3111                 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
3112                         BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3113
3114                 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
3115                         err = br_ip6_multicast_mrd_rcv(br, port, skb);
3116
3117                         if (err < 0 && err != -ENOMSG) {
3118                                 br_multicast_err_count(br, port, skb->protocol);
3119                                 return err;
3120                         }
3121                 }
3122
3123                 return 0;
3124         } else if (err < 0) {
3125                 br_multicast_err_count(br, port, skb->protocol);
3126                 return err;
3127         }
3128
3129         mld = (struct mld_msg *)skb_transport_header(skb);
3130         BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
3131
3132         switch (mld->mld_type) {
3133         case ICMPV6_MGM_REPORT:
3134                 src = eth_hdr(skb)->h_source;
3135                 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
3136                 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
3137                                                  src, true);
3138                 break;
3139         case ICMPV6_MLD2_REPORT:
3140                 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
3141                 break;
3142         case ICMPV6_MGM_QUERY:
3143                 err = br_ip6_multicast_query(br, port, skb, vid);
3144                 break;
3145         case ICMPV6_MGM_REDUCTION:
3146                 src = eth_hdr(skb)->h_source;
3147                 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
3148                 break;
3149         }
3150
3151         br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
3152                            BR_MCAST_DIR_RX);
3153
3154         return err;
3155 }
3156 #endif
3157
3158 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
3159                      struct sk_buff *skb, u16 vid)
3160 {
3161         int ret = 0;
3162
3163         BR_INPUT_SKB_CB(skb)->igmp = 0;
3164         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
3165
3166         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3167                 return 0;
3168
3169         switch (skb->protocol) {
3170         case htons(ETH_P_IP):
3171                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
3172                 break;
3173 #if IS_ENABLED(CONFIG_IPV6)
3174         case htons(ETH_P_IPV6):
3175                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
3176                 break;
3177 #endif
3178         }
3179
3180         return ret;
3181 }
3182
3183 static void br_multicast_query_expired(struct net_bridge *br,
3184                                        struct bridge_mcast_own_query *query,
3185                                        struct bridge_mcast_querier *querier)
3186 {
3187         spin_lock(&br->multicast_lock);
3188         if (query->startup_sent < br->multicast_startup_query_count)
3189                 query->startup_sent++;
3190
3191         RCU_INIT_POINTER(querier->port, NULL);
3192         br_multicast_send_query(br, NULL, query);
3193         spin_unlock(&br->multicast_lock);
3194 }
3195
3196 static void br_ip4_multicast_query_expired(struct timer_list *t)
3197 {
3198         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
3199
3200         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
3201 }
3202
3203 #if IS_ENABLED(CONFIG_IPV6)
3204 static void br_ip6_multicast_query_expired(struct timer_list *t)
3205 {
3206         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
3207
3208         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
3209 }
3210 #endif
3211
3212 static void br_multicast_gc_work(struct work_struct *work)
3213 {
3214         struct net_bridge *br = container_of(work, struct net_bridge,
3215                                              mcast_gc_work);
3216         HLIST_HEAD(deleted_head);
3217
3218         spin_lock_bh(&br->multicast_lock);
3219         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3220         spin_unlock_bh(&br->multicast_lock);
3221
3222         br_multicast_gc(&deleted_head);
3223 }
3224
3225 void br_multicast_init(struct net_bridge *br)
3226 {
3227         br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
3228
3229         br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3230         br->multicast_last_member_count = 2;
3231         br->multicast_startup_query_count = 2;
3232
3233         br->multicast_last_member_interval = HZ;
3234         br->multicast_query_response_interval = 10 * HZ;
3235         br->multicast_startup_query_interval = 125 * HZ / 4;
3236         br->multicast_query_interval = 125 * HZ;
3237         br->multicast_querier_interval = 255 * HZ;
3238         br->multicast_membership_interval = 260 * HZ;
3239
3240         br->ip4_other_query.delay_time = 0;
3241         br->ip4_querier.port = NULL;
3242         br->multicast_igmp_version = 2;
3243 #if IS_ENABLED(CONFIG_IPV6)
3244         br->multicast_mld_version = 1;
3245         br->ip6_other_query.delay_time = 0;
3246         br->ip6_querier.port = NULL;
3247 #endif
3248         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
3249         br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
3250
3251         spin_lock_init(&br->multicast_lock);
3252         timer_setup(&br->multicast_router_timer,
3253                     br_multicast_local_router_expired, 0);
3254         timer_setup(&br->ip4_other_query.timer,
3255                     br_ip4_multicast_querier_expired, 0);
3256         timer_setup(&br->ip4_own_query.timer,
3257                     br_ip4_multicast_query_expired, 0);
3258 #if IS_ENABLED(CONFIG_IPV6)
3259         timer_setup(&br->ip6_other_query.timer,
3260                     br_ip6_multicast_querier_expired, 0);
3261         timer_setup(&br->ip6_own_query.timer,
3262                     br_ip6_multicast_query_expired, 0);
3263 #endif
3264         INIT_HLIST_HEAD(&br->mdb_list);
3265         INIT_HLIST_HEAD(&br->mcast_gc_list);
3266         INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
3267 }
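
/* The defaults above follow the usual IGMP/MLD querier timer derivations
 * (RFC 2236 / RFC 3810 style); shown here only as a cross-check of the
 * arithmetic, with a robustness (startup/last-member count) of 2:
 *
 *	membership_interval = 2 * query_interval + query_response_interval
 *	                    = 2 * 125 s + 10 s = 260 s
 *	querier_interval    = 2 * query_interval + query_response_interval / 2
 *	                    = 2 * 125 s + 5 s  = 255 s
 *	startup_query_interval = query_interval / 4 = 31.25 s
 */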
3268
3269 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3270 {
3271         struct in_device *in_dev = in_dev_get(br->dev);
3272
3273         if (!in_dev)
3274                 return;
3275
3276         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3277         in_dev_put(in_dev);
3278 }
3279
3280 #if IS_ENABLED(CONFIG_IPV6)
3281 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3282 {
3283         struct in6_addr addr;
3284
3285         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3286         ipv6_dev_mc_inc(br->dev, &addr);
3287 }
3288 #else
3289 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
3290 {
3291 }
3292 #endif
3293
3294 void br_multicast_join_snoopers(struct net_bridge *br)
3295 {
3296         br_ip4_multicast_join_snoopers(br);
3297         br_ip6_multicast_join_snoopers(br);
3298 }
3299
3300 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
3301 {
3302         struct in_device *in_dev = in_dev_get(br->dev);
3303
3304         if (WARN_ON(!in_dev))
3305                 return;
3306
3307         __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3308         in_dev_put(in_dev);
3309 }
3310
3311 #if IS_ENABLED(CONFIG_IPV6)
3312 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3313 {
3314         struct in6_addr addr;
3315
3316         ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
3317         ipv6_dev_mc_dec(br->dev, &addr);
3318 }
3319 #else
3320 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
3321 {
3322 }
3323 #endif
3324
3325 void br_multicast_leave_snoopers(struct net_bridge *br)
3326 {
3327         br_ip4_multicast_leave_snoopers(br);
3328         br_ip6_multicast_leave_snoopers(br);
3329 }
3330
3331 static void __br_multicast_open(struct net_bridge *br,
3332                                 struct bridge_mcast_own_query *query)
3333 {
3334         query->startup_sent = 0;
3335
3336         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3337                 return;
3338
3339         mod_timer(&query->timer, jiffies);
3340 }
3341
3342 void br_multicast_open(struct net_bridge *br)
3343 {
3344         __br_multicast_open(br, &br->ip4_own_query);
3345 #if IS_ENABLED(CONFIG_IPV6)
3346         __br_multicast_open(br, &br->ip6_own_query);
3347 #endif
3348 }
3349
3350 void br_multicast_stop(struct net_bridge *br)
3351 {
3352         del_timer_sync(&br->multicast_router_timer);
3353         del_timer_sync(&br->ip4_other_query.timer);
3354         del_timer_sync(&br->ip4_own_query.timer);
3355 #if IS_ENABLED(CONFIG_IPV6)
3356         del_timer_sync(&br->ip6_other_query.timer);
3357         del_timer_sync(&br->ip6_own_query.timer);
3358 #endif
3359 }
3360
3361 void br_multicast_dev_del(struct net_bridge *br)
3362 {
3363         struct net_bridge_mdb_entry *mp;
3364         HLIST_HEAD(deleted_head);
3365         struct hlist_node *tmp;
3366
3367         spin_lock_bh(&br->multicast_lock);
3368         hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
3369                 br_multicast_del_mdb_entry(mp);
3370         hlist_move_list(&br->mcast_gc_list, &deleted_head);
3371         spin_unlock_bh(&br->multicast_lock);
3372
3373         br_multicast_gc(&deleted_head);
3374         cancel_work_sync(&br->mcast_gc_work);
3375
3376         rcu_barrier();
3377 }
3378
3379 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
3380 {
3381         int err = -EINVAL;
3382
3383         spin_lock_bh(&br->multicast_lock);
3384
3385         switch (val) {
3386         case MDB_RTR_TYPE_DISABLED:
3387         case MDB_RTR_TYPE_PERM:
3388                 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
3389                 del_timer(&br->multicast_router_timer);
3390                 br->multicast_router = val;
3391                 err = 0;
3392                 break;
3393         case MDB_RTR_TYPE_TEMP_QUERY:
3394                 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
3395                         br_mc_router_state_change(br, false);
3396                 br->multicast_router = val;
3397                 err = 0;
3398                 break;
3399         }
3400
3401         spin_unlock_bh(&br->multicast_lock);
3402
3403         return err;
3404 }
3405
3406 static void __del_port_router(struct net_bridge_port *p)
3407 {
3408         if (hlist_unhashed(&p->rlist))
3409                 return;
3410         hlist_del_init_rcu(&p->rlist);
3411         br_rtr_notify(p->br->dev, p, RTM_DELMDB);
3412         br_port_mc_router_state_change(p, false);
3413
3414         /* don't allow timer refresh */
3415         if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3416                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3417 }
3418
3419 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
3420 {
3421         struct net_bridge *br = p->br;
3422         unsigned long now = jiffies;
3423         int err = -EINVAL;
3424
3425         spin_lock(&br->multicast_lock);
3426         if (p->multicast_router == val) {
3427                 /* Refresh the temp router port timer */
3428                 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3429                         mod_timer(&p->multicast_router_timer,
3430                                   now + br->multicast_querier_interval);
3431                 err = 0;
3432                 goto unlock;
3433         }
3434         switch (val) {
3435         case MDB_RTR_TYPE_DISABLED:
3436                 p->multicast_router = MDB_RTR_TYPE_DISABLED;
3437                 __del_port_router(p);
3438                 del_timer(&p->multicast_router_timer);
3439                 break;
3440         case MDB_RTR_TYPE_TEMP_QUERY:
3441                 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3442                 __del_port_router(p);
3443                 break;
3444         case MDB_RTR_TYPE_PERM:
3445                 p->multicast_router = MDB_RTR_TYPE_PERM;
3446                 del_timer(&p->multicast_router_timer);
3447                 br_multicast_add_router(br, p);
3448                 break;
3449         case MDB_RTR_TYPE_TEMP:
3450                 p->multicast_router = MDB_RTR_TYPE_TEMP;
3451                 br_multicast_mark_router(br, p);
3452                 break;
3453         default:
3454                 goto unlock;
3455         }
3456         err = 0;
3457 unlock:
3458         spin_unlock(&br->multicast_lock);
3459
3460         return err;
3461 }
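
/* A quick map of the MDB_RTR_TYPE_* values handled above, as defined in
 * include/uapi/linux/if_bridge.h at the time of writing: 0 = disabled,
 * 1 = learn from queries (the default), 2 = permanent, 3 = temporary
 * (learned, refreshed by the router timer). One way to drive this per
 * port from userspace, assuming iproute2's bridge_slave mcast_router
 * option ("swp1" is only a placeholder name):
 *
 *	ip link set dev swp1 type bridge_slave mcast_router 2
 */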
3462
3463 static void br_multicast_start_querier(struct net_bridge *br,
3464                                        struct bridge_mcast_own_query *query)
3465 {
3466         struct net_bridge_port *port;
3467
3468         __br_multicast_open(br, query);
3469
3470         rcu_read_lock();
3471         list_for_each_entry_rcu(port, &br->port_list, list) {
3472                 if (port->state == BR_STATE_DISABLED ||
3473                     port->state == BR_STATE_BLOCKING)
3474                         continue;
3475
3476                 if (query == &br->ip4_own_query)
3477                         br_multicast_enable(&port->ip4_own_query);
3478 #if IS_ENABLED(CONFIG_IPV6)
3479                 else
3480                         br_multicast_enable(&port->ip6_own_query);
3481 #endif
3482         }
3483         rcu_read_unlock();
3484 }
3485
3486 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
3487 {
3488         struct net_bridge_port *port;
3489         bool change_snoopers = false;
3490
3491         spin_lock_bh(&br->multicast_lock);
3492         if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
3493                 goto unlock;
3494
3495         br_mc_disabled_update(br->dev, val);
3496         br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
3497         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
3498                 change_snoopers = true;
3499                 goto unlock;
3500         }
3501
3502         if (!netif_running(br->dev))
3503                 goto unlock;
3504
3505         br_multicast_open(br);
3506         list_for_each_entry(port, &br->port_list, list)
3507                 __br_multicast_enable_port(port);
3508
3509         change_snoopers = true;
3510
3511 unlock:
3512         spin_unlock_bh(&br->multicast_lock);
3513
3514         /* br_multicast_join_snoopers has the potential to cause
3515          * an MLD Report/Leave to be delivered to br_multicast_rcv,
3516          * which would in turn call br_multicast_add_group, which would
3517          * attempt to acquire multicast_lock, so the join must only be
3518          * done after multicast_lock has been released to avoid a
3519          * deadlock on that lock.
3520          *
3521          * br_multicast_leave_snoopers does not have this problem since
3522          * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED and
3523          * returns without calling br_multicast_ipv4/6_rcv when snooping
3524          * is disabled. Both calls are kept out here just for symmetry.
3525          */
3526         if (change_snoopers) {
3527                 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
3528                         br_multicast_join_snoopers(br);
3529                 else
3530                         br_multicast_leave_snoopers(br);
3531         }
3532
3533         return 0;
3534 }
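
/* Snooping is normally toggled from userspace through the option this
 * function backs; a minimal sketch, assuming iproute2's bridge
 * mcast_snooping knob (the sysfs equivalent is
 * /sys/class/net/<bridge>/bridge/multicast_snooping):
 *
 *	ip link set dev br0 type bridge mcast_snooping 0	# disable
 *	ip link set dev br0 type bridge mcast_snooping 1	# re-enable
 */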
3535
3536 bool br_multicast_enabled(const struct net_device *dev)
3537 {
3538         struct net_bridge *br = netdev_priv(dev);
3539
3540         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
3541 }
3542 EXPORT_SYMBOL_GPL(br_multicast_enabled);
3543
3544 bool br_multicast_router(const struct net_device *dev)
3545 {
3546         struct net_bridge *br = netdev_priv(dev);
3547         bool is_router;
3548
3549         spin_lock_bh(&br->multicast_lock);
3550         is_router = br_multicast_is_router(br);
3551         spin_unlock_bh(&br->multicast_lock);
3552         return is_router;
3553 }
3554 EXPORT_SYMBOL_GPL(br_multicast_router);
3555
3556 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
3557 {
3558         unsigned long max_delay;
3559
3560         val = !!val;
3561
3562         spin_lock_bh(&br->multicast_lock);
3563         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
3564                 goto unlock;
3565
3566         br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
3567         if (!val)
3568                 goto unlock;
3569
3570         max_delay = br->multicast_query_response_interval;
3571
3572         if (!timer_pending(&br->ip4_other_query.timer))
3573                 br->ip4_other_query.delay_time = jiffies + max_delay;
3574
3575         br_multicast_start_querier(br, &br->ip4_own_query);
3576
3577 #if IS_ENABLED(CONFIG_IPV6)
3578         if (!timer_pending(&br->ip6_other_query.timer))
3579                 br->ip6_other_query.delay_time = jiffies + max_delay;
3580
3581         br_multicast_start_querier(br, &br->ip6_own_query);
3582 #endif
3583
3584 unlock:
3585         spin_unlock_bh(&br->multicast_lock);
3586
3587         return 0;
3588 }
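
/* A minimal way to flip this option from userspace, assuming iproute2's
 * bridge mcast_querier knob (the sysfs equivalent is
 * /sys/class/net/<bridge>/bridge/multicast_querier):
 *
 *	ip link set dev br0 type bridge mcast_querier 1
 *
 * Note how, before our own queries start, ip4/ip6_other_query.delay_time
 * is pushed one query-response interval ahead whenever no foreign-querier
 * timer is already running.
 */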
3589
3590 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
3591 {
3592         /* Currently we support only versions 2 and 3 */
3593         switch (val) {
3594         case 2:
3595         case 3:
3596                 break;
3597         default:
3598                 return -EINVAL;
3599         }
3600
3601         spin_lock_bh(&br->multicast_lock);
3602         br->multicast_igmp_version = val;
3603         spin_unlock_bh(&br->multicast_lock);
3604
3605         return 0;
3606 }
3607
3608 #if IS_ENABLED(CONFIG_IPV6)
3609 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
3610 {
3611         /* Currently we support versions 1 and 2 */
3612         switch (val) {
3613         case 1:
3614         case 2:
3615                 break;
3616         default:
3617                 return -EINVAL;
3618         }
3619
3620         spin_lock_bh(&br->multicast_lock);
3621         br->multicast_mld_version = val;
3622         spin_unlock_bh(&br->multicast_lock);
3623
3624         return 0;
3625 }
3626 #endif
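
/* The two setters above back the per-bridge protocol version knobs; a
 * minimal usage sketch, assuming iproute2's mcast_igmp_version /
 * mcast_mld_version options:
 *
 *	ip link set dev br0 type bridge mcast_igmp_version 3
 *	ip link set dev br0 type bridge mcast_mld_version 2
 */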
3627
3628 /**
3629  * br_multicast_list_adjacent - Returns snooped multicast addresses
3630  * @dev:        The bridge port adjacent to which to retrieve addresses
3631  * @br_ip_list: The list to store found, snooped multicast IP addresses in
3632  *
3633  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
3634  * snooping feature on all bridge ports of dev's bridge device, excluding
3635  * the addresses from dev itself.
3636  *
3637  * Returns the number of items added to br_ip_list.
3638  *
3639  * Notes:
3640  * - br_ip_list needs to be initialized by the caller
3641  * - br_ip_list may end up containing duplicates
3642  *   (deduplication is up to the caller)
3643  * - the entries added to br_ip_list need to be freed by the caller
3644  */
3645 int br_multicast_list_adjacent(struct net_device *dev,
3646                                struct list_head *br_ip_list)
3647 {
3648         struct net_bridge *br;
3649         struct net_bridge_port *port;
3650         struct net_bridge_port_group *group;
3651         struct br_ip_list *entry;
3652         int count = 0;
3653
3654         rcu_read_lock();
3655         if (!br_ip_list || !netif_is_bridge_port(dev))
3656                 goto unlock;
3657
3658         port = br_port_get_rcu(dev);
3659         if (!port || !port->br)
3660                 goto unlock;
3661
3662         br = port->br;
3663
3664         list_for_each_entry_rcu(port, &br->port_list, list) {
3665                 if (!port->dev || port->dev == dev)
3666                         continue;
3667
3668                 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
3669                         entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
3670                         if (!entry)
3671                                 goto unlock;
3672
3673                         entry->addr = group->key.addr;
3674                         list_add(&entry->list, br_ip_list);
3675                         count++;
3676                 }
3677         }
3678
3679 unlock:
3680         rcu_read_unlock();
3681         return count;
3682 }
3683 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
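
/* A minimal caller sketch for the helper above; "lower" is only a
 * placeholder for whatever bridge-port netdevice the caller holds:
 *
 *	LIST_HEAD(mcast_list);
 *	struct br_ip_list *entry, *tmp;
 *	int count;
 *
 *	count = br_multicast_list_adjacent(lower, &mcast_list);
 *
 *	list_for_each_entry_safe(entry, tmp, &mcast_list, list) {
 *		... use entry->addr (a struct br_ip) ...
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */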
3684
3685 /**
3686  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
3687  * @dev: The bridge port providing the bridge on which to check for a querier
3688  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3689  *
3690  * Checks whether the given interface has a bridge on top and if so returns
3691  * true if a valid querier exists anywhere on the bridged link layer.
3692  * Otherwise returns false.
3693  */
3694 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
3695 {
3696         struct net_bridge *br;
3697         struct net_bridge_port *port;
3698         struct ethhdr eth;
3699         bool ret = false;
3700
3701         rcu_read_lock();
3702         if (!netif_is_bridge_port(dev))
3703                 goto unlock;
3704
3705         port = br_port_get_rcu(dev);
3706         if (!port || !port->br)
3707                 goto unlock;
3708
3709         br = port->br;
3710
3711         memset(&eth, 0, sizeof(eth));
3712         eth.h_proto = htons(proto);
3713
3714         ret = br_multicast_querier_exists(br, &eth, NULL);
3715
3716 unlock:
3717         rcu_read_unlock();
3718         return ret;
3719 }
3720 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
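
/* Typical use of the helper above is a simple gate in a driver or an
 * overlay protocol, e.g. (sketch only):
 *
 *	if (!br_multicast_has_querier_anywhere(dev, ETH_P_IP))
 *		pr_debug("no IGMP querier on the bridged segment\n");
 *
 * Pass ETH_P_IPV6 instead to ask the same question for MLD.
 */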
3721
3722 /**
3723  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
3724  * @dev: The bridge port adjacent to which to check for a querier
3725  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3726  *
3727  * Checks whether the given interface has a bridge on top and if so returns
3728  * true if a selected querier is behind one of the other ports of this
3729  * bridge. Otherwise returns false.
3730  */
3731 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
3732 {
3733         struct net_bridge *br;
3734         struct net_bridge_port *port;
3735         bool ret = false;
3736
3737         rcu_read_lock();
3738         if (!netif_is_bridge_port(dev))
3739                 goto unlock;
3740
3741         port = br_port_get_rcu(dev);
3742         if (!port || !port->br)
3743                 goto unlock;
3744
3745         br = port->br;
3746
3747         switch (proto) {
3748         case ETH_P_IP:
3749                 if (!timer_pending(&br->ip4_other_query.timer) ||
3750                     rcu_dereference(br->ip4_querier.port) == port)
3751                         goto unlock;
3752                 break;
3753 #if IS_ENABLED(CONFIG_IPV6)
3754         case ETH_P_IPV6:
3755                 if (!timer_pending(&br->ip6_other_query.timer) ||
3756                     rcu_dereference(br->ip6_querier.port) == port)
3757                         goto unlock;
3758                 break;
3759 #endif
3760         default:
3761                 goto unlock;
3762         }
3763
3764         ret = true;
3765 unlock:
3766         rcu_read_unlock();
3767         return ret;
3768 }
3769 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
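
/* The "adjacent" variant above differs from the "anywhere" one in that it
 * deliberately ignores a querier elected behind @dev itself: only a
 * querier seen behind one of the *other* ports of the same bridge makes
 * it return true, which is why it compares the cached querier port
 * against @dev's port before answering.
 */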
3770
3771 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
3772                                const struct sk_buff *skb, u8 type, u8 dir)
3773 {
3774         struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
3775         __be16 proto = skb->protocol;
3776         unsigned int t_len;
3777
3778         u64_stats_update_begin(&pstats->syncp);
3779         switch (proto) {
3780         case htons(ETH_P_IP):
3781                 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
3782                 switch (type) {
3783                 case IGMP_HOST_MEMBERSHIP_REPORT:
3784                         pstats->mstats.igmp_v1reports[dir]++;
3785                         break;
3786                 case IGMPV2_HOST_MEMBERSHIP_REPORT:
3787                         pstats->mstats.igmp_v2reports[dir]++;
3788                         break;
3789                 case IGMPV3_HOST_MEMBERSHIP_REPORT:
3790                         pstats->mstats.igmp_v3reports[dir]++;
3791                         break;
3792                 case IGMP_HOST_MEMBERSHIP_QUERY:
3793                         if (t_len != sizeof(struct igmphdr)) {
3794                                 pstats->mstats.igmp_v3queries[dir]++;
3795                         } else {
3796                                 unsigned int offset = skb_transport_offset(skb);
3797                                 struct igmphdr *ih, _ihdr;
3798
3799                                 ih = skb_header_pointer(skb, offset,
3800                                                         sizeof(_ihdr), &_ihdr);
3801                                 if (!ih)
3802                                         break;
3803                                 if (!ih->code)
3804                                         pstats->mstats.igmp_v1queries[dir]++;
3805                                 else
3806                                         pstats->mstats.igmp_v2queries[dir]++;
3807                         }
3808                         break;
3809                 case IGMP_HOST_LEAVE_MESSAGE:
3810                         pstats->mstats.igmp_leaves[dir]++;
3811                         break;
3812                 }
3813                 break;
3814 #if IS_ENABLED(CONFIG_IPV6)
3815         case htons(ETH_P_IPV6):
3816                 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
3817                         sizeof(struct ipv6hdr);
3818                 t_len -= skb_network_header_len(skb);
3819                 switch (type) {
3820                 case ICMPV6_MGM_REPORT:
3821                         pstats->mstats.mld_v1reports[dir]++;
3822                         break;
3823                 case ICMPV6_MLD2_REPORT:
3824                         pstats->mstats.mld_v2reports[dir]++;
3825                         break;
3826                 case ICMPV6_MGM_QUERY:
3827                         if (t_len != sizeof(struct mld_msg))
3828                                 pstats->mstats.mld_v2queries[dir]++;
3829                         else
3830                                 pstats->mstats.mld_v1queries[dir]++;
3831                         break;
3832                 case ICMPV6_MGM_REDUCTION:
3833                         pstats->mstats.mld_leaves[dir]++;
3834                         break;
3835                 }
3836                 break;
3837 #endif /* CONFIG_IPV6 */
3838         }
3839         u64_stats_update_end(&pstats->syncp);
3840 }
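
/* How the query counters above are classified, for quick reference: an
 * IGMP query longer than sizeof(struct igmphdr) (8 bytes) can only be
 * IGMPv3; an 8-byte query with a zero Max Resp Time field is IGMPv1,
 * otherwise IGMPv2. Likewise an MLD query of exactly
 * sizeof(struct mld_msg) is counted as MLDv1, anything else as MLDv2.
 */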
3841
3842 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
3843                         const struct sk_buff *skb, u8 type, u8 dir)
3844 {
3845         struct bridge_mcast_stats __percpu *stats;
3846
3847         /* if multicast snooping is disabled then the IGMP/MLD type can't be set */
3848         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3849                 return;
3850
3851         if (p)
3852                 stats = p->mcast_stats;
3853         else
3854                 stats = br->mcast_stats;
3855         if (WARN_ON(!stats))
3856                 return;
3857
3858         br_mcast_stats_add(stats, skb, type, dir);
3859 }
3860
3861 int br_multicast_init_stats(struct net_bridge *br)
3862 {
3863         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
3864         if (!br->mcast_stats)
3865                 return -ENOMEM;
3866
3867         return 0;
3868 }
3869
3870 void br_multicast_uninit_stats(struct net_bridge *br)
3871 {
3872         free_percpu(br->mcast_stats);
3873 }
3874
3875 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
3876 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
3877 {
3878         dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
3879         dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
3880 }
3881
3882 void br_multicast_get_stats(const struct net_bridge *br,
3883                             const struct net_bridge_port *p,
3884                             struct br_mcast_stats *dest)
3885 {
3886         struct bridge_mcast_stats __percpu *stats;
3887         struct br_mcast_stats tdst;
3888         int i;
3889
3890         memset(dest, 0, sizeof(*dest));
3891         if (p)
3892                 stats = p->mcast_stats;
3893         else
3894                 stats = br->mcast_stats;
3895         if (WARN_ON(!stats))
3896                 return;
3897
3898         memset(&tdst, 0, sizeof(tdst));
3899         for_each_possible_cpu(i) {
3900                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
3901                 struct br_mcast_stats temp;
3902                 unsigned int start;
3903
3904                 do {
3905                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3906                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
3907                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3908
3909                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
3910                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
3911                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
3912                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
3913                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
3914                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
3915                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
3916                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
3917
3918                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
3919                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
3920                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
3921                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
3922                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
3923                 tdst.mld_parse_errors += temp.mld_parse_errors;
3924         }
3925         memcpy(dest, &tdst, sizeof(*dest));
3926 }
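
/* The per-cpu fetch loop above relies on the u64_stats seqcount so the
 * 64-bit counters stay consistent on 32-bit hosts. Userspace normally
 * reads the aggregated result through RTM_GETSTATS (the bridge's link
 * xstats); with a sufficiently recent iproute2 this is roughly:
 *
 *	ip link xstats type bridge
 */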
3927
3928 int br_mdb_hash_init(struct net_bridge *br)
3929 {
3930         int err;
3931
3932         err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
3933         if (err)
3934                 return err;
3935
3936         err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
3937         if (err) {
3938                 rhashtable_destroy(&br->sg_port_tbl);
3939                 return err;
3940         }
3941
3942         return 0;
3943 }
3944
3945 void br_mdb_hash_fini(struct net_bridge *br)
3946 {
3947         rhashtable_destroy(&br->sg_port_tbl);
3948         rhashtable_destroy(&br->mdb_hash_tbl);
3949 }