net: bridge: mcast: delete expired port groups without srcs
[linux-2.6-microblaze.git] / net / bridge / br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36
/* rhashtable parameters for the bridge MDB: entries are keyed by the
 * whole struct br_ip (proto + address + vid), key_len covers the full
 * struct, so writers must zero it completely (memset) before filling it
 * in to avoid padding-byte mismatches on lookup.
 */
static const struct rhashtable_params br_mdb_rht_params = {
        .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
        .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
        .key_len = sizeof(struct br_ip),
        .automatic_shrinking = true,
};
43
44 static void br_multicast_start_querier(struct net_bridge *br,
45                                        struct bridge_mcast_own_query *query);
46 static void br_multicast_add_router(struct net_bridge *br,
47                                     struct net_bridge_port *port);
48 static void br_ip4_multicast_leave_group(struct net_bridge *br,
49                                          struct net_bridge_port *port,
50                                          __be32 group,
51                                          __u16 vid,
52                                          const unsigned char *src);
53 static void br_multicast_port_group_rexmit(struct timer_list *t);
54
55 static void __del_port_router(struct net_bridge_port *p);
56 #if IS_ENABLED(CONFIG_IPV6)
57 static void br_ip6_multicast_leave_group(struct net_bridge *br,
58                                          struct net_bridge_port *port,
59                                          const struct in6_addr *group,
60                                          __u16 vid, const unsigned char *src);
61 #endif
62
/* Look up an MDB entry by address; the caller must be inside an RCU
 * read-side critical section (rhashtable_lookup requirement).
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
                                                      struct br_ip *dst)
{
        return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
68
/* Look up an MDB entry while holding br->multicast_lock.  The returned
 * entry remains valid under the lock; the short RCU section only
 * satisfies rhashtable_lookup()'s locking requirement.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
                                           struct br_ip *dst)
{
        struct net_bridge_mdb_entry *ent;

        lockdep_assert_held_once(&br->multicast_lock);

        rcu_read_lock();
        ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
        rcu_read_unlock();

        return ent;
}
82
83 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
84                                                    __be32 dst, __u16 vid)
85 {
86         struct br_ip br_dst;
87
88         memset(&br_dst, 0, sizeof(br_dst));
89         br_dst.u.ip4 = dst;
90         br_dst.proto = htons(ETH_P_IP);
91         br_dst.vid = vid;
92
93         return br_mdb_ip_get(br, &br_dst);
94 }
95
96 #if IS_ENABLED(CONFIG_IPV6)
97 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
98                                                    const struct in6_addr *dst,
99                                                    __u16 vid)
100 {
101         struct br_ip br_dst;
102
103         memset(&br_dst, 0, sizeof(br_dst));
104         br_dst.u.ip6 = *dst;
105         br_dst.proto = htons(ETH_P_IPV6);
106         br_dst.vid = vid;
107
108         return br_mdb_ip_get(br, &br_dst);
109 }
110 #endif
111
/* Fast-path lookup of the MDB entry matching an ingress skb's multicast
 * destination.  Runs via br_mdb_ip_get_rcu(), so the caller must be in
 * an RCU read section.  Returns NULL when snooping is disabled, when
 * the skb was marked as an IGMP/MLD control packet, or for protocols
 * other than IPv4/IPv6.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb, u16 vid)
{
        struct br_ip ip;

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return NULL;

        /* skip lookup for IGMP/MLD control traffic (flagged in the cb) */
        if (BR_INPUT_SKB_CB(skb)->igmp)
                return NULL;

        memset(&ip, 0, sizeof(ip));     /* whole struct is the hash key */
        ip.proto = skb->protocol;
        ip.vid = vid;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                ip.u.ip4 = ip_hdr(skb)->daddr;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                ip.u.ip6 = ipv6_hdr(skb)->daddr;
                break;
#endif
        default:
                return NULL;
        }

        return br_mdb_ip_get_rcu(br, &ip);
}
142
/* Timer handler: the membership timer of an MDB entry fired.  Drops the
 * host-join state and, if no port groups remain either, unlinks the
 * entry and frees it via RCU.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
        struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
        struct net_bridge *br = mp->br;

        spin_lock(&br->multicast_lock);
        /* bail out if the bridge is down or the timer was re-armed */
        if (!netif_running(br->dev) || timer_pending(&mp->timer))
                goto out;

        br_multicast_host_leave(mp, true);

        /* ports still reference this entry - keep it alive */
        if (mp->ports)
                goto out;

        rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
                               br_mdb_rht_params);
        hlist_del_rcu(&mp->mdb_node);

        /* RCU readers may still hold references to mp */
        kfree_rcu(mp, rcu);

out:
        spin_unlock(&br->multicast_lock);
}
166
/* Unlink a source entry from its port group and queue it on the
 * bridge's source gc list for deferred freeing by src_gc_work.
 * Called with br->multicast_lock held.
 */
static void br_multicast_del_group_src(struct net_bridge_group_src *src)
{
        struct net_bridge *br = src->pg->port->br;

        hlist_del_init_rcu(&src->node);
        src->pg->src_ents--;
        hlist_add_head(&src->del_node, &br->src_gc_list);
        queue_work(system_long_wq, &br->src_gc_work);
}
176
/* Remove port group @pg from MDB entry @mp.  @pp must be the link that
 * currently points at @pg in the entry's port list.  All of the group's
 * source entries are deleted, userspace is notified, and @pg itself is
 * freed via RCU.  If the entry is left with neither ports nor a host
 * join, its timer is fired immediately so br_multicast_group_expired()
 * can tear it down.  Called with br->multicast_lock held.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
                         struct net_bridge_port_group *pg,
                         struct net_bridge_port_group __rcu **pp)
{
        struct net_bridge *br = pg->port->br;
        struct net_bridge_group_src *ent;
        struct hlist_node *tmp;

        /* splice pg out of the RCU-protected list, then stop its timers */
        rcu_assign_pointer(*pp, pg->next);
        hlist_del_init(&pg->mglist);
        del_timer(&pg->timer);
        del_timer(&pg->rexmit_timer);
        hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
                br_multicast_del_group_src(ent);
        br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
        kfree_rcu(pg, rcu);

        if (!mp->ports && !mp->host_joined && netif_running(br->dev))
                mod_timer(&mp->timer, jiffies);
}
197
/* Locate the link referencing @pg in its MDB entry's port list and
 * delete the group via br_multicast_del_pg().  Called with
 * br->multicast_lock held; WARNs if the entry or the group is missing.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
                                     struct net_bridge_port_group *pg)
{
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;

        mp = br_mdb_ip_get(br, &pg->addr);
        if (WARN_ON(!mp))
                return;

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p != pg)
                        continue;

                br_multicast_del_pg(mp, pg, pp);
                return;
        }

        WARN_ON(1);     /* pg was not on its own entry's port list */
}
221
222 static void br_multicast_port_group_expired(struct timer_list *t)
223 {
224         struct net_bridge_port_group *pg = from_timer(pg, t, timer);
225         struct net_bridge_group_src *src_ent;
226         struct net_bridge *br = pg->port->br;
227         struct hlist_node *tmp;
228         bool changed;
229
230         spin_lock(&br->multicast_lock);
231         if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
232             hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
233                 goto out;
234
235         changed = !!(pg->filter_mode == MCAST_EXCLUDE);
236         pg->filter_mode = MCAST_INCLUDE;
237         hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
238                 if (!timer_pending(&src_ent->timer)) {
239                         br_multicast_del_group_src(src_ent);
240                         changed = true;
241                 }
242         }
243
244         if (hlist_empty(&pg->src_list)) {
245                 br_multicast_find_del_pg(br, pg);
246         } else if (changed) {
247                 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);
248
249                 if (WARN_ON(!mp))
250                         goto out;
251                 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
252         }
253 out:
254         spin_unlock(&br->multicast_lock);
255 }
256
/* Build an IGMP query skb to be sent by the bridge.
 *
 * @pg:          port group being queried, or NULL for a general query
 * @ip_dst:      IPv4 destination address of the query packet
 * @group:       group being queried (0 for a general query)
 * @with_srcs:   build a group-and-source specific query (IGMPv3 only)
 * @over_lmqt:   select sources whose timers expire after (true) or at/
 *               before (false) the last member query time
 * @sflag:       value for the IGMPv3 "suppress router-side processing" bit
 * @igmp_type:   out: IGMP type written into the packet
 * @need_rexmit: out: set true when a selected source still has
 *               retransmissions pending after this query
 *
 * Returns the skb, or NULL on allocation failure, oversized packet, or
 * when no matching sources exist for a source-specific query.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
                                                    struct net_bridge_port_group *pg,
                                                    __be32 ip_dst, __be32 group,
                                                    bool with_srcs, bool over_lmqt,
                                                    u8 sflag, u8 *igmp_type,
                                                    bool *need_rexmit)
{
        struct net_bridge_port *p = pg ? pg->port : NULL;
        struct net_bridge_group_src *ent;
        size_t pkt_size, igmp_hdr_size;
        unsigned long now = jiffies;
        struct igmpv3_query *ihv3;
        void *csum_start = NULL;
        __sum16 *csum = NULL;
        struct sk_buff *skb;
        struct igmphdr *ih;
        struct ethhdr *eth;
        unsigned long lmqt;
        struct iphdr *iph;
        u16 lmqt_srcs = 0;

        igmp_hdr_size = sizeof(*ih);
        if (br->multicast_igmp_version == 3) {
                igmp_hdr_size = sizeof(*ihv3);
                if (pg && with_srcs) {
                        lmqt = now + (br->multicast_last_member_interval *
                                      br->multicast_last_member_count);
                        /* count sources on the requested side of the LMQT
                         * that still have query retransmissions left
                         */
                        hlist_for_each_entry(ent, &pg->src_list, node) {
                                if (over_lmqt == time_after(ent->timer.expires,
                                                            lmqt) &&
                                    ent->src_query_rexmit_cnt > 0)
                                        lmqt_srcs++;
                        }

                        if (!lmqt_srcs)
                                return NULL;
                        igmp_hdr_size += lmqt_srcs * sizeof(__be32);
                }
        }

        /* ethernet + IP header + 4 bytes of Router Alert option */
        pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
        if ((p && pkt_size > p->dev->mtu) ||
            pkt_size > br->dev->mtu)
                return NULL;

        skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
        if (!skb)
                goto out;

        skb->protocol = htons(ETH_P_IP);

        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);

        ether_addr_copy(eth->h_source, br->dev->dev_addr);
        ip_eth_mc_map(ip_dst, eth->h_dest);
        eth->h_proto = htons(ETH_P_IP);
        skb_put(skb, sizeof(*eth));

        skb_set_network_header(skb, skb->len);
        iph = ip_hdr(skb);
        iph->tot_len = htons(pkt_size - sizeof(*eth));

        iph->version = 4;
        iph->ihl = 6;           /* 24 bytes: 20-byte header + 4-byte RA option */
        iph->tos = 0xc0;
        iph->id = 0;
        iph->frag_off = htons(IP_DF);
        iph->ttl = 1;
        iph->protocol = IPPROTO_IGMP;
        iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
                     inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
        iph->daddr = ip_dst;
        ((u8 *)&iph[1])[0] = IPOPT_RA;
        ((u8 *)&iph[1])[1] = 4;
        ((u8 *)&iph[1])[2] = 0;
        ((u8 *)&iph[1])[3] = 0;
        ip_send_check(iph);
        skb_put(skb, 24);       /* IP header incl. Router Alert (ihl == 6) */

        skb_set_transport_header(skb, skb->len);
        *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

        switch (br->multicast_igmp_version) {
        case 2:
                ih = igmp_hdr(skb);
                ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
                ih->code = (group ? br->multicast_last_member_interval :
                                    br->multicast_query_response_interval) /
                           (HZ / IGMP_TIMER_SCALE);
                ih->group = group;
                ih->csum = 0;
                csum = &ih->csum;
                csum_start = (void *)ih;
                break;
        case 3:
                ihv3 = igmpv3_query_hdr(skb);
                ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
                ihv3->code = (group ? br->multicast_last_member_interval :
                                      br->multicast_query_response_interval) /
                             (HZ / IGMP_TIMER_SCALE);
                ihv3->group = group;
                ihv3->qqic = br->multicast_query_interval / HZ;
                ihv3->nsrcs = htons(lmqt_srcs);
                ihv3->resv = 0;
                ihv3->suppress = sflag;
                ihv3->qrv = 2;
                ihv3->csum = 0;
                csum = &ihv3->csum;
                csum_start = (void *)ihv3;
                if (!pg || !with_srcs)
                        break;

                /* fill in the selected sources, consuming one rexmit
                 * credit per source written
                 */
                lmqt_srcs = 0;
                hlist_for_each_entry(ent, &pg->src_list, node) {
                        if (over_lmqt == time_after(ent->timer.expires,
                                                    lmqt) &&
                            ent->src_query_rexmit_cnt > 0) {
                                ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
                                ent->src_query_rexmit_cnt--;
                                if (need_rexmit && ent->src_query_rexmit_cnt)
                                        *need_rexmit = true;
                        }
                }
                /* the selected set must match the size computed above */
                if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
                        kfree_skb(skb);
                        return NULL;
                }
                break;
        }

        if (WARN_ON(!csum || !csum_start)) {
                kfree_skb(skb);
                return NULL;
        }

        *csum = ip_compute_csum(csum_start, igmp_hdr_size);
        skb_put(skb, igmp_hdr_size);
        __skb_pull(skb, sizeof(*eth));

out:
        return skb;
}
400
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb to be sent by the bridge; IPv6 counterpart of
 * br_ip4_multicast_alloc_query() with the same parameter semantics
 * (over_llqt selects sources relative to the last listener query time).
 * On source-address selection failure the bridge's HAS_IPV6_ADDR option
 * is cleared and NULL is returned.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    struct net_bridge_port_group *pg,
                                                    const struct in6_addr *ip6_dst,
                                                    const struct in6_addr *group,
                                                    bool with_srcs, bool over_llqt,
                                                    u8 sflag, u8 *igmp_type,
                                                    bool *need_rexmit)
{
        struct net_bridge_port *p = pg ? pg->port : NULL;
        struct net_bridge_group_src *ent;
        size_t pkt_size, mld_hdr_size;
        unsigned long now = jiffies;
        struct mld2_query *mld2q;
        void *csum_start = NULL;
        unsigned long interval;
        __sum16 *csum = NULL;
        struct ipv6hdr *ip6h;
        struct mld_msg *mldq;
        struct sk_buff *skb;
        unsigned long llqt;
        struct ethhdr *eth;
        u16 llqt_srcs = 0;
        u8 *hopopt;

        mld_hdr_size = sizeof(*mldq);
        if (br->multicast_mld_version == 2) {
                mld_hdr_size = sizeof(*mld2q);
                if (pg && with_srcs) {
                        llqt = now + (br->multicast_last_member_interval *
                                      br->multicast_last_member_count);
                        /* count sources on the requested side of the LLQT
                         * that still have query retransmissions left
                         */
                        hlist_for_each_entry(ent, &pg->src_list, node) {
                                if (over_llqt == time_after(ent->timer.expires,
                                                            llqt) &&
                                    ent->src_query_rexmit_cnt > 0)
                                        llqt_srcs++;
                        }

                        if (!llqt_srcs)
                                return NULL;
                        mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
                }
        }

        /* ethernet + IPv6 header + 8 bytes of hop-by-hop options */
        pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
        if ((p && pkt_size > p->dev->mtu) ||
            pkt_size > br->dev->mtu)
                return NULL;

        skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
        if (!skb)
                goto out;

        skb->protocol = htons(ETH_P_IPV6);

        /* Ethernet header */
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);

        ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));

        /* IPv6 header + HbH option */
        skb_set_network_header(skb, skb->len);
        ip6h = ipv6_hdr(skb);

        *(__force __be32 *)ip6h = htonl(0x60000000);
        ip6h->payload_len = htons(8 + mld_hdr_size);
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        ip6h->daddr = *ip6_dst;
        /* no usable link-local source address: give up and remember it */
        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
                               &ip6h->saddr)) {
                kfree_skb(skb);
                br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
                return NULL;
        }

        br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

        hopopt = (u8 *)(ip6h + 1);
        hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
        hopopt[1] = 0;                          /* length of HbH */
        hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
        hopopt[3] = 2;                          /* Length of RA Option */
        hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
        hopopt[5] = 0;
        hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
        hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */

        skb_put(skb, sizeof(*ip6h) + 8);

        /* ICMPv6 */
        skb_set_transport_header(skb, skb->len);
        interval = ipv6_addr_any(group) ?
                        br->multicast_query_response_interval :
                        br->multicast_last_member_interval;
        *igmp_type = ICMPV6_MGM_QUERY;
        switch (br->multicast_mld_version) {
        case 1:
                mldq = (struct mld_msg *)icmp6_hdr(skb);
                mldq->mld_type = ICMPV6_MGM_QUERY;
                mldq->mld_code = 0;
                mldq->mld_cksum = 0;
                mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
                mldq->mld_reserved = 0;
                mldq->mld_mca = *group;
                csum = &mldq->mld_cksum;
                csum_start = (void *)mldq;
                break;
        case 2:
                mld2q = (struct mld2_query *)icmp6_hdr(skb);
                mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
                mld2q->mld2q_type = ICMPV6_MGM_QUERY;
                mld2q->mld2q_code = 0;
                mld2q->mld2q_cksum = 0;
                mld2q->mld2q_resv1 = 0;
                mld2q->mld2q_resv2 = 0;
                mld2q->mld2q_suppress = sflag;
                mld2q->mld2q_qrv = 2;
                mld2q->mld2q_nsrcs = htons(llqt_srcs);
                mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
                mld2q->mld2q_mca = *group;
                csum = &mld2q->mld2q_cksum;
                csum_start = (void *)mld2q;
                if (!pg || !with_srcs)
                        break;

                /* fill in the selected sources, consuming one rexmit
                 * credit per source written
                 */
                llqt_srcs = 0;
                hlist_for_each_entry(ent, &pg->src_list, node) {
                        if (over_llqt == time_after(ent->timer.expires,
                                                    llqt) &&
                            ent->src_query_rexmit_cnt > 0) {
                                mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
                                ent->src_query_rexmit_cnt--;
                                if (need_rexmit && ent->src_query_rexmit_cnt)
                                        *need_rexmit = true;
                        }
                }
                /* the selected set must match the size computed above */
                if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
                        kfree_skb(skb);
                        return NULL;
                }
                break;
        }

        if (WARN_ON(!csum || !csum_start)) {
                kfree_skb(skb);
                return NULL;
        }

        *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
                                IPPROTO_ICMPV6,
                                csum_partial(csum_start, mld_hdr_size, 0));
        skb_put(skb, mld_hdr_size);
        __skb_pull(skb, sizeof(*eth));

out:
        return skb;
}
#endif
564
565 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
566                                                 struct net_bridge_port_group *pg,
567                                                 struct br_ip *ip_dst,
568                                                 struct br_ip *group,
569                                                 bool with_srcs, bool over_lmqt,
570                                                 u8 sflag, u8 *igmp_type,
571                                                 bool *need_rexmit)
572 {
573         __be32 ip4_dst;
574
575         switch (group->proto) {
576         case htons(ETH_P_IP):
577                 ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
578                 return br_ip4_multicast_alloc_query(br, pg,
579                                                     ip4_dst, group->u.ip4,
580                                                     with_srcs, over_lmqt,
581                                                     sflag, igmp_type,
582                                                     need_rexmit);
583 #if IS_ENABLED(CONFIG_IPV6)
584         case htons(ETH_P_IPV6): {
585                 struct in6_addr ip6_dst;
586
587                 if (ip_dst)
588                         ip6_dst = ip_dst->u.ip6;
589                 else
590                         ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
591                                       htonl(1));
592
593                 return br_ip6_multicast_alloc_query(br, pg,
594                                                     &ip6_dst, &group->u.ip6,
595                                                     with_srcs, over_lmqt,
596                                                     sflag, igmp_type,
597                                                     need_rexmit);
598         }
599 #endif
600         }
601         return NULL;
602 }
603
/* Find or create the MDB entry for @group.  Called with
 * br->multicast_lock held (allocation is GFP_ATOMIC).  If the table has
 * reached hash_max, multicast snooping is disabled bridge-wide and
 * ERR_PTR(-E2BIG) is returned - callers must check with IS_ERR().
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
                                                    struct br_ip *group)
{
        struct net_bridge_mdb_entry *mp;
        int err;

        mp = br_mdb_ip_get(br, group);
        if (mp)
                return mp;

        if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
                br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
                return ERR_PTR(-E2BIG);
        }

        mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
        if (unlikely(!mp))
                return ERR_PTR(-ENOMEM);

        mp->br = br;
        mp->addr = *group;
        timer_setup(&mp->timer, br_multicast_group_expired, 0);
        err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
                                            br_mdb_rht_params);
        if (err) {
                kfree(mp);
                mp = ERR_PTR(err);
        } else {
                hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
        }

        return mp;
}
637
/* Timer handler: a source entry's timer fired.  In INCLUDE mode an
 * expired source is deleted, and if it was the group's last source the
 * whole port group is removed as well.  Sources of EXCLUDE-mode groups
 * are left untouched here.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
        struct net_bridge_group_src *src = from_timer(src, t, timer);
        struct net_bridge_port_group *pg;
        struct net_bridge *br = src->br;

        spin_lock(&br->multicast_lock);
        /* skip if already unlinked, bridge down, or timer re-armed */
        if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
            timer_pending(&src->timer))
                goto out;

        pg = src->pg;
        if (pg->filter_mode == MCAST_INCLUDE) {
                br_multicast_del_group_src(src);
                if (!hlist_empty(&pg->src_list))
                        goto out;
                br_multicast_find_del_pg(br, pg);
        }
out:
        spin_unlock(&br->multicast_lock);
}
659
660 static struct net_bridge_group_src *
661 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
662 {
663         struct net_bridge_group_src *ent;
664
665         switch (ip->proto) {
666         case htons(ETH_P_IP):
667                 hlist_for_each_entry(ent, &pg->src_list, node)
668                         if (ip->u.ip4 == ent->addr.u.ip4)
669                                 return ent;
670                 break;
671 #if IS_ENABLED(CONFIG_IPV6)
672         case htons(ETH_P_IPV6):
673                 hlist_for_each_entry(ent, &pg->src_list, node)
674                         if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
675                                 return ent;
676                 break;
677 #endif
678         }
679
680         return NULL;
681 }
682
/* Allocate a new source entry and link it onto @pg's source list.
 * Returns NULL when the per-group source limit is reached, when the
 * address is not a valid unicast source (zero-net/unspecified or
 * multicast), or on allocation failure.  Called with
 * br->multicast_lock held (GFP_ATOMIC allocation).
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
        struct net_bridge_group_src *grp_src;

        if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
                return NULL;

        switch (src_ip->proto) {
        case htons(ETH_P_IP):
                if (ipv4_is_zeronet(src_ip->u.ip4) ||
                    ipv4_is_multicast(src_ip->u.ip4))
                        return NULL;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                if (ipv6_addr_any(&src_ip->u.ip6) ||
                    ipv6_addr_is_multicast(&src_ip->u.ip6))
                        return NULL;
                break;
#endif
        }

        grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
        if (unlikely(!grp_src))
                return NULL;

        grp_src->pg = pg;
        grp_src->br = pg->port->br;
        grp_src->addr = *src_ip;
        timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

        hlist_add_head_rcu(&grp_src->node, &pg->src_list);
        pg->src_ents++;

        return grp_src;
}
720
/* Allocate a port group for @group on @port, link it into the port's
 * mglist, and chain @next behind it (the caller splices the result into
 * the MDB entry's port list with rcu_assign_pointer()).  When @src is
 * NULL the group's source MAC is set to broadcast, which matches any
 * host in br_port_group_equal().  Returns NULL on allocation failure.
 * Called with br->multicast_lock held (GFP_ATOMIC allocation).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
                        unsigned char flags,
                        const unsigned char *src,
                        u8 filter_mode)
{
        struct net_bridge_port_group *p;

        p = kzalloc(sizeof(*p), GFP_ATOMIC);
        if (unlikely(!p))
                return NULL;

        p->addr = *group;
        p->port = port;
        p->flags = flags;
        p->filter_mode = filter_mode;
        INIT_HLIST_HEAD(&p->src_list);
        rcu_assign_pointer(p->next, next);
        timer_setup(&p->timer, br_multicast_port_group_expired, 0);
        timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
        hlist_add_head(&p->mglist, &port->mglist);

        if (src)
                memcpy(p->eth_addr, src, ETH_ALEN);
        else
                eth_broadcast_addr(p->eth_addr);

        return p;
}
752
753 static bool br_port_group_equal(struct net_bridge_port_group *p,
754                                 struct net_bridge_port *port,
755                                 const unsigned char *src)
756 {
757         if (p->port != port)
758                 return false;
759
760         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
761                 return true;
762
763         return ether_addr_equal(src, p->eth_addr);
764 }
765
/* Mark the bridge device itself (the host) as joined to @mp, notifying
 * userspace on the first join when @notify is set, and (re)arm the
 * entry's membership timer unconditionally.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
        if (!mp->host_joined) {
                mp->host_joined = true;
                if (notify)
                        br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
        }
        mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
775
776 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
777 {
778         if (!mp->host_joined)
779                 return;
780
781         mp->host_joined = false;
782         if (notify)
783                 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
784 }
785
786 static int br_multicast_add_group(struct net_bridge *br,
787                                   struct net_bridge_port *port,
788                                   struct br_ip *group,
789                                   const unsigned char *src,
790                                   u8 filter_mode)
791 {
792         struct net_bridge_port_group __rcu **pp;
793         struct net_bridge_port_group *p;
794         struct net_bridge_mdb_entry *mp;
795         unsigned long now = jiffies;
796         int err;
797
798         spin_lock(&br->multicast_lock);
799         if (!netif_running(br->dev) ||
800             (port && port->state == BR_STATE_DISABLED))
801                 goto out;
802
803         mp = br_multicast_new_group(br, group);
804         err = PTR_ERR(mp);
805         if (IS_ERR(mp))
806                 goto err;
807
808         if (!port) {
809                 br_multicast_host_join(mp, true);
810                 goto out;
811         }
812
813         for (pp = &mp->ports;
814              (p = mlock_dereference(*pp, br)) != NULL;
815              pp = &p->next) {
816                 if (br_port_group_equal(p, port, src))
817                         goto found;
818                 if ((unsigned long)p->port < (unsigned long)port)
819                         break;
820         }
821
822         p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
823         if (unlikely(!p))
824                 goto err;
825         rcu_assign_pointer(*pp, p);
826         br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
827
828 found:
829         mod_timer(&p->timer, now + br->multicast_membership_interval);
830
831 out:
832         err = 0;
833
834 err:
835         spin_unlock(&br->multicast_lock);
836         return err;
837 }
838
839 static int br_ip4_multicast_add_group(struct net_bridge *br,
840                                       struct net_bridge_port *port,
841                                       __be32 group,
842                                       __u16 vid,
843                                       const unsigned char *src,
844                                       bool igmpv2)
845 {
846         struct br_ip br_group;
847         u8 filter_mode;
848
849         if (ipv4_is_local_multicast(group))
850                 return 0;
851
852         memset(&br_group, 0, sizeof(br_group));
853         br_group.u.ip4 = group;
854         br_group.proto = htons(ETH_P_IP);
855         br_group.vid = vid;
856         filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
857
858         return br_multicast_add_group(br, port, &br_group, src, filter_mode);
859 }
860
861 #if IS_ENABLED(CONFIG_IPV6)
862 static int br_ip6_multicast_add_group(struct net_bridge *br,
863                                       struct net_bridge_port *port,
864                                       const struct in6_addr *group,
865                                       __u16 vid,
866                                       const unsigned char *src,
867                                       bool mldv1)
868 {
869         struct br_ip br_group;
870         u8 filter_mode;
871
872         if (ipv6_addr_is_ll_all_nodes(group))
873                 return 0;
874
875         memset(&br_group, 0, sizeof(br_group));
876         br_group.u.ip6 = *group;
877         br_group.proto = htons(ETH_P_IPV6);
878         br_group.vid = vid;
879         filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
880
881         return br_multicast_add_group(br, port, &br_group, src, filter_mode);
882 }
883 #endif
884
/* Per-port multicast router presence timer expired: forget that a router
 * sits behind this port, unless the port's router type is fixed
 * (disabled or permanent) or the timer was re-armed in the meantime.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_port *port =
			from_timer(port, t, multicast_router_timer);
	struct net_bridge *br = port->br;

	spin_lock(&br->multicast_lock);
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&port->multicast_router_timer))
		goto out;

	__del_port_router(port);
out:
	spin_unlock(&br->multicast_lock);
}
901
902 static void br_mc_router_state_change(struct net_bridge *p,
903                                       bool is_mc_router)
904 {
905         struct switchdev_attr attr = {
906                 .orig_dev = p->dev,
907                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
908                 .flags = SWITCHDEV_F_DEFER,
909                 .u.mrouter = is_mc_router,
910         };
911
912         switchdev_port_attr_set(p->dev, &attr);
913 }
914
/* Bridge-level multicast router timer expired: the bridge stops acting as
 * a multicast router, unless its router type is fixed (disabled/permanent)
 * or the timer was re-armed in the meantime.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, multicast_router_timer);

	spin_lock(&br->multicast_lock);
	if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    br->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(&br->multicast_router_timer))
		goto out;

	br_mc_router_state_change(br, false);
out:
	spin_unlock(&br->multicast_lock);
}
929
930 static void br_multicast_querier_expired(struct net_bridge *br,
931                                          struct bridge_mcast_own_query *query)
932 {
933         spin_lock(&br->multicast_lock);
934         if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
935                 goto out;
936
937         br_multicast_start_querier(br, query);
938
939 out:
940         spin_unlock(&br->multicast_lock);
941 }
942
/* Timer callback: IPv4 other-querier presence expired, try to take over. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

	br_multicast_querier_expired(br, &br->ip4_own_query);
}
949
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: IPv6 other-querier presence expired, try to take over. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

	br_multicast_querier_expired(br, &br->ip6_own_query);
}
#endif
958
959 static void br_multicast_select_own_querier(struct net_bridge *br,
960                                             struct br_ip *ip,
961                                             struct sk_buff *skb)
962 {
963         if (ip->proto == htons(ETH_P_IP))
964                 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
965 #if IS_ENABLED(CONFIG_IPV6)
966         else
967                 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
968 #endif
969 }
970
/* Build and send one query packet.
 *
 * @port: egress port, or NULL to deliver the query to the bridge itself
 * @pg: port group for group-and-source specific queries, may be NULL
 * @ip_dst: explicit destination IP, may be NULL
 * @group: queried group address
 * @with_srcs: include the group's source list (IGMPv3/MLDv2 style query)
 * @sflag: suppress flag; nonzero also selects the sources whose timers are
 *         over the last member query time (LMQT) — see over_lmqt below
 * @need_rexmit: out-param, set when some sources still need retransmission
 *
 * When sending a source-specific query on a port with sflag set, a second
 * packet is sent for the sources still under LMQT (the again_under_lmqt
 * loop), so both source subsets get queried with the right suppress bit.
 */
static void __br_multicast_send_query(struct net_bridge *br,
				      struct net_bridge_port *port,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

again_under_lmqt:
	skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
				       over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (port) {
		skb->dev = port->dev;
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		/* Local delivery: the bridge is the querier host itself. */
		br_multicast_select_own_querier(br, group, skb);
		br_multicast_count(br, port, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1010
/* Send a general query for the family that @own_query belongs to and
 * re-arm the own-query timer (startup interval while still within the
 * startup query count, regular interval afterwards). Skipped entirely
 * when another querier is active on the segment.
 */
static void br_multicast_send_query(struct net_bridge *br,
				    struct net_bridge_port *port,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct br_ip br_group;
	unsigned long time;

	if (!netif_running(br->dev) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		return;

	/* Only the address union is cleared here (general query => zero
	 * group address); NOTE(review): br_group.vid is left unset —
	 * presumably unused for general queries, verify in alloc path.
	 */
	memset(&br_group.u, 0, sizeof(br_group.u));

	/* Identify the family by which own_query struct was handed in. */
	if (port ? (own_query == &port->ip4_own_query) :
		   (own_query == &br->ip4_own_query)) {
		other_query = &br->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		other_query = &br->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* Defer to a foreign querier while its presence timer runs. */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
				  NULL);

	time = jiffies;
	time += own_query->startup_sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1049
1050 static void
1051 br_multicast_port_query_expired(struct net_bridge_port *port,
1052                                 struct bridge_mcast_own_query *query)
1053 {
1054         struct net_bridge *br = port->br;
1055
1056         spin_lock(&br->multicast_lock);
1057         if (port->state == BR_STATE_DISABLED ||
1058             port->state == BR_STATE_BLOCKING)
1059                 goto out;
1060
1061         if (query->startup_sent < br->multicast_startup_query_count)
1062                 query->startup_sent++;
1063
1064         br_multicast_send_query(port->br, port, query);
1065
1066 out:
1067         spin_unlock(&br->multicast_lock);
1068 }
1069
/* Timer callback: per-port IPv4 own-query interval elapsed. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip4_own_query);
}
1076
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: per-port IPv6 own-query interval elapsed. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

	br_multicast_port_query_expired(port, &port->ip6_own_query);
}
#endif
1085
/* Timer callback: retransmit pending group / group-and-source specific
 * queries for a port group (IGMPv3/MLDv2 fast-leave handling). Re-arms
 * itself while there are group-query retransmissions left or while some
 * source still needs another query.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->port->br;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !br_opt_get(br, BROPT_MULTICAST_QUERIER))
		goto out;

	if (pg->addr.proto == htons(ETH_P_IP))
		other_query = &br->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &br->ip6_other_query;
#endif

	/* Defer to a foreign querier while its presence timer runs. */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* Group-specific query (suppress flag set, no sources). */
		__br_multicast_send_query(br, pg->port, pg, &pg->addr,
					  &pg->addr, false, 1, NULL);
	}
	/* Group-and-source specific query for sources with pending rexmit. */
	__br_multicast_send_query(br, pg->port, pg, &pg->addr,
				  &pg->addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
					     br->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1123
1124 static void br_mc_disabled_update(struct net_device *dev, bool value)
1125 {
1126         struct switchdev_attr attr = {
1127                 .orig_dev = dev,
1128                 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1129                 .flags = SWITCHDEV_F_DEFER,
1130                 .u.mc_disabled = !value,
1131         };
1132
1133         switchdev_port_attr_set(dev, &attr);
1134 }
1135
/* Initialize per-port multicast state when a port joins the bridge:
 * router type, timers, switchdev snooping state and per-cpu counters.
 * Returns 0 on success or -ENOMEM.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

	timer_setup(&port->multicast_router_timer,
		    br_multicast_router_expired, 0);
	timer_setup(&port->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&port->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
	br_mc_disabled_update(port->dev,
			      br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1157
/* Tear down per-port multicast state when a port leaves the bridge:
 * delete all remaining port groups, stop the router timer and free the
 * per-cpu statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	del_timer_sync(&port->multicast_router_timer);
	free_percpu(port->mcast_stats);
}
1172
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately. The try_to_del/del dance only re-arms the
 * timer if it was actually pending or could be stopped, i.e. it does not
 * resurrect a timer whose handler is currently running elsewhere.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1181
/* Enable multicast processing on @port: kick off own queries for both
 * families and re-add a permanent mrouter port to the router list.
 * Caller holds br->multicast_lock.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
		return;

	br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&port->ip6_own_query);
#endif
	if (port->multicast_router == MDB_RTR_TYPE_PERM &&
	    hlist_unhashed(&port->rlist))
		br_multicast_add_router(br, port);
}
1197
1198 void br_multicast_enable_port(struct net_bridge_port *port)
1199 {
1200         struct net_bridge *br = port->br;
1201
1202         spin_lock(&br->multicast_lock);
1203         __br_multicast_enable_port(port);
1204         spin_unlock(&br->multicast_lock);
1205 }
1206
/* Disable multicast processing on @port: delete all its non-permanent
 * port groups, remove it from the router list and stop its timers.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_find_del_pg(br, pg);

	__del_port_router(port);

	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
1227
/* Parse an IGMPv3 membership report and process each group record.
 * Every record's bounds are validated with ip_mc_may_pull() before use.
 * TO_IN{}/IS_IN{} with an empty source list means "leave"; every other
 * known record type is handled as a join. Returns 0 or a negative errno
 * on malformed packets / join failure.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* Skip over the source addresses (4 bytes each). */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* INCLUDE {} == not interested in this group. */
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src, true);
			if (err)
				break;
		}
	}

	return err;
}
1291
#if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 listener report and process each multicast record.
 * The nsrcs field is read via skb_header_pointer() after an explicit
 * length check, then the whole record is validated with
 * ipv6_mc_may_pull() before use. TO_IN{}/IS_IN{} with an empty source
 * list means "leave"; every other known record type is handled as a
 * join. Returns 0 or a negative errno.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge *br,
					struct net_bridge_port *port,
					struct sk_buff *skb,
					u16 vid)
{
	unsigned int nsrcs_offset;
	const unsigned char *src;
	struct icmp6hdr *icmp6h;
	struct mld2_grec *grec;
	unsigned int grec_len;
	int i;
	int len;
	int num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
		return -EINVAL;

	icmp6h = icmp6_hdr(skb);
	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
	len = skb_transport_offset(skb) + sizeof(*icmp6h);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		/* Ensure the nsrcs field itself lies within the packet. */
		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* Record size including its variable-length source array. */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		/* We treat these as MLDv1 reports for now. */
		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* INCLUDE {} == not interested in this group. */
			br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
						     vid, src);
		} else {
			err = br_ip6_multicast_add_group(br, port,
							 &grec->grec_mca, vid,
							 src, true);
			if (err)
				break;
		}
	}

	return err;
}
#endif
1371
/* IGMP querier election: the querier with the lowest source IP wins.
 * Returns true if @saddr is accepted as the selected querier (updating
 * the stored address and port), false if a better querier is already
 * known and its timers are still running.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* No querier activity at all yet: accept unconditionally. */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	/* No querier address recorded yet. */
	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	/* Lower (or equal, i.e. same querier) address wins the election. */
	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1396
#if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election: the querier with the lowest source address wins.
 * Returns true if @saddr is accepted as the selected querier (updating
 * the stored address and port), false otherwise.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	/* No querier activity at all yet: accept unconditionally. */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	/* Lower (or equal, i.e. same querier) address wins the election. */
	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
#endif
1420
1421 static bool br_multicast_select_querier(struct net_bridge *br,
1422                                         struct net_bridge_port *port,
1423                                         struct br_ip *saddr)
1424 {
1425         switch (saddr->proto) {
1426         case htons(ETH_P_IP):
1427                 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1428 #if IS_ENABLED(CONFIG_IPV6)
1429         case htons(ETH_P_IPV6):
1430                 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1431 #endif
1432         }
1433
1434         return false;
1435 }
1436
1437 static void
1438 br_multicast_update_query_timer(struct net_bridge *br,
1439                                 struct bridge_mcast_other_query *query,
1440                                 unsigned long max_delay)
1441 {
1442         if (!timer_pending(&query->timer))
1443                 query->delay_time = jiffies + max_delay;
1444
1445         mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
1446 }
1447
1448 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1449                                            bool is_mc_router)
1450 {
1451         struct switchdev_attr attr = {
1452                 .orig_dev = p->dev,
1453                 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1454                 .flags = SWITCHDEV_F_DEFER,
1455                 .u.mrouter = is_mc_router,
1456         };
1457
1458         switchdev_port_attr_set(p->dev, &attr);
1459 }
1460
1461 /*
1462  * Add port to router_list
1463  *  list is maintained ordered by pointer value
1464  *  and locked by br->multicast_lock and RCU
1465  */
1466 static void br_multicast_add_router(struct net_bridge *br,
1467                                     struct net_bridge_port *port)
1468 {
1469         struct net_bridge_port *p;
1470         struct hlist_node *slot = NULL;
1471
1472         if (!hlist_unhashed(&port->rlist))
1473                 return;
1474
1475         hlist_for_each_entry(p, &br->router_list, rlist) {
1476                 if ((unsigned long) port >= (unsigned long) p)
1477                         break;
1478                 slot = &p->rlist;
1479         }
1480
1481         if (slot)
1482                 hlist_add_behind_rcu(&port->rlist, slot);
1483         else
1484                 hlist_add_head_rcu(&port->rlist, &br->router_list);
1485         br_rtr_notify(br->dev, port, RTM_NEWMDB);
1486         br_port_mc_router_state_change(port, true);
1487 }
1488
/* Note that a multicast router was heard on @port (or on the bridge
 * itself when @port is NULL) and (re)arm the corresponding presence
 * timer. Ports with a fixed router type (disabled/permanent) are left
 * untouched.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* Transition to router state only on first mark. */
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1513
/* Handle a general query from another querier: if @saddr wins the
 * querier election, record its presence and mark the ingress port
 * (or the bridge itself) as a multicast router.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (br_multicast_select_querier(br, port, saddr)) {
		br_multicast_update_query_timer(br, query, max_delay);
		br_multicast_mark_router(br, port);
	}
}
1526
/* Process a received IGMP query. General queries feed the querier
 * election; group-specific queries lower the membership timers of the
 * matching MDB entry and its port groups so non-responders age out
 * within last_member_count * max response time.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	/* Distinguish IGMP version by payload length. */
	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		/* IGMPv1 query: no max response code; treat as general. */
		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* Source-specific queries are not processed here. */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: run the querier election. */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Shorten timers only; never extend a sooner expiry. */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1602
#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query. General queries feed the querier
 * election; group-specific queries lower the membership timers of the
 * matching MDB entry and its port groups. Returns 0 or -EINVAL on a
 * truncated packet.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	/* Distinguish MLD version by payload length. */
	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* Source-specific queries are not processed here. */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* General query: run the querier election. */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Shorten timers only; never extend a sooner expiry. */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
#endif
1687
/* Process a leave message for @group arriving via @port (NULL means the
 * bridge device itself left). With fast-leave enabled on the port the
 * matching port group is deleted immediately; otherwise the "last member"
 * query timers are shortened so the membership expires quickly unless
 * another listener re-reports.
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: delete the matching, non-permanent port group
		 * right away instead of arming the timers below.
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* Another querier is present; it is responsible for sending the
	 * group-specific queries, so leave the timers alone.
	 */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		/* We are the querier: emit a group-specific query and put
		 * the matching port group on the last-member interval.
		 */
		__br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			/* Only ever shorten the timer, never extend it. */
			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* The bridge itself left: shorten the host-joined timer. */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		/* Only ever shorten the timer, never extend it. */
		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1790
1791 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1792                                          struct net_bridge_port *port,
1793                                          __be32 group,
1794                                          __u16 vid,
1795                                          const unsigned char *src)
1796 {
1797         struct br_ip br_group;
1798         struct bridge_mcast_own_query *own_query;
1799
1800         if (ipv4_is_local_multicast(group))
1801                 return;
1802
1803         own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1804
1805         memset(&br_group, 0, sizeof(br_group));
1806         br_group.u.ip4 = group;
1807         br_group.proto = htons(ETH_P_IP);
1808         br_group.vid = vid;
1809
1810         br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1811                                  own_query, src);
1812 }
1813
1814 #if IS_ENABLED(CONFIG_IPV6)
1815 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1816                                          struct net_bridge_port *port,
1817                                          const struct in6_addr *group,
1818                                          __u16 vid,
1819                                          const unsigned char *src)
1820 {
1821         struct br_ip br_group;
1822         struct bridge_mcast_own_query *own_query;
1823
1824         if (ipv6_addr_is_ll_all_nodes(group))
1825                 return;
1826
1827         own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1828
1829         memset(&br_group, 0, sizeof(br_group));
1830         br_group.u.ip6 = *group;
1831         br_group.proto = htons(ETH_P_IPV6);
1832         br_group.vid = vid;
1833
1834         br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1835                                  own_query, src);
1836 }
1837 #endif
1838
1839 static void br_multicast_err_count(const struct net_bridge *br,
1840                                    const struct net_bridge_port *p,
1841                                    __be16 proto)
1842 {
1843         struct bridge_mcast_stats __percpu *stats;
1844         struct bridge_mcast_stats *pstats;
1845
1846         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
1847                 return;
1848
1849         if (p)
1850                 stats = p->mcast_stats;
1851         else
1852                 stats = br->mcast_stats;
1853         if (WARN_ON(!stats))
1854                 return;
1855
1856         pstats = this_cpu_ptr(stats);
1857
1858         u64_stats_update_begin(&pstats->syncp);
1859         switch (proto) {
1860         case htons(ETH_P_IP):
1861                 pstats->mstats.igmp_parse_errors++;
1862                 break;
1863 #if IS_ENABLED(CONFIG_IPV6)
1864         case htons(ETH_P_IPV6):
1865                 pstats->mstats.mld_parse_errors++;
1866                 break;
1867 #endif
1868         }
1869         u64_stats_update_end(&pstats->syncp);
1870 }
1871
1872 static void br_multicast_pim(struct net_bridge *br,
1873                              struct net_bridge_port *port,
1874                              const struct sk_buff *skb)
1875 {
1876         unsigned int offset = skb_transport_offset(skb);
1877         struct pimhdr *pimhdr, _pimhdr;
1878
1879         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1880         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1881             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1882                 return;
1883
1884         br_multicast_mark_router(br, port);
1885 }
1886
1887 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1888                                     struct net_bridge_port *port,
1889                                     struct sk_buff *skb)
1890 {
1891         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1892             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1893                 return -ENOMSG;
1894
1895         br_multicast_mark_router(br, port);
1896
1897         return 0;
1898 }
1899
/* Parse and act on a potential IGMP packet received on @port.
 * Returns 0 for non-IGMP or successfully handled packets, or a negative
 * error for malformed ones (also counted in the parse-error stats).
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Not IGMP: restrict non-link-local multicast to router
		 * ports, and still snoop PIM hellos and MRD advertisements.
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Membership reports are only forwarded to router ports. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
						 true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1954
1955 #if IS_ENABLED(CONFIG_IPV6)
1956 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1957                                     struct net_bridge_port *port,
1958                                     struct sk_buff *skb)
1959 {
1960         int ret;
1961
1962         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1963                 return -ENOMSG;
1964
1965         ret = ipv6_mc_check_icmpv6(skb);
1966         if (ret < 0)
1967                 return ret;
1968
1969         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1970                 return -ENOMSG;
1971
1972         br_multicast_mark_router(br, port);
1973
1974         return 0;
1975 }
1976
/* Parse and act on a potential MLD packet received on @port.
 * Returns 0 for non-MLD or successfully handled packets, or a negative
 * error for malformed ones (also counted in the parse-error stats).
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* Not MLD: restrict non-all-nodes multicast to router
		 * ports and still snoop MRD advertisements.
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* Membership reports are only forwarded to router ports. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
2035
2036 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
2037                      struct sk_buff *skb, u16 vid)
2038 {
2039         int ret = 0;
2040
2041         BR_INPUT_SKB_CB(skb)->igmp = 0;
2042         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
2043
2044         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2045                 return 0;
2046
2047         switch (skb->protocol) {
2048         case htons(ETH_P_IP):
2049                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
2050                 break;
2051 #if IS_ENABLED(CONFIG_IPV6)
2052         case htons(ETH_P_IPV6):
2053                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
2054                 break;
2055 #endif
2056         }
2057
2058         return ret;
2059 }
2060
/* Own query timer fired: count another startup query (capped at the
 * configured amount) and send the next general query ourselves.
 */
static void br_multicast_query_expired(struct net_bridge *br,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	/* We are acting as querier; forget any previously seen querier port. */
	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
2073
2074 static void br_ip4_multicast_query_expired(struct timer_list *t)
2075 {
2076         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
2077
2078         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
2079 }
2080
2081 #if IS_ENABLED(CONFIG_IPV6)
2082 static void br_ip6_multicast_query_expired(struct timer_list *t)
2083 {
2084         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
2085
2086         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
2087 }
2088 #endif
2089
2090 static void __grp_src_gc(struct hlist_head *head)
2091 {
2092         struct net_bridge_group_src *ent;
2093         struct hlist_node *tmp;
2094
2095         hlist_for_each_entry_safe(ent, tmp, head, del_node) {
2096                 hlist_del_init(&ent->del_node);
2097                 del_timer_sync(&ent->timer);
2098                 kfree_rcu(ent, rcu);
2099         }
2100 }
2101
/* Deferred freeing of expired group source entries: splice the gc list
 * out under the multicast lock, then free the entries outside of it
 * (del_timer_sync() must not run under the spinlock).
 */
static void br_multicast_src_gc(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     src_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->src_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	__grp_src_gc(&deleted_head);
}
2114
/* Initialize all multicast snooping state of a freshly created bridge. */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	/* Robustness variable and startup query count (IGMP/MLD default 2). */
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* Standard IGMP/MLD timing defaults: 125s query interval, 10s max
	 * response time, 255s other-querier-present, 260s membership.
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* Snooping defaults to on; IPv6 address presence assumed until
	 * proven otherwise.
	 */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->src_gc_list);
	INIT_WORK(&br->src_gc_work, br_multicast_src_gc);
}
2158
2159 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
2160 {
2161         struct in_device *in_dev = in_dev_get(br->dev);
2162
2163         if (!in_dev)
2164                 return;
2165
2166         __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2167         in_dev_put(in_dev);
2168 }
2169
#if IS_ENABLED(CONFIG_IPV6)
/* Join ff02::6a, the IPv6 all-snoopers address (RFC 4286), so that
 * multicast router discovery advertisements reach the bridge.
 */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
2183
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device. */
static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
2189
/* Leave the IGMP all-snoopers group (224.0.0.106) again. */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	/* The group was joined earlier, so in_dev should still exist. */
	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
2200
#if IS_ENABLED(CONFIG_IPV6)
/* Leave ff02::6a, the IPv6 all-snoopers address (RFC 4286), again. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
2214
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device. */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
2220
2221 static void __br_multicast_open(struct net_bridge *br,
2222                                 struct bridge_mcast_own_query *query)
2223 {
2224         query->startup_sent = 0;
2225
2226         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2227                 return;
2228
2229         mod_timer(&query->timer, jiffies);
2230 }
2231
/* Bring multicast snooping up when the bridge device is opened: join the
 * all-snoopers groups (if snooping is enabled) and start own queries.
 */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
2242
/* Stop all bridge-level multicast timers and, if snooping is enabled,
 * leave the all-snoopers groups joined in br_multicast_open().
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}
2256
/* Tear down all mdb state when the bridge device is deleted. Entries are
 * unlinked under the multicast lock and freed after an RCU grace period;
 * pending source-entry garbage collection is flushed as well.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		kfree_rcu(mp, rcu);
	}
	/* Free queued source entries outside the lock. */
	hlist_move_list(&br->src_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	__grp_src_gc(&deleted_head);
	cancel_work_sync(&br->src_gc_work);

	/* Wait for all outstanding kfree_rcu() callbacks to finish. */
	rcu_barrier();
}
2279
2280 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
2281 {
2282         int err = -EINVAL;
2283
2284         spin_lock_bh(&br->multicast_lock);
2285
2286         switch (val) {
2287         case MDB_RTR_TYPE_DISABLED:
2288         case MDB_RTR_TYPE_PERM:
2289                 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
2290                 del_timer(&br->multicast_router_timer);
2291                 br->multicast_router = val;
2292                 err = 0;
2293                 break;
2294         case MDB_RTR_TYPE_TEMP_QUERY:
2295                 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
2296                         br_mc_router_state_change(br, false);
2297                 br->multicast_router = val;
2298                 err = 0;
2299                 break;
2300         }
2301
2302         spin_unlock_bh(&br->multicast_lock);
2303
2304         return err;
2305 }
2306
/* Remove @p from the router port list and notify userspace and the port's
 * mc-router state change hook. No-op when the port is not on the list.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
2319
/* Set the per-port multicast router mode from user space.
 * Returns 0 on success or -EINVAL for an unknown mode.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* Never a router port: drop from the list, stop the timer. */
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Back to automatic detection via queries. */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* Always a router port: no timer needed. */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* Router port until the timer expires. */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2363
/* Restart own general queries and enable the matching per-port own-query
 * timer on every forwarding port. Called under multicast_lock (see
 * br_multicast_set_querier()).
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
2386
/* Enable or disable multicast snooping on the bridge. Always returns 0;
 * a no-op when the requested state matches the current one.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	/* Only (re)arm timers when the device is actually up. */
	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2414
/* Report whether multicast snooping is enabled on bridge device @dev. */
bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);
2422
2423 bool br_multicast_router(const struct net_device *dev)
2424 {
2425         struct net_bridge *br = netdev_priv(dev);
2426         bool is_router;
2427
2428         spin_lock_bh(&br->multicast_lock);
2429         is_router = br_multicast_is_router(br);
2430         spin_unlock_bh(&br->multicast_lock);
2431         return is_router;
2432 }
2433 EXPORT_SYMBOL_GPL(br_multicast_router);
2434
2435 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2436 {
2437         unsigned long max_delay;
2438
2439         val = !!val;
2440
2441         spin_lock_bh(&br->multicast_lock);
2442         if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
2443                 goto unlock;
2444
2445         br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
2446         if (!val)
2447                 goto unlock;
2448
2449         max_delay = br->multicast_query_response_interval;
2450
2451         if (!timer_pending(&br->ip4_other_query.timer))
2452                 br->ip4_other_query.delay_time = jiffies + max_delay;
2453
2454         br_multicast_start_querier(br, &br->ip4_own_query);
2455
2456 #if IS_ENABLED(CONFIG_IPV6)
2457         if (!timer_pending(&br->ip6_other_query.timer))
2458                 br->ip6_other_query.delay_time = jiffies + max_delay;
2459
2460         br_multicast_start_querier(br, &br->ip6_own_query);
2461 #endif
2462
2463 unlock:
2464         spin_unlock_bh(&br->multicast_lock);
2465
2466         return 0;
2467 }
2468
2469 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2470 {
2471         /* Currently we support only version 2 and 3 */
2472         switch (val) {
2473         case 2:
2474         case 3:
2475                 break;
2476         default:
2477                 return -EINVAL;
2478         }
2479
2480         spin_lock_bh(&br->multicast_lock);
2481         br->multicast_igmp_version = val;
2482         spin_unlock_bh(&br->multicast_lock);
2483
2484         return 0;
2485 }
2486
2487 #if IS_ENABLED(CONFIG_IPV6)
2488 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2489 {
2490         /* Currently we support version 1 and 2 */
2491         switch (val) {
2492         case 1:
2493         case 2:
2494                 break;
2495         default:
2496                 return -EINVAL;
2497         }
2498
2499         spin_lock_bh(&br->multicast_lock);
2500         br->multicast_mld_version = val;
2501         spin_unlock_bh(&br->multicast_lock);
2502
2503         return 0;
2504 }
2505 #endif
2506
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:        The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 * - on allocation failure the returned list (and count) may be truncated
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* Skip the port we are adjacent to. */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;	/* OOM: return what we have */

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2563
2564 /**
2565  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2566  * @dev: The bridge port providing the bridge on which to check for a querier
2567  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2568  *
2569  * Checks whether the given interface has a bridge on top and if so returns
2570  * true if a valid querier exists anywhere on the bridged link layer.
2571  * Otherwise returns false.
2572  */
2573 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2574 {
2575         struct net_bridge *br;
2576         struct net_bridge_port *port;
2577         struct ethhdr eth;
2578         bool ret = false;
2579
2580         rcu_read_lock();
2581         if (!netif_is_bridge_port(dev))
2582                 goto unlock;
2583
2584         port = br_port_get_rcu(dev);
2585         if (!port || !port->br)
2586                 goto unlock;
2587
2588         br = port->br;
2589
2590         memset(&eth, 0, sizeof(eth));
2591         eth.h_proto = htons(proto);
2592
2593         ret = br_multicast_querier_exists(br, &eth);
2594
2595 unlock:
2596         rcu_read_unlock();
2597         return ret;
2598 }
2599 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2600
2601 /**
2602  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2603  * @dev: The bridge port adjacent to which to check for a querier
2604  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2605  *
2606  * Checks whether the given interface has a bridge on top and if so returns
2607  * true if a selected querier is behind one of the other ports of this
2608  * bridge. Otherwise returns false.
2609  */
2610 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2611 {
2612         struct net_bridge *br;
2613         struct net_bridge_port *port;
2614         bool ret = false;
2615
2616         rcu_read_lock();
2617         if (!netif_is_bridge_port(dev))
2618                 goto unlock;
2619
2620         port = br_port_get_rcu(dev);
2621         if (!port || !port->br)
2622                 goto unlock;
2623
2624         br = port->br;
2625
2626         switch (proto) {
2627         case ETH_P_IP:
2628                 if (!timer_pending(&br->ip4_other_query.timer) ||
2629                     rcu_dereference(br->ip4_querier.port) == port)
2630                         goto unlock;
2631                 break;
2632 #if IS_ENABLED(CONFIG_IPV6)
2633         case ETH_P_IPV6:
2634                 if (!timer_pending(&br->ip6_other_query.timer) ||
2635                     rcu_dereference(br->ip6_querier.port) == port)
2636                         goto unlock;
2637                 break;
2638 #endif
2639         default:
2640                 goto unlock;
2641         }
2642
2643         ret = true;
2644 unlock:
2645         rcu_read_unlock();
2646         return ret;
2647 }
2648 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2649
/* Credit one IGMP/MLD packet of @type to the per-cpu counters in @stats.
 * @dir selects the direction slot (BR_MCAST_DIR_RX / BR_MCAST_DIR_TX).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
                               const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* IGMP message length: total IP length minus the IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are longer than the fixed-size igmphdr
			 * used by v1/v2; v1 and v2 share the size and are
			 * told apart by the code (max resp time) field,
			 * which is 0 only in v1 queries
			 */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MLD message length: IPv6 payload plus the fixed header,
		 * minus everything up to the transport header (i.e. minus
		 * the IPv6 header and any extension headers)
		 */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv1 queries are exactly sizeof(struct mld_msg);
			 * anything longer is an MLDv2 query
			 */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
2720
2721 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2722                         const struct sk_buff *skb, u8 type, u8 dir)
2723 {
2724         struct bridge_mcast_stats __percpu *stats;
2725
2726         /* if multicast_disabled is true then igmp type can't be set */
2727         if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2728                 return;
2729
2730         if (p)
2731                 stats = p->mcast_stats;
2732         else
2733                 stats = br->mcast_stats;
2734         if (WARN_ON(!stats))
2735                 return;
2736
2737         br_mcast_stats_add(stats, skb, type, dir);
2738 }
2739
2740 int br_multicast_init_stats(struct net_bridge *br)
2741 {
2742         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2743         if (!br->mcast_stats)
2744                 return -ENOMEM;
2745
2746         return 0;
2747 }
2748
/* Release the per-cpu counters allocated by br_multicast_init_stats() */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
2753
/* Fold one RX/TX counter pair from @src into @dst */
/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
2760
2761 void br_multicast_get_stats(const struct net_bridge *br,
2762                             const struct net_bridge_port *p,
2763                             struct br_mcast_stats *dest)
2764 {
2765         struct bridge_mcast_stats __percpu *stats;
2766         struct br_mcast_stats tdst;
2767         int i;
2768
2769         memset(dest, 0, sizeof(*dest));
2770         if (p)
2771                 stats = p->mcast_stats;
2772         else
2773                 stats = br->mcast_stats;
2774         if (WARN_ON(!stats))
2775                 return;
2776
2777         memset(&tdst, 0, sizeof(tdst));
2778         for_each_possible_cpu(i) {
2779                 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2780                 struct br_mcast_stats temp;
2781                 unsigned int start;
2782
2783                 do {
2784                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2785                         memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2786                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2787
2788                 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2789                 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2790                 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2791                 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2792                 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2793                 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2794                 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2795                 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2796
2797                 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2798                 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2799                 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2800                 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2801                 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2802                 tdst.mld_parse_errors += temp.mld_parse_errors;
2803         }
2804         memcpy(dest, &tdst, sizeof(*dest));
2805 }
2806
/* Set up the bridge's MDB rhashtable; entries are keyed by struct br_ip
 * as described by br_mdb_rht_params at the top of this file.
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
}
2811
/* Tear down the MDB rhashtable created by br_mdb_hash_init() */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}