pinctrl: qcom: Handle broken/missing PDC dual edge IRQs on sc7180
[linux-2.6-microblaze.git] / net / bridge / br_multicast.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Bridge multicast support.
4  *
5  * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6  */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36
/* Hash table parameters for the bridge's MDB (multicast database):
 * entries are keyed by the full struct br_ip (address + protocol + vid).
 */
static const struct rhashtable_params br_mdb_rht_params = {
        .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
        .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
        .key_len = sizeof(struct br_ip),
        .automatic_shrinking = true,
};
43
/* Forward declarations for helpers referenced before their definitions. */
static void br_multicast_start_querier(struct net_bridge *br,
                                       struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port);
static void br_ip4_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         __be32 group,
                                         __u16 vid,
                                         const unsigned char *src);

static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
                                         struct net_bridge_port *port,
                                         const struct in6_addr *group,
                                         __u16 vid, const unsigned char *src);
#endif
61
/* Look up an MDB entry by address; caller must hold rcu_read_lock(). */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
                                                      struct br_ip *dst)
{
        return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
67
/* Look up an MDB entry by address with the multicast lock held.
 *
 * The multicast_lock guarantees the entry cannot be freed out from under
 * the caller after rcu_read_unlock(); the short RCU section is only what
 * rhashtable_lookup() itself requires.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
                                           struct br_ip *dst)
{
        struct net_bridge_mdb_entry *ent;

        lockdep_assert_held_once(&br->multicast_lock);

        rcu_read_lock();
        ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
        rcu_read_unlock();

        return ent;
}
81
82 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
83                                                    __be32 dst, __u16 vid)
84 {
85         struct br_ip br_dst;
86
87         memset(&br_dst, 0, sizeof(br_dst));
88         br_dst.u.ip4 = dst;
89         br_dst.proto = htons(ETH_P_IP);
90         br_dst.vid = vid;
91
92         return br_mdb_ip_get(br, &br_dst);
93 }
94
95 #if IS_ENABLED(CONFIG_IPV6)
96 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
97                                                    const struct in6_addr *dst,
98                                                    __u16 vid)
99 {
100         struct br_ip br_dst;
101
102         memset(&br_dst, 0, sizeof(br_dst));
103         br_dst.u.ip6 = *dst;
104         br_dst.proto = htons(ETH_P_IPV6);
105         br_dst.vid = vid;
106
107         return br_mdb_ip_get(br, &br_dst);
108 }
109 #endif
110
/* Fast-path lookup of the MDB entry for an skb's destination address.
 *
 * Called from the bridge forwarding path under RCU. Returns NULL when
 * snooping is disabled, when the packet itself is IGMP/MLD (flagged in the
 * skb control block), or when the destination is neither IPv4 nor IPv6.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb, u16 vid)
{
        struct br_ip ip;

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
                return NULL;

        if (BR_INPUT_SKB_CB(skb)->igmp)
                return NULL;

        memset(&ip, 0, sizeof(ip));
        ip.proto = skb->protocol;
        ip.vid = vid;

        switch (skb->protocol) {
        case htons(ETH_P_IP):
                ip.u.ip4 = ip_hdr(skb)->daddr;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                ip.u.ip6 = ipv6_hdr(skb)->daddr;
                break;
#endif
        default:
                return NULL;
        }

        return br_mdb_ip_get_rcu(br, &ip);
}
141
/* Timer callback: an MDB group's membership interval elapsed.
 *
 * Drops the bridge device's own (host) membership and, if no port groups
 * still reference the entry, unlinks it from the hash table and list and
 * frees it after an RCU grace period.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
        struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
        struct net_bridge *br = mp->br;

        spin_lock(&br->multicast_lock);
        /* Bail if the bridge went down or the timer was re-armed meanwhile */
        if (!netif_running(br->dev) || timer_pending(&mp->timer))
                goto out;

        br_multicast_host_leave(mp, true);

        /* Port memberships still reference this entry; keep it alive */
        if (mp->ports)
                goto out;

        rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
                               br_mdb_rht_params);
        hlist_del_rcu(&mp->mdb_node);

        kfree_rcu(mp, rcu);

out:
        spin_unlock(&br->multicast_lock);
}
165
/* Remove one port group from its MDB entry's port list and notify userspace.
 *
 * Caller must hold br->multicast_lock. Walks the entry's RCU-protected
 * singly-linked port list to find @pg, unlinks it, and frees it after a
 * grace period. If the entry is left with no ports and no host join, its
 * expiry timer is fired immediately to reap it.
 */
static void br_multicast_del_pg(struct net_bridge *br,
                                struct net_bridge_port_group *pg)
{
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
        struct net_bridge_port_group __rcu **pp;

        mp = br_mdb_ip_get(br, &pg->addr);
        if (WARN_ON(!mp))
                return;

        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (p != pg)
                        continue;

                /* unlink under the lock; readers see old or new list */
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
                br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
                              p->flags);
                kfree_rcu(p, rcu);

                /* schedule immediate reap of a now-empty entry */
                if (!mp->ports && !mp->host_joined &&
                    netif_running(br->dev))
                        mod_timer(&mp->timer, jiffies);

                return;
        }

        /* pg was not on its own entry's list — inconsistent state */
        WARN_ON(1);
}
199
/* Timer callback: a port group's membership interval elapsed.
 *
 * Deletes the port group unless the bridge is down, the timer was
 * re-armed, the group was already unlinked, or it is a permanent
 * (user-installed) entry.
 */
static void br_multicast_port_group_expired(struct timer_list *t)
{
        struct net_bridge_port_group *pg = from_timer(pg, t, timer);
        struct net_bridge *br = pg->port->br;

        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
            hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
                goto out;

        br_multicast_del_pg(br, pg);

out:
        spin_unlock(&br->multicast_lock);
}
215
216 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
217                                                     __be32 group,
218                                                     u8 *igmp_type)
219 {
220         struct igmpv3_query *ihv3;
221         size_t igmp_hdr_size;
222         struct sk_buff *skb;
223         struct igmphdr *ih;
224         struct ethhdr *eth;
225         struct iphdr *iph;
226
227         igmp_hdr_size = sizeof(*ih);
228         if (br->multicast_igmp_version == 3)
229                 igmp_hdr_size = sizeof(*ihv3);
230         skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
231                                                  igmp_hdr_size + 4);
232         if (!skb)
233                 goto out;
234
235         skb->protocol = htons(ETH_P_IP);
236
237         skb_reset_mac_header(skb);
238         eth = eth_hdr(skb);
239
240         ether_addr_copy(eth->h_source, br->dev->dev_addr);
241         eth->h_dest[0] = 1;
242         eth->h_dest[1] = 0;
243         eth->h_dest[2] = 0x5e;
244         eth->h_dest[3] = 0;
245         eth->h_dest[4] = 0;
246         eth->h_dest[5] = 1;
247         eth->h_proto = htons(ETH_P_IP);
248         skb_put(skb, sizeof(*eth));
249
250         skb_set_network_header(skb, skb->len);
251         iph = ip_hdr(skb);
252
253         iph->version = 4;
254         iph->ihl = 6;
255         iph->tos = 0xc0;
256         iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
257         iph->id = 0;
258         iph->frag_off = htons(IP_DF);
259         iph->ttl = 1;
260         iph->protocol = IPPROTO_IGMP;
261         iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
262                      inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
263         iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
264         ((u8 *)&iph[1])[0] = IPOPT_RA;
265         ((u8 *)&iph[1])[1] = 4;
266         ((u8 *)&iph[1])[2] = 0;
267         ((u8 *)&iph[1])[3] = 0;
268         ip_send_check(iph);
269         skb_put(skb, 24);
270
271         skb_set_transport_header(skb, skb->len);
272         *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
273
274         switch (br->multicast_igmp_version) {
275         case 2:
276                 ih = igmp_hdr(skb);
277                 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
278                 ih->code = (group ? br->multicast_last_member_interval :
279                                     br->multicast_query_response_interval) /
280                            (HZ / IGMP_TIMER_SCALE);
281                 ih->group = group;
282                 ih->csum = 0;
283                 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
284                 break;
285         case 3:
286                 ihv3 = igmpv3_query_hdr(skb);
287                 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
288                 ihv3->code = (group ? br->multicast_last_member_interval :
289                                       br->multicast_query_response_interval) /
290                              (HZ / IGMP_TIMER_SCALE);
291                 ihv3->group = group;
292                 ihv3->qqic = br->multicast_query_interval / HZ;
293                 ihv3->nsrcs = 0;
294                 ihv3->resv = 0;
295                 ihv3->suppress = 0;
296                 ihv3->qrv = 2;
297                 ihv3->csum = 0;
298                 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
299                 break;
300         }
301
302         skb_put(skb, igmp_hdr_size);
303         __skb_pull(skb, sizeof(*eth));
304
305 out:
306         return skb;
307 }
308
309 #if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 membership query skb to be sent by the bridge.
 *
 * @grp:       group to query (:: / any for a general query)
 * @igmp_type: out parameter, set to ICMPV6_MGM_QUERY
 *
 * The frame is: Ethernet header, IPv6 header, an 8-byte Hop-by-Hop
 * extension header carrying the Router Alert option, then an MLD message
 * whose size depends on br->multicast_mld_version. Returns NULL if no
 * skb could be allocated or no usable IPv6 source address exists on the
 * bridge device (the latter also clears BROPT_HAS_IPV6_ADDR).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
                                                    const struct in6_addr *grp,
                                                    u8 *igmp_type)
{
        struct mld2_query *mld2q;
        unsigned long interval;
        struct ipv6hdr *ip6h;
        struct mld_msg *mldq;
        size_t mld_hdr_size;
        struct sk_buff *skb;
        struct ethhdr *eth;
        u8 *hopopt;

        mld_hdr_size = sizeof(*mldq);
        if (br->multicast_mld_version == 2)
                mld_hdr_size = sizeof(*mld2q);
        skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
                                                 8 + mld_hdr_size);
        if (!skb)
                goto out;

        skb->protocol = htons(ETH_P_IPV6);

        /* Ethernet header */
        skb_reset_mac_header(skb);
        eth = eth_hdr(skb);

        ether_addr_copy(eth->h_source, br->dev->dev_addr);
        eth->h_proto = htons(ETH_P_IPV6);
        skb_put(skb, sizeof(*eth));

        /* IPv6 header + HbH option */
        skb_set_network_header(skb, skb->len);
        ip6h = ipv6_hdr(skb);

        /* version 6, traffic class 0, flow label 0 */
        *(__force __be32 *)ip6h = htonl(0x60000000);
        ip6h->payload_len = htons(8 + mld_hdr_size);
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        /* destination ff02::1 (all-nodes link-local) */
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
        if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
                               &ip6h->saddr)) {
                kfree_skb(skb);
                br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
                return NULL;
        }

        br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

        hopopt = (u8 *)(ip6h + 1);
        hopopt[0] = IPPROTO_ICMPV6;             /* next hdr */
        hopopt[1] = 0;                          /* length of HbH */
        hopopt[2] = IPV6_TLV_ROUTERALERT;       /* Router Alert */
        hopopt[3] = 2;                          /* Length of RA Option */
        hopopt[4] = 0;                          /* Type = 0x0000 (MLD) */
        hopopt[5] = 0;
        hopopt[6] = IPV6_TLV_PAD1;              /* Pad1 */
        hopopt[7] = IPV6_TLV_PAD1;              /* Pad1 */

        skb_put(skb, sizeof(*ip6h) + 8);

        /* ICMPv6 */
        skb_set_transport_header(skb, skb->len);
        interval = ipv6_addr_any(grp) ?
                        br->multicast_query_response_interval :
                        br->multicast_last_member_interval;
        *igmp_type = ICMPV6_MGM_QUERY;
        switch (br->multicast_mld_version) {
        case 1:
                mldq = (struct mld_msg *)icmp6_hdr(skb);
                mldq->mld_type = ICMPV6_MGM_QUERY;
                mldq->mld_code = 0;
                mldq->mld_cksum = 0;
                mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
                mldq->mld_reserved = 0;
                mldq->mld_mca = *grp;
                mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                  sizeof(*mldq), IPPROTO_ICMPV6,
                                                  csum_partial(mldq,
                                                               sizeof(*mldq),
                                                               0));
                break;
        case 2:
                mld2q = (struct mld2_query *)icmp6_hdr(skb);
                mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
                mld2q->mld2q_type = ICMPV6_MGM_QUERY;
                mld2q->mld2q_code = 0;
                mld2q->mld2q_cksum = 0;
                mld2q->mld2q_resv1 = 0;
                mld2q->mld2q_resv2 = 0;
                mld2q->mld2q_suppress = 0;
                mld2q->mld2q_qrv = 2;
                mld2q->mld2q_nsrcs = 0;
                mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
                mld2q->mld2q_mca = *grp;
                mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
                                                     sizeof(*mld2q),
                                                     IPPROTO_ICMPV6,
                                                     csum_partial(mld2q,
                                                                  sizeof(*mld2q),
                                                                  0));
                break;
        }
        skb_put(skb, mld_hdr_size);

        __skb_pull(skb, sizeof(*eth));

out:
        return skb;
}
421 #endif
422
423 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
424                                                 struct br_ip *addr,
425                                                 u8 *igmp_type)
426 {
427         switch (addr->proto) {
428         case htons(ETH_P_IP):
429                 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
430 #if IS_ENABLED(CONFIG_IPV6)
431         case htons(ETH_P_IPV6):
432                 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
433                                                     igmp_type);
434 #endif
435         }
436         return NULL;
437 }
438
/* Find or create the MDB entry for @group.
 *
 * Caller must hold br->multicast_lock. Returns the existing entry if one
 * is present; otherwise allocates (GFP_ATOMIC, we are under a spinlock),
 * initializes and inserts a new one. Returns ERR_PTR(-E2BIG) — and turns
 * snooping off entirely — when the table already holds hash_max entries,
 * or ERR_PTR(-ENOMEM)/other errno on allocation or insertion failure.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
                                                    struct br_ip *group)
{
        struct net_bridge_mdb_entry *mp;
        int err;

        mp = br_mdb_ip_get(br, group);
        if (mp)
                return mp;

        if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
                br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
                return ERR_PTR(-E2BIG);
        }

        mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
        if (unlikely(!mp))
                return ERR_PTR(-ENOMEM);

        mp->br = br;
        mp->addr = *group;
        timer_setup(&mp->timer, br_multicast_group_expired, 0);
        err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
                                            br_mdb_rht_params);
        if (err) {
                kfree(mp);
                mp = ERR_PTR(err);
        } else {
                hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
        }

        return mp;
}
472
/* Allocate and link a new port group entry for @group on @port.
 *
 * Caller must hold the bridge's multicast_lock. The new entry is chained
 * in front of @next on the MDB entry's port list (the caller publishes it
 * there) and added to the port's mglist. @src, when non-NULL, is the host
 * MAC used for multicast-to-unicast forwarding; otherwise the broadcast
 * address marks "any source". Returns NULL on allocation failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
                        unsigned char flags,
                        const unsigned char *src)
{
        struct net_bridge_port_group *p;

        p = kzalloc(sizeof(*p), GFP_ATOMIC);
        if (unlikely(!p))
                return NULL;

        p->addr = *group;
        p->port = port;
        p->flags = flags;
        rcu_assign_pointer(p->next, next);
        hlist_add_head(&p->mglist, &port->mglist);
        timer_setup(&p->timer, br_multicast_port_group_expired, 0);

        if (src)
                memcpy(p->eth_addr, src, ETH_ALEN);
        else
                eth_broadcast_addr(p->eth_addr);

        return p;
}
500
501 static bool br_port_group_equal(struct net_bridge_port_group *p,
502                                 struct net_bridge_port *port,
503                                 const unsigned char *src)
504 {
505         if (p->port != port)
506                 return false;
507
508         if (!(port->flags & BR_MULTICAST_TO_UNICAST))
509                 return true;
510
511         return ether_addr_equal(src, p->eth_addr);
512 }
513
/* Mark the bridge device itself as a member of @mp's group.
 *
 * Sends an RTM_NEWMDB notification on the first join (when @notify) and
 * always re-arms the membership expiry timer.
 */
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
        if (!mp->host_joined) {
                mp->host_joined = true;
                if (notify)
                        br_mdb_notify(mp->br->dev, NULL, &mp->addr,
                                      RTM_NEWMDB, 0);
        }
        mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
524
525 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
526 {
527         if (!mp->host_joined)
528                 return;
529
530         mp->host_joined = false;
531         if (notify)
532                 br_mdb_notify(mp->br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
533 }
534
/* Record a membership report for @group arriving on @port.
 *
 * A NULL @port means the report was for the bridge device itself (host
 * join). Otherwise the port's group entry is found — or created and
 * inserted in the (port-pointer-ordered) list — and its membership timer
 * refreshed. Returns 0 on success or a negative errno from entry
 * creation; note the fall-through from "out:" into "err:" sets err = 0
 * on the success paths.
 */
static int br_multicast_add_group(struct net_bridge *br,
                                  struct net_bridge_port *port,
                                  struct br_ip *group,
                                  const unsigned char *src)
{
        struct net_bridge_port_group __rcu **pp;
        struct net_bridge_port_group *p;
        struct net_bridge_mdb_entry *mp;
        unsigned long now = jiffies;
        int err;

        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) ||
            (port && port->state == BR_STATE_DISABLED))
                goto out;

        mp = br_multicast_new_group(br, group);
        err = PTR_ERR(mp);
        if (IS_ERR(mp))
                goto err;

        if (!port) {
                br_multicast_host_join(mp, true);
                goto out;
        }

        /* Find the existing entry, or the insertion point (list is kept
         * sorted by descending port pointer).
         */
        for (pp = &mp->ports;
             (p = mlock_dereference(*pp, br)) != NULL;
             pp = &p->next) {
                if (br_port_group_equal(p, port, src))
                        goto found;
                if ((unsigned long)p->port < (unsigned long)port)
                        break;
        }

        p = br_multicast_new_port_group(port, group, *pp, 0, src);
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
        br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);

found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
out:
        err = 0;

err:
        spin_unlock(&br->multicast_lock);
        return err;
}
585
586 static int br_ip4_multicast_add_group(struct net_bridge *br,
587                                       struct net_bridge_port *port,
588                                       __be32 group,
589                                       __u16 vid,
590                                       const unsigned char *src)
591 {
592         struct br_ip br_group;
593
594         if (ipv4_is_local_multicast(group))
595                 return 0;
596
597         memset(&br_group, 0, sizeof(br_group));
598         br_group.u.ip4 = group;
599         br_group.proto = htons(ETH_P_IP);
600         br_group.vid = vid;
601
602         return br_multicast_add_group(br, port, &br_group, src);
603 }
604
605 #if IS_ENABLED(CONFIG_IPV6)
606 static int br_ip6_multicast_add_group(struct net_bridge *br,
607                                       struct net_bridge_port *port,
608                                       const struct in6_addr *group,
609                                       __u16 vid,
610                                       const unsigned char *src)
611 {
612         struct br_ip br_group;
613
614         if (ipv6_addr_is_ll_all_nodes(group))
615                 return 0;
616
617         memset(&br_group, 0, sizeof(br_group));
618         br_group.u.ip6 = *group;
619         br_group.proto = htons(ETH_P_IPV6);
620         br_group.vid = vid;
621
622         return br_multicast_add_group(br, port, &br_group, src);
623 }
624 #endif
625
/* Timer callback: a port's learned-router presence timed out.
 *
 * Removes the port from the router list unless router tracking is
 * disabled/permanent on the port or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct timer_list *t)
{
        struct net_bridge_port *port =
                        from_timer(port, t, multicast_router_timer);
        struct net_bridge *br = port->br;

        spin_lock(&br->multicast_lock);
        if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
            port->multicast_router == MDB_RTR_TYPE_PERM ||
            timer_pending(&port->multicast_router_timer))
                goto out;

        __del_port_router(port);
out:
        spin_unlock(&br->multicast_lock);
}
642
/* Propagate the bridge's multicast-router state to offloading hardware
 * via a deferred switchdev attribute set.
 */
static void br_mc_router_state_change(struct net_bridge *p,
                                      bool is_mc_router)
{
        struct switchdev_attr attr = {
                .orig_dev = p->dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
                .flags = SWITCHDEV_F_DEFER,
                .u.mrouter = is_mc_router,
        };

        switchdev_port_attr_set(p->dev, &attr);
}
655
/* Timer callback: the bridge device's own learned-router status timed out.
 * Tells switchdev the bridge is no longer a multicast router, unless the
 * setting is disabled/permanent or the timer was re-armed.
 */
static void br_multicast_local_router_expired(struct timer_list *t)
{
        struct net_bridge *br = from_timer(br, t, multicast_router_timer);

        spin_lock(&br->multicast_lock);
        if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
            br->multicast_router == MDB_RTR_TYPE_PERM ||
            timer_pending(&br->multicast_router_timer))
                goto out;

        br_mc_router_state_change(br, false);
out:
        spin_unlock(&br->multicast_lock);
}
670
/* Common helper for the other-querier-present timers: the foreign querier
 * went silent, so (re)start our own querier if the bridge is up and
 * snooping is enabled.
 */
static void br_multicast_querier_expired(struct net_bridge *br,
                                         struct bridge_mcast_own_query *query)
{
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
                goto out;

        br_multicast_start_querier(br, query);

out:
        spin_unlock(&br->multicast_lock);
}
683
/* IPv4 other-querier-present timer expired. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
        struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);

        br_multicast_querier_expired(br, &br->ip4_own_query);
}
690
691 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier-present timer expired. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
        struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);

        br_multicast_querier_expired(br, &br->ip6_own_query);
}
698 #endif
699
/* Record the source address of a query we are sending ourselves as the
 * current querier address for the matching protocol family.
 */
static void br_multicast_select_own_querier(struct net_bridge *br,
                                            struct br_ip *ip,
                                            struct sk_buff *skb)
{
        if (ip->proto == htons(ETH_P_IP))
                br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
        else
                br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
711
/* Build and emit one membership query for @ip.
 *
 * With a @port the query is transmitted out of that port through the
 * NF_BR_LOCAL_OUT hook; with no port the bridge queries itself: the skb
 * is fed back into the local receive path (netif_rx) and the query
 * source becomes our own-querier address.
 */
static void __br_multicast_send_query(struct net_bridge *br,
                                      struct net_bridge_port *port,
                                      struct br_ip *ip)
{
        struct sk_buff *skb;
        u8 igmp_type;

        skb = br_multicast_alloc_query(br, ip, &igmp_type);
        if (!skb)
                return;

        if (port) {
                skb->dev = port->dev;
                br_multicast_count(br, port, skb, igmp_type,
                                   BR_MCAST_DIR_TX);
                NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
                        dev_net(port->dev), NULL, skb, NULL, skb->dev,
                        br_dev_queue_push_xmit);
        } else {
                br_multicast_select_own_querier(br, ip, skb);
                br_multicast_count(br, port, skb, igmp_type,
                                   BR_MCAST_DIR_RX);
                netif_rx(skb);
        }
}
737
/* Send a general query for the family selected by @own_query, then re-arm
 * the own-query timer.
 *
 * Nothing is sent while the bridge is down, snooping is off, querier mode
 * is off, or another querier is currently active on the segment (its
 * other_query timer is still pending). The startup phase uses the shorter
 * startup interval for the first multicast_startup_query_count queries.
 */
static void br_multicast_send_query(struct net_bridge *br,
                                    struct net_bridge_port *port,
                                    struct bridge_mcast_own_query *own_query)
{
        struct bridge_mcast_other_query *other_query = NULL;
        struct br_ip br_group;
        unsigned long time;

        if (!netif_running(br->dev) ||
            !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
            !br_opt_get(br, BROPT_MULTICAST_QUERIER))
                return;

        memset(&br_group.u, 0, sizeof(br_group.u));

        /* Match @own_query against the per-port or per-bridge IPv4 query
         * state; anything else is the IPv6 state (only reachable when
         * CONFIG_IPV6 is enabled, hence other_query stays NULL otherwise).
         */
        if (port ? (own_query == &port->ip4_own_query) :
                   (own_query == &br->ip4_own_query)) {
                other_query = &br->ip4_other_query;
                br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
        } else {
                other_query = &br->ip6_other_query;
                br_group.proto = htons(ETH_P_IPV6);
#endif
        }

        if (!other_query || timer_pending(&other_query->timer))
                return;

        __br_multicast_send_query(br, port, &br_group);

        time = jiffies;
        time += own_query->startup_sent < br->multicast_startup_query_count ?
                br->multicast_startup_query_interval :
                br->multicast_query_interval;
        mod_timer(&own_query->timer, time);
}
775
776 static void
777 br_multicast_port_query_expired(struct net_bridge_port *port,
778                                 struct bridge_mcast_own_query *query)
779 {
780         struct net_bridge *br = port->br;
781
782         spin_lock(&br->multicast_lock);
783         if (port->state == BR_STATE_DISABLED ||
784             port->state == BR_STATE_BLOCKING)
785                 goto out;
786
787         if (query->startup_sent < br->multicast_startup_query_count)
788                 query->startup_sent++;
789
790         br_multicast_send_query(port->br, port, query);
791
792 out:
793         spin_unlock(&br->multicast_lock);
794 }
795
/* IPv4 own-query timer expired on a port. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
        struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);

        br_multicast_port_query_expired(port, &port->ip4_own_query);
}
802
803 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer expired on a port. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
        struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);

        br_multicast_port_query_expired(port, &port->ip6_own_query);
}
810 #endif
811
/* Tell offloading hardware whether multicast snooping is disabled.
 * @value is the "enabled" state, hence the inversion into mc_disabled.
 */
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
        struct switchdev_attr attr = {
                .orig_dev = dev,
                .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
                .flags = SWITCHDEV_F_DEFER,
                .u.mc_disabled = !value,
        };

        switchdev_port_attr_set(dev, &attr);
}
823
/* Initialize per-port multicast state when a port joins the bridge.
 *
 * Sets up the router and own-query timers, pushes the current snooping
 * state to switchdev, and allocates per-CPU multicast statistics.
 * Returns 0 on success or -ENOMEM if the stats allocation fails (the
 * timers are inert and cleaned up by br_multicast_del_port()).
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
        port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;

        timer_setup(&port->multicast_router_timer,
                    br_multicast_router_expired, 0);
        timer_setup(&port->ip4_own_query.timer,
                    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
        timer_setup(&port->ip6_own_query.timer,
                    br_ip6_multicast_port_query_expired, 0);
#endif
        br_mc_disabled_update(port->dev,
                              br_opt_get(port->br, BROPT_MULTICAST_ENABLED));

        port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
        if (!port->mcast_stats)
                return -ENOMEM;

        return 0;
}
845
/* Tear down per-port multicast state when a port leaves the bridge:
 * delete all remaining port groups (including permanent ones), stop the
 * router timer, and free the per-CPU statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;
        struct net_bridge_port_group *pg;
        struct hlist_node *n;

        /* Take care of the remaining groups, only perm ones should be left */
        spin_lock_bh(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
                br_multicast_del_pg(br, pg);
        spin_unlock_bh(&br->multicast_lock);
        del_timer_sync(&port->multicast_router_timer);
        free_percpu(port->mcast_stats);
}
860
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately.
 *
 * The try_to_del_timer_sync()/del_timer() pair only re-arms the timer
 * when it is not currently running its callback on another CPU, avoiding
 * a rearm race with the expiry handler.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
        query->startup_sent = 0;

        if (try_to_del_timer_sync(&query->timer) >= 0 ||
            del_timer(&query->timer))
                mod_timer(&query->timer, jiffies);
}
869
/* Enable multicast processing on a port (caller holds multicast_lock).
 *
 * Kicks the per-family own-query cycles and, for a permanently configured
 * router port not yet on the router list, registers it as a router.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
        struct net_bridge *br = port->br;

        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
                return;

        br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
        br_multicast_enable(&port->ip6_own_query);
#endif
        if (port->multicast_router == MDB_RTR_TYPE_PERM &&
            hlist_unhashed(&port->rlist))
                br_multicast_add_router(br, port);
}
885
886 void br_multicast_enable_port(struct net_bridge_port *port)
887 {
888         struct net_bridge *br = port->br;
889
890         spin_lock(&br->multicast_lock);
891         __br_multicast_enable_port(port);
892         spin_unlock(&br->multicast_lock);
893 }
894
/* Disable multicast processing on @port: drop all non-permanent port
 * groups, remove the port from the router list and stop its timers.
 * Permanent (static) groups are kept.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(br, pg);

	__del_port_router(port);

	/* Non-sync deletion is safe here: the lock we hold is the one the
	 * timer callbacks also take.
	 */
	del_timer(&port->multicast_router_timer);
	del_timer(&port->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&port->ip6_own_query.timer);
#endif
	spin_unlock(&br->multicast_lock);
}
915
/* Parse an IGMPv3 membership report and update the MDB.  Each group
 * record is degraded to IGMPv2 semantics: INCLUDE{} records act as a
 * leave, everything else as a join.
 *
 * Returns 0 on success or -EINVAL if a record is truncated.
 *
 * NOTE(review): the per-record advance only accounts for the fixed
 * header plus nsrcs * 4 source addresses; it assumes grec_auxwords is
 * zero (RFC 3376 Aux Data Len) — records carrying aux data would
 * misalign subsequent parsing.  TODO confirm the earlier
 * ip_mc_check_igmp() validation guarantees this.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
					 struct net_bridge_port *port,
					 struct sk_buff *skb,
					 u16 vid)
{
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	/* len tracks the end of the data consumed so far */
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* Skip the IPv4 source list (4 bytes per source). */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		/* INCLUDE with an empty source list means "leave group" */
		if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			br_ip4_multicast_leave_group(br, port, group, vid, src);
		} else {
			err = br_ip4_multicast_add_group(br, port, group, vid,
							 src);
			if (err)
				break;
		}
	}

	return err;
}
979
980 #if IS_ENABLED(CONFIG_IPV6)
981 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
982                                         struct net_bridge_port *port,
983                                         struct sk_buff *skb,
984                                         u16 vid)
985 {
986         unsigned int nsrcs_offset;
987         const unsigned char *src;
988         struct icmp6hdr *icmp6h;
989         struct mld2_grec *grec;
990         unsigned int grec_len;
991         int i;
992         int len;
993         int num;
994         int err = 0;
995
996         if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
997                 return -EINVAL;
998
999         icmp6h = icmp6_hdr(skb);
1000         num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1001         len = skb_transport_offset(skb) + sizeof(*icmp6h);
1002
1003         for (i = 0; i < num; i++) {
1004                 __be16 *_nsrcs, __nsrcs;
1005                 u16 nsrcs;
1006
1007                 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
1008
1009                 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
1010                     nsrcs_offset + sizeof(_nsrcs))
1011                         return -EINVAL;
1012
1013                 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
1014                                             sizeof(__nsrcs), &__nsrcs);
1015                 if (!_nsrcs)
1016                         return -EINVAL;
1017
1018                 nsrcs = ntohs(*_nsrcs);
1019                 grec_len = struct_size(grec, grec_src, nsrcs);
1020
1021                 if (!ipv6_mc_may_pull(skb, len + grec_len))
1022                         return -EINVAL;
1023
1024                 grec = (struct mld2_grec *)(skb->data + len);
1025                 len += grec_len;
1026
1027                 /* We treat these as MLDv1 reports for now. */
1028                 switch (grec->grec_type) {
1029                 case MLD2_MODE_IS_INCLUDE:
1030                 case MLD2_MODE_IS_EXCLUDE:
1031                 case MLD2_CHANGE_TO_INCLUDE:
1032                 case MLD2_CHANGE_TO_EXCLUDE:
1033                 case MLD2_ALLOW_NEW_SOURCES:
1034                 case MLD2_BLOCK_OLD_SOURCES:
1035                         break;
1036
1037                 default:
1038                         continue;
1039                 }
1040
1041                 src = eth_hdr(skb)->h_source;
1042                 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1043                      grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1044                     nsrcs == 0) {
1045                         br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1046                                                      vid, src);
1047                 } else {
1048                         err = br_ip6_multicast_add_group(br, port,
1049                                                          &grec->grec_mca, vid,
1050                                                          src);
1051                         if (err)
1052                                 break;
1053                 }
1054         }
1055
1056         return err;
1057 }
1058 #endif
1059
/* IGMP querier election (RFC-style lowest-address-wins): decide whether
 * the query from @saddr should become the selected querier.  Returns
 * true (and records @saddr/@port) when no querier is active, when none
 * has been recorded yet, or when @saddr is numerically lower than or
 * equal to the current one; false otherwise.
 */
static bool br_ip4_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    __be32 saddr)
{
	/* No querier currently active (neither ours nor another's)? */
	if (!timer_pending(&br->ip4_own_query.timer) &&
	    !timer_pending(&br->ip4_other_query.timer))
		goto update;

	/* No querier address recorded yet */
	if (!br->ip4_querier.addr.u.ip4)
		goto update;

	/* Lower (or equal, i.e. same querier) address wins the election */
	if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
		goto update;

	return false;

update:
	br->ip4_querier.addr.u.ip4 = saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip4_querier.port, port);

	return true;
}
1084
1085 #if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IGMP version above:
 * lowest source address wins.  Returns true and records @saddr/@port
 * when the sender is (or ties with) the preferred querier.
 */
static bool br_ip6_multicast_select_querier(struct net_bridge *br,
					    struct net_bridge_port *port,
					    struct in6_addr *saddr)
{
	/* No querier currently active (neither ours nor another's)? */
	if (!timer_pending(&br->ip6_own_query.timer) &&
	    !timer_pending(&br->ip6_other_query.timer))
		goto update;

	/* Lower-or-equal address wins; also covers the unset (::) case */
	if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
		goto update;

	return false;

update:
	br->ip6_querier.addr.u.ip6 = *saddr;

	/* update protected by general multicast_lock by caller */
	rcu_assign_pointer(br->ip6_querier.port, port);

	return true;
}
1107 #endif
1108
1109 static bool br_multicast_select_querier(struct net_bridge *br,
1110                                         struct net_bridge_port *port,
1111                                         struct br_ip *saddr)
1112 {
1113         switch (saddr->proto) {
1114         case htons(ETH_P_IP):
1115                 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1116 #if IS_ENABLED(CONFIG_IPV6)
1117         case htons(ETH_P_IPV6):
1118                 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
1119 #endif
1120         }
1121
1122         return false;
1123 }
1124
/* Refresh the "another querier is present" timer after seeing a query.
 * delay_time (the earliest moment we may take over as querier) is only
 * pushed out when the timer was not already running.
 */
static void
br_multicast_update_query_timer(struct net_bridge *br,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		query->delay_time = jiffies + max_delay;

	mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
}
1135
/* Notify the switchdev driver that @p has become (or stopped being) a
 * multicast router port.  Deferred so it may be called under spinlock.
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr);
}
1148
/*
 * Add port to router_list
 *  list is maintained ordered by pointer value (descending)
 *  and locked by br->multicast_lock and RCU
 *
 * No-op if the port is already on the list.  Emits an RTM_NEWMDB router
 * notification and informs switchdev on insertion.
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct net_bridge_port *p;
	struct hlist_node *slot = NULL;

	/* Already linked into router_list — nothing to do */
	if (!hlist_unhashed(&port->rlist))
		return;

	/* Find the last entry with a higher pointer value than ours;
	 * we will insert behind it (or at the head if none).
	 */
	hlist_for_each_entry(p, &br->router_list, rlist) {
		if ((unsigned long) port >= (unsigned long) p)
			break;
		slot = &p->rlist;
	}

	if (slot)
		hlist_add_behind_rcu(&port->rlist, slot);
	else
		hlist_add_head_rcu(&port->rlist, &br->router_list);
	br_rtr_notify(br->dev, port, RTM_NEWMDB);
	br_port_mc_router_state_change(port, true);
}
1176
/* Record that a multicast router was detected.  With @port == NULL the
 * router is the bridge itself (local origin); otherwise @port is added
 * to the router list.  In both cases the corresponding timer is pushed
 * out, unless the router mode is a static one (DISABLED/PERM) that
 * dynamic detection must not override.  Caller holds multicast_lock.
 */
static void br_multicast_mark_router(struct net_bridge *br,
				     struct net_bridge_port *port)
{
	unsigned long now = jiffies;

	if (!port) {
		if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* Only announce the transition when the timer was
			 * not already running (i.e. state actually changed).
			 */
			if (!timer_pending(&br->multicast_router_timer))
				br_mc_router_state_change(br, true);
			mod_timer(&br->multicast_router_timer,
				  now + br->multicast_querier_interval);
		}
		return;
	}

	/* Static modes: never add/refresh dynamically */
	if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    port->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(br, port);

	mod_timer(&port->multicast_router_timer,
		  now + br->multicast_querier_interval);
}
1201
/* Handle a general query from @saddr seen on @port: if the sender wins
 * (or ties) the querier election, refresh the other-querier timer and
 * mark the ingress port as a router port.  Caller holds multicast_lock.
 */
static void br_multicast_query_received(struct net_bridge *br,
					struct net_bridge_port *port,
					struct bridge_mcast_other_query *query,
					struct br_ip *saddr,
					unsigned long max_delay)
{
	if (!br_multicast_select_querier(br, port, saddr))
		return;

	br_multicast_update_query_timer(br, query, max_delay);
	br_multicast_mark_router(br, port);
}
1214
/* Process an IGMP query (v1/v2/v3) received on the bridge.  General
 * queries feed the querier election; group-specific queries lower the
 * membership timers of the matching MDB entry and its ports so that
 * silent members age out within max_delay * last_member_count.
 */
static void br_ip4_multicast_query(struct net_bridge *br,
				   struct net_bridge_port *port,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2 query: code is max resp time in 1/10 s units */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		/* IGMPv1 has code == 0: use the default 10 s and treat
		 * the query as a general one.
		 */
		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* Source-specific queries are not handled */
		if (ih3->nsrcs)
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		/* Neither a v1/v2 nor a v3 sized query */
		goto out;
	}

	if (!group) {
		/* General query: run the querier election.  Only proto
		 * and the address are consulted by the election code.
		 */
		saddr.proto = htons(ETH_P_IP);
		saddr.u.ip4 = iph->saddr;

		br_multicast_query_received(br, port, &br->ip4_other_query,
					    &saddr, max_delay);
		goto out;
	}

	/* Group-specific query */
	mp = br_mdb_ip4_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;

	/* Shorten the host-joined entry timer, but never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* Same for every member port of the group */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1290
1291 #if IS_ENABLED(CONFIG_IPV6)
/* Process an MLD query (v1/v2) received on the bridge; IPv6 counterpart
 * of br_ip4_multicast_query().  General queries feed the querier
 * election; group-specific queries lower the membership timers of the
 * matching MDB entry and its ports.
 *
 * Returns 0 on success or -EINVAL on a truncated query.
 */
static int br_ip6_multicast_query(struct net_bridge *br,
				  struct net_bridge_port *port,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr;
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		/* Source-specific queries are not handled */
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	/* An unspecified (::) group address marks a general query */
	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* Only proto and the address are used by the election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.u.ip6 = ipv6_hdr(skb)->saddr;

		br_multicast_query_received(br, port, &br->ip6_other_query,
					    &saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* Group-specific query */
	mp = br_mdb_ip6_get(br, group, vid);
	if (!mp)
		goto out;

	max_delay *= br->multicast_last_member_count;
	/* Shorten the host-joined entry timer, but never extend it */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	/* Same for every member port of the group */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0)
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&br->multicast_lock);
	return err;
}
1374 #endif
1375
/* Common leave handling for IGMP/MLD.  Three paths:
 *  - fast-leave ports: delete the matching port group immediately;
 *  - if we are the active querier: send a group-specific query and set
 *    the "last member" short timeout on the matching port group;
 *  - otherwise: just shorten the relevant membership timer so the entry
 *    ages out after last_member_count * last_member_interval, unless
 *    another querier is currently active (its query will do the aging).
 */
static void
br_multicast_leave_group(struct net_bridge *br,
			 struct net_bridge_port *port,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED))
		goto out;

	mp = br_mdb_ip_get(br, group);
	if (!mp)
		goto out;

	if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Remove the matching non-permanent port group right away */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			rcu_assign_pointer(*pp, p->next);
			hlist_del_init(&p->mglist);
			del_timer(&p->timer);
			/* kfree_rcu() defers the free past a grace period,
			 * so reading p->flags just below is still safe.
			 */
			kfree_rcu(p, rcu);
			br_mdb_notify(br->dev, port, group, RTM_DELMDB,
				      p->flags | MDB_PG_FLAGS_FAST_LEAVE);

			/* Last reference gone: expire the entry itself */
			if (!mp->ports && !mp->host_joined &&
			    netif_running(br->dev))
				mod_timer(&mp->timer, jiffies);
		}
		goto out;
	}

	/* Another querier is active; defer to its group-specific query */
	if (timer_pending(&other_query->timer))
		goto out;

	if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
		__br_multicast_send_query(br, port, &mp->addr);

		time = jiffies + br->multicast_last_member_count *
				 br->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		/* Shorten (never extend) the matching port group timer */
		for (p = mlock_dereference(mp->ports, br);
		     p != NULL;
		     p = mlock_dereference(p->next, br)) {
			if (!br_port_group_equal(p, port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Host leave: shorten the entry's host-joined timer */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* Port leave: shorten the first port group timer for this port */
	for (p = mlock_dereference(mp->ports, br);
	     p != NULL;
	     p = mlock_dereference(p->next, br)) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&br->multicast_lock);
}
1485
/* IGMP leave: build the br_ip key and dispatch to the common leave
 * handler.  Link-local groups (224.0.0.x) are never aged out here.
 */
static void br_ip4_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = port ? &port->ip4_own_query : &br->ip4_own_query;

	/* br_ip is used as a fixed-size rhashtable key (key_len ==
	 * sizeof(struct br_ip)), so the whole struct including padding
	 * must be zeroed before setting the fields.
	 */
	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
				 own_query, src);
}
1508
#if IS_ENABLED(CONFIG_IPV6)
/* MLD leave: build the br_ip key and dispatch to the common leave
 * handler.  The link-local all-nodes group (ff02::1) is never aged out.
 */
static void br_ip6_multicast_leave_group(struct net_bridge *br,
					 struct net_bridge_port *port,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = port ? &port->ip6_own_query : &br->ip6_own_query;

	/* br_ip is used as a fixed-size rhashtable key (key_len ==
	 * sizeof(struct br_ip)), so the whole struct including padding
	 * must be zeroed before setting the fields.
	 */
	memset(&br_group, 0, sizeof(br_group));
	br_group.u.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
				 own_query, src);
}
#endif
1533
1534 static void br_multicast_err_count(const struct net_bridge *br,
1535                                    const struct net_bridge_port *p,
1536                                    __be16 proto)
1537 {
1538         struct bridge_mcast_stats __percpu *stats;
1539         struct bridge_mcast_stats *pstats;
1540
1541         if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
1542                 return;
1543
1544         if (p)
1545                 stats = p->mcast_stats;
1546         else
1547                 stats = br->mcast_stats;
1548         if (WARN_ON(!stats))
1549                 return;
1550
1551         pstats = this_cpu_ptr(stats);
1552
1553         u64_stats_update_begin(&pstats->syncp);
1554         switch (proto) {
1555         case htons(ETH_P_IP):
1556                 pstats->mstats.igmp_parse_errors++;
1557                 break;
1558 #if IS_ENABLED(CONFIG_IPV6)
1559         case htons(ETH_P_IPV6):
1560                 pstats->mstats.mld_parse_errors++;
1561                 break;
1562 #endif
1563         }
1564         u64_stats_update_end(&pstats->syncp);
1565 }
1566
1567 static void br_multicast_pim(struct net_bridge *br,
1568                              struct net_bridge_port *port,
1569                              const struct sk_buff *skb)
1570 {
1571         unsigned int offset = skb_transport_offset(skb);
1572         struct pimhdr *pimhdr, _pimhdr;
1573
1574         pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1575         if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1576             pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1577                 return;
1578
1579         br_multicast_mark_router(br, port);
1580 }
1581
1582 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1583                                     struct net_bridge_port *port,
1584                                     struct sk_buff *skb)
1585 {
1586         if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1587             igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1588                 return -ENOMSG;
1589
1590         br_multicast_mark_router(br, port);
1591
1592         return 0;
1593 }
1594
/* Snoop an IPv4 multicast packet: validate it as IGMP and dispatch to
 * the report/query/leave handlers.  Non-IGMP multicast is classified
 * (mrouters_only for routable groups, PIM hello and MRD advertisement
 * detection for router discovery) and forwarded normally.
 *
 * Returns 0, or a negative error from validation/report parsing.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Valid multicast but not IGMP */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			/* Routable group: restrict flooding to mrouter ports */
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(br, port, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(br, port, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(br, port, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1648
1649 #if IS_ENABLED(CONFIG_IPV6)
1650 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1651                                     struct net_bridge_port *port,
1652                                     struct sk_buff *skb)
1653 {
1654         int ret;
1655
1656         if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1657                 return -ENOMSG;
1658
1659         ret = ipv6_mc_check_icmpv6(skb);
1660         if (ret < 0)
1661                 return ret;
1662
1663         if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1664                 return -ENOMSG;
1665
1666         br_multicast_mark_router(br, port);
1667
1668         return 0;
1669 }
1670
/* Snoop an IPv6 multicast packet: validate it as MLD and dispatch to
 * the report/query/done handlers.  Non-MLD multicast is classified
 * (mrouters_only for non-link-local-all-nodes, MRD advertisement
 * detection for router discovery) and forwarded normally.
 *
 * Returns 0, or a negative error from validation/report parsing.
 */
static int br_multicast_ipv6_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb,
				 u16 vid)
{
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG) {
		/* Valid multicast but not MLD */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
			err = br_ip6_multicast_mrd_rcv(br, port, skb);

			/* -ENOMSG just means "not MRD", not a failure */
			if (err < 0 && err != -ENOMSG) {
				br_multicast_err_count(br, port, skb->protocol);
				return err;
			}
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(br, port, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
						 src);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(br, port, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(br, port, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
		break;
	}

	br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
1728 #endif
1729
1730 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1731                      struct sk_buff *skb, u16 vid)
1732 {
1733         int ret = 0;
1734
1735         BR_INPUT_SKB_CB(skb)->igmp = 0;
1736         BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1737
1738         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1739                 return 0;
1740
1741         switch (skb->protocol) {
1742         case htons(ETH_P_IP):
1743                 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1744                 break;
1745 #if IS_ENABLED(CONFIG_IPV6)
1746         case htons(ETH_P_IPV6):
1747                 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1748                 break;
1749 #endif
1750         }
1751
1752         return ret;
1753 }
1754
/* Common IPv4/IPv6 own-query timer handler: count another startup query
 * while still in the startup phase, forget the port the selected querier
 * was last seen on and transmit our own general query.
 */
static void br_multicast_query_expired(struct net_bridge *br,
                                       struct bridge_mcast_own_query *query,
                                       struct bridge_mcast_querier *querier)
{
	spin_lock(&br->multicast_lock);
	if (query->startup_sent < br->multicast_startup_query_count)
		query->startup_sent++;

	/* Drop the cached querier port reference before querying. */
	RCU_INIT_POINTER(querier->port, NULL);
	br_multicast_send_query(br, NULL, query);
	spin_unlock(&br->multicast_lock);
}
1767
1768 static void br_ip4_multicast_query_expired(struct timer_list *t)
1769 {
1770         struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
1771
1772         br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1773 }
1774
1775 #if IS_ENABLED(CONFIG_IPV6)
1776 static void br_ip6_multicast_query_expired(struct timer_list *t)
1777 {
1778         struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
1779
1780         br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1781 }
1782 #endif
1783
/* Initialize all bridge-level multicast snooping state: protocol
 * defaults, querier/router timers and the mdb list.  Runs at bridge
 * device creation time.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	br->multicast_last_member_count = 2;
	br->multicast_startup_query_count = 2;

	/* Timer defaults; the values correspond to the intervals
	 * recommended by the IGMP/MLD specs (e.g. 125s query interval,
	 * 10s max response time, 260s membership interval).
	 */
	br->multicast_last_member_interval = HZ;
	br->multicast_query_response_interval = 10 * HZ;
	br->multicast_startup_query_interval = 125 * HZ / 4;
	br->multicast_query_interval = 125 * HZ;
	br->multicast_querier_interval = 255 * HZ;
	br->multicast_membership_interval = 260 * HZ;

	br->ip4_other_query.delay_time = 0;
	br->ip4_querier.port = NULL;
	br->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	br->multicast_mld_version = 1;
	br->ip6_other_query.delay_time = 0;
	br->ip6_querier.port = NULL;
#endif
	/* Snooping starts enabled; HAS_IPV6_ADDR is assumed until an
	 * address-related event clears it elsewhere.
	 */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	timer_setup(&br->multicast_router_timer,
		    br_multicast_local_router_expired, 0);
	timer_setup(&br->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&br->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&br->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&br->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
	INIT_HLIST_HEAD(&br->mdb_list);
}
1825
/* Join the IGMP all-snoopers group (INADDR_ALLSNOOPERS_GROUP) on the
 * bridge device.  No-op if the device has no IPv4 configuration.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
1836
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a — the link-local all-snoopers address. */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* Stub for !CONFIG_IPV6 builds. */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
1850
/* Join the IPv4 and IPv6 all-snoopers groups on the bridge device. */
static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
1856
/* Leave the IGMP all-snoopers group joined by
 * br_ip4_multicast_join_snoopers().  The WARN_ON guards the expected
 * invariant that the in_device still exists at leave time.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
1867
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* ff02::6a — must match br_ip6_multicast_join_snoopers(). */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* Stub for !CONFIG_IPV6 builds. */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
1881
/* Leave the IPv4 and IPv6 all-snoopers groups on the bridge device. */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
1887
1888 static void __br_multicast_open(struct net_bridge *br,
1889                                 struct bridge_mcast_own_query *query)
1890 {
1891         query->startup_sent = 0;
1892
1893         if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1894                 return;
1895
1896         mod_timer(&query->timer, jiffies);
1897 }
1898
/* Bring up multicast snooping on the bridge: join the all-snoopers
 * groups (only when snooping is enabled) and start the IPv4/IPv6 own
 * queries.
 */
void br_multicast_open(struct net_bridge *br)
{
	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_join_snoopers(br);

	__br_multicast_open(br, &br->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open(br, &br->ip6_own_query);
#endif
}
1909
/* Tear down the bridge-level multicast timers and, if snooping is
 * enabled, leave the all-snoopers groups joined in br_multicast_open().
 */
void br_multicast_stop(struct net_bridge *br)
{
	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->ip4_other_query.timer);
	del_timer_sync(&br->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&br->ip6_other_query.timer);
	del_timer_sync(&br->ip6_own_query.timer);
#endif

	if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
		br_multicast_leave_snoopers(br);
}
1923
/* Flush every mdb entry when the bridge device is deleted.  Each entry
 * is removed from the hash table and list under multicast_lock and
 * freed via kfree_rcu(); the final rcu_barrier() waits for those RCU
 * callbacks to finish before the caller continues tearing down.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
		del_timer(&mp->timer);
		rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
				       br_mdb_rht_params);
		hlist_del_rcu(&mp->mdb_node);
		kfree_rcu(mp, rcu);
	}
	spin_unlock_bh(&br->multicast_lock);

	/* Wait for all queued kfree_rcu() callbacks to complete. */
	rcu_barrier();
}
1941
/* Set the bridge-level multicast router mode from userspace.
 *
 * Returns 0 on success or -EINVAL for an unsupported @val
 * (MDB_RTR_TYPE_TEMP cannot be set on the bridge itself).
 */
int br_multicast_set_router(struct net_bridge *br, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* Fixed state: stop the auto-expiry timer and notify
		 * the new router state (true only for PERM).
		 */
		br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
		del_timer(&br->multicast_router_timer);
		br->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Only notify when actually changing state. */
		if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(br, false);
		br->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&br->multicast_lock);

	return err;
}
1968
/* Remove port @p from the bridge's router port list (no-op if it is not
 * on it), send the RTM_DELMDB notification and clear the port's router
 * state.  A TEMP router is downgraded so a later timer refresh cannot
 * re-add the port.
 */
static void __del_port_router(struct net_bridge_port *p)
{
	if (hlist_unhashed(&p->rlist))
		return;
	hlist_del_init_rcu(&p->rlist);
	br_rtr_notify(p->br->dev, p, RTM_DELMDB);
	br_port_mc_router_state_change(p, false);

	/* don't allow timer refresh */
	if (p->multicast_router == MDB_RTR_TYPE_TEMP)
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
1981
/* Set the per-port multicast router mode from userspace.
 *
 * Setting the current mode again is a no-op except for TEMP, where it
 * refreshes the router port timer.  Returns 0 on success or -EINVAL for
 * an unknown @val.
 */
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
	struct net_bridge *br = p->br;
	unsigned long now = jiffies;
	int err = -EINVAL;

	spin_lock(&br->multicast_lock);
	if (p->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (p->multicast_router == MDB_RTR_TYPE_TEMP)
			mod_timer(&p->multicast_router_timer,
				  now + br->multicast_querier_interval);
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* Remove from the router list and stop the timer. */
		p->multicast_router = MDB_RTR_TYPE_DISABLED;
		__del_port_router(p);
		del_timer(&p->multicast_router_timer);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* Keep the timer running; queries may re-add the port. */
		p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		__del_port_router(p);
		break;
	case MDB_RTR_TYPE_PERM:
		/* Permanent router port: no expiry timer needed. */
		p->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&p->multicast_router_timer);
		br_multicast_add_router(br, p);
		break;
	case MDB_RTR_TYPE_TEMP:
		/* Temporary router port with timer-based expiry. */
		p->multicast_router = MDB_RTR_TYPE_TEMP;
		br_multicast_mark_router(br, p);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
2025
/* (Re)start our own querier: reset and arm the bridge-level own query,
 * then enable the matching per-port own queries on every port that is
 * not disabled or blocking.
 */
static void br_multicast_start_querier(struct net_bridge *br,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	__br_multicast_open(br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		/* @query identifies which address family was requested. */
		if (query == &br->ip4_own_query)
			br_multicast_enable(&port->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(&port->ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
2048
/* Enable or disable multicast snooping on the bridge.
 *
 * Always returns 0; toggling to the current state is a no-op.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	br_mc_disabled_update(br->dev, val);
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		br_multicast_leave_snoopers(br);
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	/* NOTE(review): br_multicast_open() ends up joining the
	 * all-snoopers groups while multicast_lock (a BH spinlock) is
	 * held — confirm the ipv4/ipv6 mc-group join paths are safe in
	 * atomic context.
	 */
	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port(port);

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2076
2077 bool br_multicast_enabled(const struct net_device *dev)
2078 {
2079         struct net_bridge *br = netdev_priv(dev);
2080
2081         return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2082 }
2083 EXPORT_SYMBOL_GPL(br_multicast_enabled);
2084
2085 bool br_multicast_router(const struct net_device *dev)
2086 {
2087         struct net_bridge *br = netdev_priv(dev);
2088         bool is_router;
2089
2090         spin_lock_bh(&br->multicast_lock);
2091         is_router = br_multicast_is_router(br);
2092         spin_unlock_bh(&br->multicast_lock);
2093         return is_router;
2094 }
2095 EXPORT_SYMBOL_GPL(br_multicast_router);
2096
/* Enable or disable the bridge acting as IGMP/MLD querier.
 *
 * When enabling, delay our own queries if another querier may still be
 * active (other_query timer pending) and start the own queriers for
 * both address families.  Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&br->multicast_lock);
	if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
	if (!val)
		goto unlock;

	max_delay = br->multicast_query_response_interval;

	if (!timer_pending(&br->ip4_other_query.timer))
		br->ip4_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&br->ip6_other_query.timer))
		br->ip6_other_query.delay_time = jiffies + max_delay;

	br_multicast_start_querier(br, &br->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&br->multicast_lock);

	return 0;
}
2130
2131 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2132 {
2133         /* Currently we support only version 2 and 3 */
2134         switch (val) {
2135         case 2:
2136         case 3:
2137                 break;
2138         default:
2139                 return -EINVAL;
2140         }
2141
2142         spin_lock_bh(&br->multicast_lock);
2143         br->multicast_igmp_version = val;
2144         spin_unlock_bh(&br->multicast_lock);
2145
2146         return 0;
2147 }
2148
2149 #if IS_ENABLED(CONFIG_IPV6)
2150 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2151 {
2152         /* Currently we support version 1 and 2 */
2153         switch (val) {
2154         case 1:
2155         case 2:
2156                 break;
2157         default:
2158                 return -EINVAL;
2159         }
2160
2161         spin_lock_bh(&br->multicast_lock);
2162         br->multicast_mld_version = val;
2163         spin_unlock_bh(&br->multicast_lock);
2164
2165         return 0;
2166 }
2167 #endif
2168
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev:        The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* Skip the requesting port itself. */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			/* On OOM return the (possibly partial) count. */
			if (!entry)
				goto unlock;

			entry->addr = group->addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2225
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* Build a minimal fake ethernet header so the generic
	 * querier-exists helper can select the address family.
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(br, &eth);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2262
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* A querier counts only if its timer is still running and it
	 * was not seen on @dev's own port.
	 */
	switch (proto) {
	case ETH_P_IP:
		if (!timer_pending(&br->ip4_other_query.timer) ||
		    rcu_dereference(br->ip4_querier.port) == port)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		if (!timer_pending(&br->ip6_other_query.timer) ||
		    rcu_dereference(br->ip6_querier.port) == port)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2311
/* Classify one IGMP/MLD message of @type travelling in direction @dir
 * (RX/TX) and bump the matching per-cpu counter in @stats.
 *
 * Query versions are distinguished by transport payload length:
 * anything longer than the base header is a v3 (IGMP) / v2 (MLD)
 * query; for IGMP the remaining v1/v2 split uses the max-response-code
 * field (zero in v1 queries).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* Transport payload length = total length - IP header. */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				/* Larger than the base header: v3. */
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				/* v1 queries carry a zero max-resp code. */
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* Payload length past any extension headers. */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are longer than the base header. */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
2382
/* Account one IGMP/MLD message for the bridge (@p == NULL) or for port
 * @p.  No-op unless a message type was detected and per-bridge stats
 * are enabled.
 */
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	/* Port stats if a port was given, otherwise bridge stats. */
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
2401
2402 int br_multicast_init_stats(struct net_bridge *br)
2403 {
2404         br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2405         if (!br->mcast_stats)
2406                 return -ENOMEM;
2407
2408         return 0;
2409 }
2410
/* Free the per-cpu multicast statistics allocated by
 * br_multicast_init_stats().
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
2415
/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
/* Accumulate the RX and TX counters of @src into @dst. */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
2422
/* Sum the per-cpu multicast statistics of the bridge (@p == NULL) or of
 * port @p into @dest.  Each cpu's snapshot is taken under the
 * u64_stats fetch/retry loop so counters read consistently on 32-bit.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* Retry until a consistent snapshot is copied. */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
2468
/* Initialize the bridge's mdb rhashtable.  Returns rhashtable_init()'s
 * result (0 on success).
 */
int br_mdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
}
2473
/* Destroy the bridge's mdb rhashtable. */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->mdb_hash_tbl);
}