1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Bridge multicast support.
5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
35 #include "br_private.h"
37 static const struct rhashtable_params br_mdb_rht_params = {
38 .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
39 .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
40 .key_len = sizeof(struct br_ip),
41 .automatic_shrinking = true,
44 static void br_multicast_start_querier(struct net_bridge *br,
45 struct bridge_mcast_own_query *query);
46 static void br_multicast_add_router(struct net_bridge *br,
47 struct net_bridge_port *port);
48 static void br_ip4_multicast_leave_group(struct net_bridge *br,
49 struct net_bridge_port *port,
52 const unsigned char *src);
53 static void br_multicast_port_group_rexmit(struct timer_list *t);
55 static void __del_port_router(struct net_bridge_port *p);
56 #if IS_ENABLED(CONFIG_IPV6)
57 static void br_ip6_multicast_leave_group(struct net_bridge *br,
58 struct net_bridge_port *port,
59 const struct in6_addr *group,
60 __u16 vid, const unsigned char *src);
63 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
66 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
69 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
72 struct net_bridge_mdb_entry *ent;
74 lockdep_assert_held_once(&br->multicast_lock);
77 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
83 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
84 __be32 dst, __u16 vid)
88 memset(&br_dst, 0, sizeof(br_dst));
90 br_dst.proto = htons(ETH_P_IP);
93 return br_mdb_ip_get(br, &br_dst);
96 #if IS_ENABLED(CONFIG_IPV6)
97 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
98 const struct in6_addr *dst,
103 memset(&br_dst, 0, sizeof(br_dst));
105 br_dst.proto = htons(ETH_P_IPV6);
108 return br_mdb_ip_get(br, &br_dst);
112 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
113 struct sk_buff *skb, u16 vid)
117 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
120 if (BR_INPUT_SKB_CB(skb)->igmp)
123 memset(&ip, 0, sizeof(ip));
124 ip.proto = skb->protocol;
127 switch (skb->protocol) {
128 case htons(ETH_P_IP):
129 ip.u.ip4 = ip_hdr(skb)->daddr;
131 #if IS_ENABLED(CONFIG_IPV6)
132 case htons(ETH_P_IPV6):
133 ip.u.ip6 = ipv6_hdr(skb)->daddr;
140 return br_mdb_ip_get_rcu(br, &ip);
143 static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
145 struct net_bridge_mdb_entry *mp;
147 mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
148 WARN_ON(!hlist_unhashed(&mp->mdb_node));
151 del_timer_sync(&mp->timer);
155 static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
157 struct net_bridge *br = mp->br;
159 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
161 hlist_del_init_rcu(&mp->mdb_node);
162 hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
163 queue_work(system_long_wq, &br->mcast_gc_work);
166 static void br_multicast_group_expired(struct timer_list *t)
168 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
169 struct net_bridge *br = mp->br;
171 spin_lock(&br->multicast_lock);
172 if (!netif_running(br->dev) || timer_pending(&mp->timer))
175 br_multicast_host_leave(mp, true);
179 br_multicast_del_mdb_entry(mp);
181 spin_unlock(&br->multicast_lock);
184 static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
186 struct net_bridge_group_src *src;
188 src = container_of(gc, struct net_bridge_group_src, mcast_gc);
189 WARN_ON(!hlist_unhashed(&src->node));
191 del_timer_sync(&src->timer);
195 static void br_multicast_del_group_src(struct net_bridge_group_src *src)
197 struct net_bridge *br = src->pg->port->br;
199 hlist_del_init_rcu(&src->node);
201 hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
202 queue_work(system_long_wq, &br->mcast_gc_work);
205 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
207 struct net_bridge_port_group *pg;
209 pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
210 WARN_ON(!hlist_unhashed(&pg->mglist));
211 WARN_ON(!hlist_empty(&pg->src_list));
213 del_timer_sync(&pg->rexmit_timer);
214 del_timer_sync(&pg->timer);
218 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
219 struct net_bridge_port_group *pg,
220 struct net_bridge_port_group __rcu **pp)
222 struct net_bridge *br = pg->port->br;
223 struct net_bridge_group_src *ent;
224 struct hlist_node *tmp;
226 rcu_assign_pointer(*pp, pg->next);
227 hlist_del_init(&pg->mglist);
228 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
229 br_multicast_del_group_src(ent);
230 br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
231 hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
232 queue_work(system_long_wq, &br->mcast_gc_work);
234 if (!mp->ports && !mp->host_joined && netif_running(br->dev))
235 mod_timer(&mp->timer, jiffies);
238 static void br_multicast_find_del_pg(struct net_bridge *br,
239 struct net_bridge_port_group *pg)
241 struct net_bridge_port_group __rcu **pp;
242 struct net_bridge_mdb_entry *mp;
243 struct net_bridge_port_group *p;
245 mp = br_mdb_ip_get(br, &pg->addr);
249 for (pp = &mp->ports;
250 (p = mlock_dereference(*pp, br)) != NULL;
255 br_multicast_del_pg(mp, pg, pp);
262 static void br_multicast_port_group_expired(struct timer_list *t)
264 struct net_bridge_port_group *pg = from_timer(pg, t, timer);
265 struct net_bridge_group_src *src_ent;
266 struct net_bridge *br = pg->port->br;
267 struct hlist_node *tmp;
270 spin_lock(&br->multicast_lock);
271 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
272 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
275 changed = !!(pg->filter_mode == MCAST_EXCLUDE);
276 pg->filter_mode = MCAST_INCLUDE;
277 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
278 if (!timer_pending(&src_ent->timer)) {
279 br_multicast_del_group_src(src_ent);
284 if (hlist_empty(&pg->src_list)) {
285 br_multicast_find_del_pg(br, pg);
286 } else if (changed) {
287 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->addr);
291 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
294 spin_unlock(&br->multicast_lock);
297 static void br_multicast_gc(struct hlist_head *head)
299 struct net_bridge_mcast_gc *gcent;
300 struct hlist_node *tmp;
302 hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
303 hlist_del_init(&gcent->gc_node);
304 gcent->destroy(gcent);
308 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
309 struct net_bridge_port_group *pg,
310 __be32 ip_dst, __be32 group,
311 bool with_srcs, bool over_lmqt,
312 u8 sflag, u8 *igmp_type,
315 struct net_bridge_port *p = pg ? pg->port : NULL;
316 struct net_bridge_group_src *ent;
317 size_t pkt_size, igmp_hdr_size;
318 unsigned long now = jiffies;
319 struct igmpv3_query *ihv3;
320 void *csum_start = NULL;
321 __sum16 *csum = NULL;
329 igmp_hdr_size = sizeof(*ih);
330 if (br->multicast_igmp_version == 3) {
331 igmp_hdr_size = sizeof(*ihv3);
332 if (pg && with_srcs) {
333 lmqt = now + (br->multicast_last_member_interval *
334 br->multicast_last_member_count);
335 hlist_for_each_entry(ent, &pg->src_list, node) {
336 if (over_lmqt == time_after(ent->timer.expires,
338 ent->src_query_rexmit_cnt > 0)
344 igmp_hdr_size += lmqt_srcs * sizeof(__be32);
348 pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
349 if ((p && pkt_size > p->dev->mtu) ||
350 pkt_size > br->dev->mtu)
353 skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
357 skb->protocol = htons(ETH_P_IP);
359 skb_reset_mac_header(skb);
362 ether_addr_copy(eth->h_source, br->dev->dev_addr);
363 ip_eth_mc_map(ip_dst, eth->h_dest);
364 eth->h_proto = htons(ETH_P_IP);
365 skb_put(skb, sizeof(*eth));
367 skb_set_network_header(skb, skb->len);
369 iph->tot_len = htons(pkt_size - sizeof(*eth));
375 iph->frag_off = htons(IP_DF);
377 iph->protocol = IPPROTO_IGMP;
378 iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
379 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
381 ((u8 *)&iph[1])[0] = IPOPT_RA;
382 ((u8 *)&iph[1])[1] = 4;
383 ((u8 *)&iph[1])[2] = 0;
384 ((u8 *)&iph[1])[3] = 0;
388 skb_set_transport_header(skb, skb->len);
389 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
391 switch (br->multicast_igmp_version) {
394 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
395 ih->code = (group ? br->multicast_last_member_interval :
396 br->multicast_query_response_interval) /
397 (HZ / IGMP_TIMER_SCALE);
401 csum_start = (void *)ih;
404 ihv3 = igmpv3_query_hdr(skb);
405 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
406 ihv3->code = (group ? br->multicast_last_member_interval :
407 br->multicast_query_response_interval) /
408 (HZ / IGMP_TIMER_SCALE);
410 ihv3->qqic = br->multicast_query_interval / HZ;
411 ihv3->nsrcs = htons(lmqt_srcs);
413 ihv3->suppress = sflag;
417 csum_start = (void *)ihv3;
418 if (!pg || !with_srcs)
422 hlist_for_each_entry(ent, &pg->src_list, node) {
423 if (over_lmqt == time_after(ent->timer.expires,
425 ent->src_query_rexmit_cnt > 0) {
426 ihv3->srcs[lmqt_srcs++] = ent->addr.u.ip4;
427 ent->src_query_rexmit_cnt--;
428 if (need_rexmit && ent->src_query_rexmit_cnt)
432 if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
439 if (WARN_ON(!csum || !csum_start)) {
444 *csum = ip_compute_csum(csum_start, igmp_hdr_size);
445 skb_put(skb, igmp_hdr_size);
446 __skb_pull(skb, sizeof(*eth));
452 #if IS_ENABLED(CONFIG_IPV6)
453 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
454 struct net_bridge_port_group *pg,
455 const struct in6_addr *ip6_dst,
456 const struct in6_addr *group,
457 bool with_srcs, bool over_llqt,
458 u8 sflag, u8 *igmp_type,
461 struct net_bridge_port *p = pg ? pg->port : NULL;
462 struct net_bridge_group_src *ent;
463 size_t pkt_size, mld_hdr_size;
464 unsigned long now = jiffies;
465 struct mld2_query *mld2q;
466 void *csum_start = NULL;
467 unsigned long interval;
468 __sum16 *csum = NULL;
469 struct ipv6hdr *ip6h;
470 struct mld_msg *mldq;
477 mld_hdr_size = sizeof(*mldq);
478 if (br->multicast_mld_version == 2) {
479 mld_hdr_size = sizeof(*mld2q);
480 if (pg && with_srcs) {
481 llqt = now + (br->multicast_last_member_interval *
482 br->multicast_last_member_count);
483 hlist_for_each_entry(ent, &pg->src_list, node) {
484 if (over_llqt == time_after(ent->timer.expires,
486 ent->src_query_rexmit_cnt > 0)
492 mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
496 pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
497 if ((p && pkt_size > p->dev->mtu) ||
498 pkt_size > br->dev->mtu)
501 skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
505 skb->protocol = htons(ETH_P_IPV6);
507 /* Ethernet header */
508 skb_reset_mac_header(skb);
511 ether_addr_copy(eth->h_source, br->dev->dev_addr);
512 eth->h_proto = htons(ETH_P_IPV6);
513 skb_put(skb, sizeof(*eth));
515 /* IPv6 header + HbH option */
516 skb_set_network_header(skb, skb->len);
517 ip6h = ipv6_hdr(skb);
519 *(__force __be32 *)ip6h = htonl(0x60000000);
520 ip6h->payload_len = htons(8 + mld_hdr_size);
521 ip6h->nexthdr = IPPROTO_HOPOPTS;
523 ip6h->daddr = *ip6_dst;
524 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
527 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
531 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
532 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
534 hopopt = (u8 *)(ip6h + 1);
535 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
536 hopopt[1] = 0; /* length of HbH */
537 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
538 hopopt[3] = 2; /* Length of RA Option */
539 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
541 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
542 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
544 skb_put(skb, sizeof(*ip6h) + 8);
547 skb_set_transport_header(skb, skb->len);
548 interval = ipv6_addr_any(group) ?
549 br->multicast_query_response_interval :
550 br->multicast_last_member_interval;
551 *igmp_type = ICMPV6_MGM_QUERY;
552 switch (br->multicast_mld_version) {
554 mldq = (struct mld_msg *)icmp6_hdr(skb);
555 mldq->mld_type = ICMPV6_MGM_QUERY;
558 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
559 mldq->mld_reserved = 0;
560 mldq->mld_mca = *group;
561 csum = &mldq->mld_cksum;
562 csum_start = (void *)mldq;
565 mld2q = (struct mld2_query *)icmp6_hdr(skb);
566 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
567 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
568 mld2q->mld2q_code = 0;
569 mld2q->mld2q_cksum = 0;
570 mld2q->mld2q_resv1 = 0;
571 mld2q->mld2q_resv2 = 0;
572 mld2q->mld2q_suppress = sflag;
573 mld2q->mld2q_qrv = 2;
574 mld2q->mld2q_nsrcs = htons(llqt_srcs);
575 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
576 mld2q->mld2q_mca = *group;
577 csum = &mld2q->mld2q_cksum;
578 csum_start = (void *)mld2q;
579 if (!pg || !with_srcs)
583 hlist_for_each_entry(ent, &pg->src_list, node) {
584 if (over_llqt == time_after(ent->timer.expires,
586 ent->src_query_rexmit_cnt > 0) {
587 mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.u.ip6;
588 ent->src_query_rexmit_cnt--;
589 if (need_rexmit && ent->src_query_rexmit_cnt)
593 if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
600 if (WARN_ON(!csum || !csum_start)) {
605 *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
607 csum_partial(csum_start, mld_hdr_size, 0));
608 skb_put(skb, mld_hdr_size);
609 __skb_pull(skb, sizeof(*eth));
616 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
617 struct net_bridge_port_group *pg,
618 struct br_ip *ip_dst,
620 bool with_srcs, bool over_lmqt,
621 u8 sflag, u8 *igmp_type,
626 switch (group->proto) {
627 case htons(ETH_P_IP):
628 ip4_dst = ip_dst ? ip_dst->u.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
629 return br_ip4_multicast_alloc_query(br, pg,
630 ip4_dst, group->u.ip4,
631 with_srcs, over_lmqt,
634 #if IS_ENABLED(CONFIG_IPV6)
635 case htons(ETH_P_IPV6): {
636 struct in6_addr ip6_dst;
639 ip6_dst = ip_dst->u.ip6;
641 ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
644 return br_ip6_multicast_alloc_query(br, pg,
645 &ip6_dst, &group->u.ip6,
646 with_srcs, over_lmqt,
655 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
658 struct net_bridge_mdb_entry *mp;
661 mp = br_mdb_ip_get(br, group);
665 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
666 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
667 return ERR_PTR(-E2BIG);
670 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
672 return ERR_PTR(-ENOMEM);
676 mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
677 timer_setup(&mp->timer, br_multicast_group_expired, 0);
678 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
684 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
690 static void br_multicast_group_src_expired(struct timer_list *t)
692 struct net_bridge_group_src *src = from_timer(src, t, timer);
693 struct net_bridge_port_group *pg;
694 struct net_bridge *br = src->br;
696 spin_lock(&br->multicast_lock);
697 if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
698 timer_pending(&src->timer))
702 if (pg->filter_mode == MCAST_INCLUDE) {
703 br_multicast_del_group_src(src);
704 if (!hlist_empty(&pg->src_list))
706 br_multicast_find_del_pg(br, pg);
709 spin_unlock(&br->multicast_lock);
712 static struct net_bridge_group_src *
713 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
715 struct net_bridge_group_src *ent;
718 case htons(ETH_P_IP):
719 hlist_for_each_entry(ent, &pg->src_list, node)
720 if (ip->u.ip4 == ent->addr.u.ip4)
723 #if IS_ENABLED(CONFIG_IPV6)
724 case htons(ETH_P_IPV6):
725 hlist_for_each_entry(ent, &pg->src_list, node)
726 if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
735 static struct net_bridge_group_src *
736 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
738 struct net_bridge_group_src *grp_src;
740 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
743 switch (src_ip->proto) {
744 case htons(ETH_P_IP):
745 if (ipv4_is_zeronet(src_ip->u.ip4) ||
746 ipv4_is_multicast(src_ip->u.ip4))
749 #if IS_ENABLED(CONFIG_IPV6)
750 case htons(ETH_P_IPV6):
751 if (ipv6_addr_any(&src_ip->u.ip6) ||
752 ipv6_addr_is_multicast(&src_ip->u.ip6))
758 grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
759 if (unlikely(!grp_src))
763 grp_src->br = pg->port->br;
764 grp_src->addr = *src_ip;
765 grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
766 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
768 hlist_add_head_rcu(&grp_src->node, &pg->src_list);
774 struct net_bridge_port_group *br_multicast_new_port_group(
775 struct net_bridge_port *port,
777 struct net_bridge_port_group __rcu *next,
779 const unsigned char *src,
782 struct net_bridge_port_group *p;
784 p = kzalloc(sizeof(*p), GFP_ATOMIC);
791 p->filter_mode = filter_mode;
792 p->mcast_gc.destroy = br_multicast_destroy_port_group;
793 INIT_HLIST_HEAD(&p->src_list);
794 rcu_assign_pointer(p->next, next);
795 timer_setup(&p->timer, br_multicast_port_group_expired, 0);
796 timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
797 hlist_add_head(&p->mglist, &port->mglist);
800 memcpy(p->eth_addr, src, ETH_ALEN);
802 eth_broadcast_addr(p->eth_addr);
807 static bool br_port_group_equal(struct net_bridge_port_group *p,
808 struct net_bridge_port *port,
809 const unsigned char *src)
814 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
817 return ether_addr_equal(src, p->eth_addr);
820 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
822 if (!mp->host_joined) {
823 mp->host_joined = true;
825 br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
827 mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
830 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
832 if (!mp->host_joined)
835 mp->host_joined = false;
837 br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
840 static int br_multicast_add_group(struct net_bridge *br,
841 struct net_bridge_port *port,
843 const unsigned char *src,
847 struct net_bridge_port_group __rcu **pp;
848 struct net_bridge_port_group *p;
849 struct net_bridge_mdb_entry *mp;
850 unsigned long now = jiffies;
853 spin_lock(&br->multicast_lock);
854 if (!netif_running(br->dev) ||
855 (port && port->state == BR_STATE_DISABLED))
858 mp = br_multicast_new_group(br, group);
864 br_multicast_host_join(mp, true);
868 for (pp = &mp->ports;
869 (p = mlock_dereference(*pp, br)) != NULL;
871 if (br_port_group_equal(p, port, src))
873 if ((unsigned long)p->port < (unsigned long)port)
877 p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
880 rcu_assign_pointer(*pp, p);
881 br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
885 mod_timer(&p->timer, now + br->multicast_membership_interval);
891 spin_unlock(&br->multicast_lock);
895 static int br_ip4_multicast_add_group(struct net_bridge *br,
896 struct net_bridge_port *port,
899 const unsigned char *src,
902 struct br_ip br_group;
905 if (ipv4_is_local_multicast(group))
908 memset(&br_group, 0, sizeof(br_group));
909 br_group.u.ip4 = group;
910 br_group.proto = htons(ETH_P_IP);
912 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
914 return br_multicast_add_group(br, port, &br_group, src, filter_mode,
918 #if IS_ENABLED(CONFIG_IPV6)
919 static int br_ip6_multicast_add_group(struct net_bridge *br,
920 struct net_bridge_port *port,
921 const struct in6_addr *group,
923 const unsigned char *src,
926 struct br_ip br_group;
929 if (ipv6_addr_is_ll_all_nodes(group))
932 memset(&br_group, 0, sizeof(br_group));
933 br_group.u.ip6 = *group;
934 br_group.proto = htons(ETH_P_IPV6);
936 filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
938 return br_multicast_add_group(br, port, &br_group, src, filter_mode,
943 static void br_multicast_router_expired(struct timer_list *t)
945 struct net_bridge_port *port =
946 from_timer(port, t, multicast_router_timer);
947 struct net_bridge *br = port->br;
949 spin_lock(&br->multicast_lock);
950 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
951 port->multicast_router == MDB_RTR_TYPE_PERM ||
952 timer_pending(&port->multicast_router_timer))
955 __del_port_router(port);
957 spin_unlock(&br->multicast_lock);
960 static void br_mc_router_state_change(struct net_bridge *p,
963 struct switchdev_attr attr = {
965 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
966 .flags = SWITCHDEV_F_DEFER,
967 .u.mrouter = is_mc_router,
970 switchdev_port_attr_set(p->dev, &attr);
973 static void br_multicast_local_router_expired(struct timer_list *t)
975 struct net_bridge *br = from_timer(br, t, multicast_router_timer);
977 spin_lock(&br->multicast_lock);
978 if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
979 br->multicast_router == MDB_RTR_TYPE_PERM ||
980 timer_pending(&br->multicast_router_timer))
983 br_mc_router_state_change(br, false);
985 spin_unlock(&br->multicast_lock);
988 static void br_multicast_querier_expired(struct net_bridge *br,
989 struct bridge_mcast_own_query *query)
991 spin_lock(&br->multicast_lock);
992 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
995 br_multicast_start_querier(br, query);
998 spin_unlock(&br->multicast_lock);
1001 static void br_ip4_multicast_querier_expired(struct timer_list *t)
1003 struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
1005 br_multicast_querier_expired(br, &br->ip4_own_query);
1008 #if IS_ENABLED(CONFIG_IPV6)
1009 static void br_ip6_multicast_querier_expired(struct timer_list *t)
1011 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
1013 br_multicast_querier_expired(br, &br->ip6_own_query);
1017 static void br_multicast_select_own_querier(struct net_bridge *br,
1019 struct sk_buff *skb)
1021 if (ip->proto == htons(ETH_P_IP))
1022 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
1023 #if IS_ENABLED(CONFIG_IPV6)
1025 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
1029 static void __br_multicast_send_query(struct net_bridge *br,
1030 struct net_bridge_port *port,
1031 struct net_bridge_port_group *pg,
1032 struct br_ip *ip_dst,
1033 struct br_ip *group,
1038 bool over_lmqt = !!sflag;
1039 struct sk_buff *skb;
1043 skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
1044 over_lmqt, sflag, &igmp_type,
1050 skb->dev = port->dev;
1051 br_multicast_count(br, port, skb, igmp_type,
1053 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
1054 dev_net(port->dev), NULL, skb, NULL, skb->dev,
1055 br_dev_queue_push_xmit);
1057 if (over_lmqt && with_srcs && sflag) {
1059 goto again_under_lmqt;
1062 br_multicast_select_own_querier(br, group, skb);
1063 br_multicast_count(br, port, skb, igmp_type,
1069 static void br_multicast_send_query(struct net_bridge *br,
1070 struct net_bridge_port *port,
1071 struct bridge_mcast_own_query *own_query)
1073 struct bridge_mcast_other_query *other_query = NULL;
1074 struct br_ip br_group;
1077 if (!netif_running(br->dev) ||
1078 !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1079 !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1082 memset(&br_group.u, 0, sizeof(br_group.u));
1084 if (port ? (own_query == &port->ip4_own_query) :
1085 (own_query == &br->ip4_own_query)) {
1086 other_query = &br->ip4_other_query;
1087 br_group.proto = htons(ETH_P_IP);
1088 #if IS_ENABLED(CONFIG_IPV6)
1090 other_query = &br->ip6_other_query;
1091 br_group.proto = htons(ETH_P_IPV6);
1095 if (!other_query || timer_pending(&other_query->timer))
1098 __br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
1102 time += own_query->startup_sent < br->multicast_startup_query_count ?
1103 br->multicast_startup_query_interval :
1104 br->multicast_query_interval;
1105 mod_timer(&own_query->timer, time);
1109 br_multicast_port_query_expired(struct net_bridge_port *port,
1110 struct bridge_mcast_own_query *query)
1112 struct net_bridge *br = port->br;
1114 spin_lock(&br->multicast_lock);
1115 if (port->state == BR_STATE_DISABLED ||
1116 port->state == BR_STATE_BLOCKING)
1119 if (query->startup_sent < br->multicast_startup_query_count)
1120 query->startup_sent++;
1122 br_multicast_send_query(port->br, port, query);
1125 spin_unlock(&br->multicast_lock);
1128 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
1130 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
1132 br_multicast_port_query_expired(port, &port->ip4_own_query);
1135 #if IS_ENABLED(CONFIG_IPV6)
1136 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
1138 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
1140 br_multicast_port_query_expired(port, &port->ip6_own_query);
1144 static void br_multicast_port_group_rexmit(struct timer_list *t)
1146 struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
1147 struct bridge_mcast_other_query *other_query = NULL;
1148 struct net_bridge *br = pg->port->br;
1149 bool need_rexmit = false;
1151 spin_lock(&br->multicast_lock);
1152 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
1153 !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
1154 !br_opt_get(br, BROPT_MULTICAST_QUERIER))
1157 if (pg->addr.proto == htons(ETH_P_IP))
1158 other_query = &br->ip4_other_query;
1159 #if IS_ENABLED(CONFIG_IPV6)
1161 other_query = &br->ip6_other_query;
1164 if (!other_query || timer_pending(&other_query->timer))
1167 if (pg->grp_query_rexmit_cnt) {
1168 pg->grp_query_rexmit_cnt--;
1169 __br_multicast_send_query(br, pg->port, pg, &pg->addr,
1170 &pg->addr, false, 1, NULL);
1172 __br_multicast_send_query(br, pg->port, pg, &pg->addr,
1173 &pg->addr, true, 0, &need_rexmit);
1175 if (pg->grp_query_rexmit_cnt || need_rexmit)
1176 mod_timer(&pg->rexmit_timer, jiffies +
1177 br->multicast_last_member_interval);
1179 spin_unlock(&br->multicast_lock);
1182 static void br_mc_disabled_update(struct net_device *dev, bool value)
1184 struct switchdev_attr attr = {
1186 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
1187 .flags = SWITCHDEV_F_DEFER,
1188 .u.mc_disabled = !value,
1191 switchdev_port_attr_set(dev, &attr);
1194 int br_multicast_add_port(struct net_bridge_port *port)
1196 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1198 timer_setup(&port->multicast_router_timer,
1199 br_multicast_router_expired, 0);
1200 timer_setup(&port->ip4_own_query.timer,
1201 br_ip4_multicast_port_query_expired, 0);
1202 #if IS_ENABLED(CONFIG_IPV6)
1203 timer_setup(&port->ip6_own_query.timer,
1204 br_ip6_multicast_port_query_expired, 0);
1206 br_mc_disabled_update(port->dev,
1207 br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
1209 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
1210 if (!port->mcast_stats)
1216 void br_multicast_del_port(struct net_bridge_port *port)
1218 struct net_bridge *br = port->br;
1219 struct net_bridge_port_group *pg;
1220 HLIST_HEAD(deleted_head);
1221 struct hlist_node *n;
1223 /* Take care of the remaining groups, only perm ones should be left */
1224 spin_lock_bh(&br->multicast_lock);
1225 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1226 br_multicast_find_del_pg(br, pg);
1227 hlist_move_list(&br->mcast_gc_list, &deleted_head);
1228 spin_unlock_bh(&br->multicast_lock);
1229 br_multicast_gc(&deleted_head);
1230 del_timer_sync(&port->multicast_router_timer);
1231 free_percpu(port->mcast_stats);
1234 static void br_multicast_enable(struct bridge_mcast_own_query *query)
1236 query->startup_sent = 0;
1238 if (try_to_del_timer_sync(&query->timer) >= 0 ||
1239 del_timer(&query->timer))
1240 mod_timer(&query->timer, jiffies);
1243 static void __br_multicast_enable_port(struct net_bridge_port *port)
1245 struct net_bridge *br = port->br;
1247 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
1250 br_multicast_enable(&port->ip4_own_query);
1251 #if IS_ENABLED(CONFIG_IPV6)
1252 br_multicast_enable(&port->ip6_own_query);
1254 if (port->multicast_router == MDB_RTR_TYPE_PERM &&
1255 hlist_unhashed(&port->rlist))
1256 br_multicast_add_router(br, port);
1259 void br_multicast_enable_port(struct net_bridge_port *port)
1261 struct net_bridge *br = port->br;
1263 spin_lock(&br->multicast_lock);
1264 __br_multicast_enable_port(port);
1265 spin_unlock(&br->multicast_lock);
1268 void br_multicast_disable_port(struct net_bridge_port *port)
1270 struct net_bridge *br = port->br;
1271 struct net_bridge_port_group *pg;
1272 struct hlist_node *n;
1274 spin_lock(&br->multicast_lock);
1275 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1276 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
1277 br_multicast_find_del_pg(br, pg);
1279 __del_port_router(port);
1281 del_timer(&port->multicast_router_timer);
1282 del_timer(&port->ip4_own_query.timer);
1283 #if IS_ENABLED(CONFIG_IPV6)
1284 del_timer(&port->ip6_own_query.timer);
1286 spin_unlock(&br->multicast_lock);
1289 static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
1291 struct net_bridge_group_src *ent;
1292 struct hlist_node *tmp;
1295 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
1296 if (ent->flags & BR_SGRP_F_DELETE) {
1297 br_multicast_del_group_src(ent);
1304 static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
1306 struct bridge_mcast_other_query *other_query = NULL;
1307 struct net_bridge *br = pg->port->br;
1308 u32 lmqc = br->multicast_last_member_count;
1309 unsigned long lmqt, lmi, now = jiffies;
1310 struct net_bridge_group_src *ent;
1312 if (!netif_running(br->dev) ||
1313 !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1316 if (pg->addr.proto == htons(ETH_P_IP))
1317 other_query = &br->ip4_other_query;
1318 #if IS_ENABLED(CONFIG_IPV6)
1320 other_query = &br->ip6_other_query;
1323 lmqt = now + br_multicast_lmqt(br);
1324 hlist_for_each_entry(ent, &pg->src_list, node) {
1325 if (ent->flags & BR_SGRP_F_SEND) {
1326 ent->flags &= ~BR_SGRP_F_SEND;
1327 if (ent->timer.expires > lmqt) {
1328 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1330 !timer_pending(&other_query->timer))
1331 ent->src_query_rexmit_cnt = lmqc;
1332 mod_timer(&ent->timer, lmqt);
1337 if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
1338 !other_query || timer_pending(&other_query->timer))
1341 __br_multicast_send_query(br, pg->port, pg, &pg->addr,
1342 &pg->addr, true, 1, NULL);
1344 lmi = now + br->multicast_last_member_interval;
1345 if (!timer_pending(&pg->rexmit_timer) ||
1346 time_after(pg->rexmit_timer.expires, lmi))
1347 mod_timer(&pg->rexmit_timer, lmi);
1350 static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
1352 struct bridge_mcast_other_query *other_query = NULL;
1353 struct net_bridge *br = pg->port->br;
1354 unsigned long now = jiffies, lmi;
1356 if (!netif_running(br->dev) ||
1357 !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1360 if (pg->addr.proto == htons(ETH_P_IP))
1361 other_query = &br->ip4_other_query;
1362 #if IS_ENABLED(CONFIG_IPV6)
1364 other_query = &br->ip6_other_query;
1367 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
1368 other_query && !timer_pending(&other_query->timer)) {
1369 lmi = now + br->multicast_last_member_interval;
1370 pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
1371 __br_multicast_send_query(br, pg->port, pg, &pg->addr,
1372 &pg->addr, false, 0, NULL);
1373 if (!timer_pending(&pg->rexmit_timer) ||
1374 time_after(pg->rexmit_timer.expires, lmi))
1375 mod_timer(&pg->rexmit_timer, lmi);
1378 if (pg->filter_mode == MCAST_EXCLUDE &&
1379 (!timer_pending(&pg->timer) ||
1380 time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
1381 mod_timer(&pg->timer, now + br_multicast_lmqt(br));
1384 /* State Msg type New state Actions
1385 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1386 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1387 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
/* Handle IS_IN/ALLOW records per the table above: for each reported source,
 * find or create a source entry on @pg and refresh its timer to the group
 * membership interval (GMI).  @srcs is a packed array of @nsrcs addresses,
 * each @src_size bytes (4 for IPv4, 16 for IPv6).
 * Returns true when the port group changed (extraction gaps hide the exact
 * "changed" updates and return — confirm against the full file).
 */
1389 static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
1390 void *srcs, u32 nsrcs, size_t src_size)
1392 struct net_bridge *br = pg->port->br;
1393 struct net_bridge_group_src *ent;
1394 unsigned long now = jiffies;
1395 bool changed = false;
1396 struct br_ip src_ip;
1399 memset(&src_ip, 0, sizeof(src_ip));
1400 src_ip.proto = pg->addr.proto;
1401 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1402 memcpy(&src_ip.u, srcs, src_size);
1403 ent = br_multicast_find_group_src(pg, &src_ip);
1405 ent = br_multicast_new_group_src(pg, &src_ip);
/* (B)=GMI: refresh the source timer */
1411 mod_timer(&ent->timer, now + br_multicast_gmi(br));
1418 /* State Msg type New state Actions
1419 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
/* INCLUDE (A) + IS_EX (B) -> EXCLUDE (A*B, B-A): mark all existing sources
 * for deletion, unmark those also present in @srcs (A*B) and create the
 * missing ones (B-A), then delete what is still marked (A-B).
 */
1423 static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
1424 void *srcs, u32 nsrcs, size_t src_size)
1426 struct net_bridge_group_src *ent;
1427 struct br_ip src_ip;
/* tentatively mark every current source for deletion */
1430 hlist_for_each_entry(ent, &pg->src_list, node)
1431 ent->flags |= BR_SGRP_F_DELETE;
1433 memset(&src_ip, 0, sizeof(src_ip));
1434 src_ip.proto = pg->addr.proto;
1435 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1436 memcpy(&src_ip.u, srcs, src_size);
1437 ent = br_multicast_find_group_src(pg, &src_ip);
1439 ent->flags &= ~BR_SGRP_F_DELETE;
1441 br_multicast_new_group_src(pg, &src_ip);
/* remove sources not present in the report */
1445 __grp_src_delete_marked(pg);
1448 /* State Msg type New state Actions
1449 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
/* EXCLUDE (X,Y) + IS_EX (A) -> EXCLUDE (A-Y, Y*A): keep only sources also in
 * @srcs, create new entries for (A-X-Y) with timer set to GMI, and delete the
 * rest.  Returns true when the source set changed (deletions included).
 * NOTE(review): the "changed = true" assignments sit on lines dropped by the
 * extraction — confirm against the full file.
 */
1455 static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
1456 void *srcs, u32 nsrcs, size_t src_size)
1457 struct net_bridge *br = pg->port->br;
1458 struct net_bridge_group_src *ent;
1459 unsigned long now = jiffies;
1460 bool changed = false;
1461 struct br_ip src_ip;
1464 hlist_for_each_entry(ent, &pg->src_list, node)
1465 ent->flags |= BR_SGRP_F_DELETE;
1467 memset(&src_ip, 0, sizeof(src_ip));
1468 src_ip.proto = pg->addr.proto;
1469 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1470 memcpy(&src_ip.u, srcs, src_size);
1471 ent = br_multicast_find_group_src(pg, &src_ip);
1473 ent->flags &= ~BR_SGRP_F_DELETE;
1475 ent = br_multicast_new_group_src(pg, &src_ip);
/* (A-X-Y)=GMI: brand-new sources get the group membership interval */
1477 mod_timer(&ent->timer,
1478 now + br_multicast_gmi(br));
1485 if (__grp_src_delete_marked(pg))
/* Dispatch an IS_EXCLUDE record based on the port group's current filter
 * mode, then switch the group to EXCLUDE mode and refresh the group timer
 * to GMI.  Returns whether the group changed.
 */
1491 static bool br_multicast_isexc(struct net_bridge_port_group *pg,
1492 void *srcs, u32 nsrcs, size_t src_size)
1494 struct net_bridge *br = pg->port->br;
1495 bool changed = false;
1497 switch (pg->filter_mode) {
1499 __grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
1503 changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
/* group is in EXCLUDE mode after an IS_EX record either way */
1507 pg->filter_mode = MCAST_EXCLUDE;
1508 mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1513 /* State Msg type New state Actions
1514 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
/* INCLUDE (A) + TO_IN (B) -> INCLUDE (A+B), (B)=GMI, then send Q(G,A-B):
 * mark all current sources to be queried, unmark those re-reported in @srcs
 * (refreshing their timers to GMI), create the new ones, and finally query
 * the still-marked sources (A-B) via __grp_src_query_marked_and_rexmit().
 */
1517 static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
1518 void *srcs, u32 nsrcs, size_t src_size)
1520 struct net_bridge *br = pg->port->br;
1521 u32 src_idx, to_send = pg->src_ents;
1522 struct net_bridge_group_src *ent;
1523 unsigned long now = jiffies;
1524 bool changed = false;
1525 struct br_ip src_ip;
1527 hlist_for_each_entry(ent, &pg->src_list, node)
1528 ent->flags |= BR_SGRP_F_SEND;
1530 memset(&src_ip, 0, sizeof(src_ip));
1531 src_ip.proto = pg->addr.proto;
1532 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1533 memcpy(&src_ip.u, srcs, src_size);
1534 ent = br_multicast_find_group_src(pg, &src_ip);
1536 ent->flags &= ~BR_SGRP_F_SEND;
1539 ent = br_multicast_new_group_src(pg, &src_ip);
1544 mod_timer(&ent->timer, now + br_multicast_gmi(br));
/* Q(G,A-B): query sources that were not re-reported */
1549 __grp_src_query_marked_and_rexmit(pg);
1554 /* State Msg type New state Actions
1555 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
/* EXCLUDE (X,Y) + TO_IN (A) -> EXCLUDE (X+A, Y-A), (A)=GMI, then send
 * Q(G,X-A) and Q(G): only sources with running timers (the X set) are marked
 * for querying; re-reported ones are unmarked and refreshed to GMI, new ones
 * created.  Ends with a group-and-source query plus a group query via
 * __grp_send_query_and_rexmit().
 */
1559 static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
1560 void *srcs, u32 nsrcs, size_t src_size)
1562 struct net_bridge *br = pg->port->br;
1563 u32 src_idx, to_send = pg->src_ents;
1564 struct net_bridge_group_src *ent;
1565 unsigned long now = jiffies;
1566 bool changed = false;
1567 struct br_ip src_ip;
/* only X (sources with pending timers) is eligible for Q(G,X-A) */
1569 hlist_for_each_entry(ent, &pg->src_list, node)
1570 if (timer_pending(&ent->timer))
1571 ent->flags |= BR_SGRP_F_SEND;
1573 memset(&src_ip, 0, sizeof(src_ip));
1574 src_ip.proto = pg->addr.proto;
1575 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1576 memcpy(&src_ip.u, srcs, src_size);
1577 ent = br_multicast_find_group_src(pg, &src_ip);
1579 if (timer_pending(&ent->timer)) {
1580 ent->flags &= ~BR_SGRP_F_SEND;
1584 ent = br_multicast_new_group_src(pg, &src_ip);
1589 mod_timer(&ent->timer, now + br_multicast_gmi(br));
1594 __grp_src_query_marked_and_rexmit(pg);
/* also send the plain group query Q(G) */
1596 __grp_send_query_and_rexmit(pg);
/* Dispatch a TO_INCLUDE record to the INCLUDE/EXCLUDE handler matching the
 * port group's current filter mode; returns whether the group changed.
 */
1601 static bool br_multicast_toin(struct net_bridge_port_group *pg,
1602 void *srcs, u32 nsrcs, size_t src_size)
1604 bool changed = false;
1606 switch (pg->filter_mode) {
1608 changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
1611 changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
1618 /* State Msg type New state Actions
1619 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
/* INCLUDE (A) + TO_EX (B) -> EXCLUDE (A*B, B-A): mark all sources for delete
 * and clear their send flag; re-reported sources (A*B) are switched from
 * delete to send, new ones (B-A) are created with a zero timer.  Marked
 * sources are then deleted and the send-marked set queried (Q(G,A*B)).
 */
1624 static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
1625 void *srcs, u32 nsrcs, size_t src_size)
1627 struct net_bridge_group_src *ent;
1628 u32 src_idx, to_send = 0;
1629 struct br_ip src_ip;
1631 hlist_for_each_entry(ent, &pg->src_list, node)
1632 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
1634 memset(&src_ip, 0, sizeof(src_ip));
1635 src_ip.proto = pg->addr.proto;
1636 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1637 memcpy(&src_ip.u, srcs, src_size);
1638 ent = br_multicast_find_group_src(pg, &src_ip);
/* present in the report: keep it, but schedule it for querying */
1640 ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
1644 br_multicast_new_group_src(pg, &src_ip);
1649 __grp_src_delete_marked(pg);
1651 __grp_src_query_marked_and_rexmit(pg);
1654 /* State Msg type New state Actions
1655 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
/* EXCLUDE (X,Y) + TO_EX (A) -> EXCLUDE (A-Y, Y*A): sources kept are those
 * also in @srcs; new (A-X-Y) entries inherit the group timer.  Sources with a
 * running timer are send-marked for Q(G,A-Y).  Returns whether the group
 * changed (deletions included).
 */
1661 static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
1662 void *srcs, u32 nsrcs, size_t src_size)
1664 struct net_bridge_group_src *ent;
1665 u32 src_idx, to_send = 0;
1666 bool changed = false;
1667 struct br_ip src_ip;
1669 hlist_for_each_entry(ent, &pg->src_list, node)
1670 ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
1672 memset(&src_ip, 0, sizeof(src_ip));
1673 src_ip.proto = pg->addr.proto;
1674 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1675 memcpy(&src_ip.u, srcs, src_size);
1676 ent = br_multicast_find_group_src(pg, &src_ip);
1678 ent->flags &= ~BR_SGRP_F_DELETE;
1680 ent = br_multicast_new_group_src(pg, &src_ip);
/* (A-X-Y) = Group Timer: new sources track the group's own timer */
1682 mod_timer(&ent->timer, pg->timer.expires);
1686 if (ent && timer_pending(&ent->timer)) {
1687 ent->flags |= BR_SGRP_F_SEND;
1693 if (__grp_src_delete_marked(pg))
1696 __grp_src_query_marked_and_rexmit(pg);
/* Dispatch a TO_EXCLUDE record based on the current filter mode, then switch
 * the port group to EXCLUDE mode and refresh the group timer to GMI.
 */
1701 static bool br_multicast_toex(struct net_bridge_port_group *pg,
1702 void *srcs, u32 nsrcs, size_t src_size)
1704 struct net_bridge *br = pg->port->br;
1705 bool changed = false;
1707 switch (pg->filter_mode) {
1709 __grp_src_toex_incl(pg, srcs, nsrcs, src_size);
1713 __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
1717 pg->filter_mode = MCAST_EXCLUDE;
1718 mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
1723 /* State Msg type New state Actions
1724 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
/* INCLUDE (A) + BLOCK (B): send Q(G,A*B) — send-mark only the sources that
 * are both currently tracked and listed in the BLOCK record, then query them.
 * If the group ends up in INCLUDE mode with an empty source list, the whole
 * port group is removed.
 */
1726 static void __grp_src_block_incl(struct net_bridge_port_group *pg,
1727 void *srcs, u32 nsrcs, size_t src_size)
1729 struct net_bridge_group_src *ent;
1730 u32 src_idx, to_send = 0;
1731 struct br_ip src_ip;
1733 hlist_for_each_entry(ent, &pg->src_list, node)
1734 ent->flags &= ~BR_SGRP_F_SEND;
1736 memset(&src_ip, 0, sizeof(src_ip));
1737 src_ip.proto = pg->addr.proto;
1738 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1739 memcpy(&src_ip.u, srcs, src_size);
1740 ent = br_multicast_find_group_src(pg, &src_ip);
1742 ent->flags |= BR_SGRP_F_SEND;
1749 __grp_src_query_marked_and_rexmit(pg);
/* nothing left to include -> delete the port group entirely */
1751 if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
1752 br_multicast_find_del_pg(pg->port->br, pg);
1755 /* State Msg type New state Actions
1756 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
/* EXCLUDE (X,Y) + BLOCK (A) -> EXCLUDE (X+(A-Y), Y): unknown blocked sources
 * are created with the group timer ((A-X-Y)=Group Timer); sources with a
 * running timer are send-marked and queried (Q(G,A-Y)).  Returns whether the
 * group changed.
 */
1759 static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
1760 void *srcs, u32 nsrcs, size_t src_size)
1762 struct net_bridge_group_src *ent;
1763 u32 src_idx, to_send = 0;
1764 bool changed = false;
1765 struct br_ip src_ip;
1767 hlist_for_each_entry(ent, &pg->src_list, node)
1768 ent->flags &= ~BR_SGRP_F_SEND;
1770 memset(&src_ip, 0, sizeof(src_ip));
1771 src_ip.proto = pg->addr.proto;
1772 for (src_idx = 0; src_idx < nsrcs; src_idx++) {
1773 memcpy(&src_ip.u, srcs, src_size);
1774 ent = br_multicast_find_group_src(pg, &src_ip);
1776 ent = br_multicast_new_group_src(pg, &src_ip);
1778 mod_timer(&ent->timer, pg->timer.expires);
1782 if (ent && timer_pending(&ent->timer)) {
1783 ent->flags |= BR_SGRP_F_SEND;
1790 __grp_src_query_marked_and_rexmit(pg);
/* Dispatch a BLOCK_OLD_SOURCES record to the handler matching the port
 * group's current filter mode; returns whether the group changed.
 */
1795 static bool br_multicast_block(struct net_bridge_port_group *pg,
1796 void *srcs, u32 nsrcs, size_t src_size)
1798 bool changed = false;
1800 switch (pg->filter_mode) {
1802 __grp_src_block_incl(pg, srcs, nsrcs, src_size);
1805 changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
/* Walk @mp's port-group list (under multicast_lock, via mlock_dereference)
 * and return the entry matching port @p and source MAC @src, or NULL-ish
 * fallthrough when none matches (tail of the function not visible here).
 */
1812 static struct net_bridge_port_group *
1813 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
1814 struct net_bridge_port *p,
1815 const unsigned char *src)
1817 struct net_bridge_port_group *pg;
1818 struct net_bridge *br = mp->br;
1820 for (pg = mlock_dereference(mp->ports, br);
1822 pg = mlock_dereference(pg->next, br))
1823 if (br_port_group_equal(pg, p, src))
/* Parse an IGMPv3 membership report and apply each group record to the MDB.
 * Validates every record with ip_mc_may_pull() before touching it; unknown
 * record types are skipped.  TO_IN/IS_IN with no sources acts as a leave
 * (or is ignored in IGMPv2 compatibility mode / for host-only entries);
 * other records go through the per-type state handlers under multicast_lock,
 * notifying RTM_NEWMDB when a handler reports a change.
 * NOTE(review): extraction dropped the "len += nsrcs*4", error returns and
 * break statements between the numbered lines — confirm against full file.
 */
1829 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1830 struct net_bridge_port *port,
1831 struct sk_buff *skb,
1834 bool igmpv2 = br->multicast_igmp_version == 2;
1835 struct net_bridge_mdb_entry *mdst;
1836 struct net_bridge_port_group *pg;
1837 const unsigned char *src;
1838 struct igmpv3_report *ih;
1839 struct igmpv3_grec *grec;
1840 int i, len, num, type;
1841 bool changed = false;
1846 ih = igmpv3_report_hdr(skb);
1847 num = ntohs(ih->ngrec);
1848 len = skb_transport_offset(skb) + sizeof(*ih);
1850 for (i = 0; i < num; i++) {
1851 len += sizeof(*grec);
1852 if (!ip_mc_may_pull(skb, len))
1855 grec = (void *)(skb->data + len - sizeof(*grec));
1856 group = grec->grec_mca;
1857 type = grec->grec_type;
1858 nsrcs = ntohs(grec->grec_nsrcs);
1861 if (!ip_mc_may_pull(skb, len))
/* only these record types are processed; everything else is skipped */
1865 case IGMPV3_MODE_IS_INCLUDE:
1866 case IGMPV3_MODE_IS_EXCLUDE:
1867 case IGMPV3_CHANGE_TO_INCLUDE:
1868 case IGMPV3_CHANGE_TO_EXCLUDE:
1869 case IGMPV3_ALLOW_NEW_SOURCES:
1870 case IGMPV3_BLOCK_OLD_SOURCES:
1877 src = eth_hdr(skb)->h_source;
/* TO_IN{}/IS_IN{} with no sources == leave in v2-compat mode */
1879 (type == IGMPV3_CHANGE_TO_INCLUDE ||
1880 type == IGMPV3_MODE_IS_INCLUDE)) {
1881 if (!port || igmpv2) {
1882 br_ip4_multicast_leave_group(br, port, group, vid, src);
1886 err = br_ip4_multicast_add_group(br, port, group, vid,
1892 if (!port || igmpv2)
1895 spin_lock_bh(&br->multicast_lock);
1896 mdst = br_mdb_ip4_get(br, group, vid);
1898 goto unlock_continue;
1899 pg = br_multicast_find_port(mdst, port, src);
1900 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
1901 goto unlock_continue;
/* reload grec: skb data may have moved after the pulls above */
1903 grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
1905 case IGMPV3_ALLOW_NEW_SOURCES:
1906 changed = br_multicast_isinc_allow(pg, grec->grec_src,
1907 nsrcs, sizeof(__be32));
1909 case IGMPV3_MODE_IS_INCLUDE:
1910 changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
1913 case IGMPV3_MODE_IS_EXCLUDE:
1914 changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
1917 case IGMPV3_CHANGE_TO_INCLUDE:
1918 changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
1921 case IGMPV3_CHANGE_TO_EXCLUDE:
1922 changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
1925 case IGMPV3_BLOCK_OLD_SOURCES:
1926 changed = br_multicast_block(pg, grec->grec_src, nsrcs,
1931 br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
1933 spin_unlock_bh(&br->multicast_lock);
1939 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_igmp3_report(): parse an MLDv2 report
 * and apply each group record to the MDB.  Uses skb_header_pointer() to read
 * grec_nsrcs safely and ipv6_mc_may_pull() to validate the full record before
 * dereferencing it.  TO_IN/IS_IN with no sources acts as a leave (or is
 * ignored in MLDv1 compatibility mode); other records run under
 * multicast_lock and RTM_NEWMDB is sent when a handler reports a change.
 */
1940 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1941 struct net_bridge_port *port,
1942 struct sk_buff *skb,
1945 bool mldv1 = br->multicast_mld_version == 1;
1946 struct net_bridge_mdb_entry *mdst;
1947 struct net_bridge_port_group *pg;
1948 unsigned int nsrcs_offset;
1949 const unsigned char *src;
1950 struct icmp6hdr *icmp6h;
1951 struct mld2_grec *grec;
1952 unsigned int grec_len;
1953 bool changed = false;
1957 if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
1960 icmp6h = icmp6_hdr(skb);
/* number of group records lives in the second data16 word of the header */
1961 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1962 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1964 for (i = 0; i < num; i++) {
1965 __be16 *_nsrcs, __nsrcs;
1968 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
1970 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
1971 nsrcs_offset + sizeof(__nsrcs))
1974 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
1975 sizeof(__nsrcs), &__nsrcs);
1979 nsrcs = ntohs(*_nsrcs);
/* struct_size() guards against nsrcs overflow when sizing the record */
1980 grec_len = struct_size(grec, grec_src, nsrcs);
1982 if (!ipv6_mc_may_pull(skb, len + grec_len))
1985 grec = (struct mld2_grec *)(skb->data + len);
1988 switch (grec->grec_type) {
1989 case MLD2_MODE_IS_INCLUDE:
1990 case MLD2_MODE_IS_EXCLUDE:
1991 case MLD2_CHANGE_TO_INCLUDE:
1992 case MLD2_CHANGE_TO_EXCLUDE:
1993 case MLD2_ALLOW_NEW_SOURCES:
1994 case MLD2_BLOCK_OLD_SOURCES:
2001 src = eth_hdr(skb)->h_source;
2002 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
2003 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
2005 if (!port || mldv1) {
2006 br_ip6_multicast_leave_group(br, port,
2012 err = br_ip6_multicast_add_group(br, port,
2013 &grec->grec_mca, vid,
2022 spin_lock_bh(&br->multicast_lock);
2023 mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
2025 goto unlock_continue;
2026 pg = br_multicast_find_port(mdst, port, src);
2027 if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
2028 goto unlock_continue;
2029 switch (grec->grec_type) {
2030 case MLD2_ALLOW_NEW_SOURCES:
2031 changed = br_multicast_isinc_allow(pg, grec->grec_src,
2033 sizeof(struct in6_addr));
2035 case MLD2_MODE_IS_INCLUDE:
2036 changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
2037 sizeof(struct in6_addr));
2039 case MLD2_MODE_IS_EXCLUDE:
2040 changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
2041 sizeof(struct in6_addr));
2043 case MLD2_CHANGE_TO_INCLUDE:
2044 changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
2045 sizeof(struct in6_addr));
2047 case MLD2_CHANGE_TO_EXCLUDE:
2048 changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
2049 sizeof(struct in6_addr));
2051 case MLD2_BLOCK_OLD_SOURCES:
2052 changed = br_multicast_block(pg, grec->grec_src, nsrcs,
2053 sizeof(struct in6_addr));
2057 br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
2059 spin_unlock_bh(&br->multicast_lock);
/* IGMP querier election: adopt @saddr as the IPv4 querier when no querier
 * timer is running, when no querier is recorded yet, or when @saddr is
 * numerically lower than the current one (lowest address wins).  Updates the
 * querier port pointer; caller holds multicast_lock.
 */
2066 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
2067 struct net_bridge_port *port,
2070 if (!timer_pending(&br->ip4_own_query.timer) &&
2071 !timer_pending(&br->ip4_other_query.timer))
2074 if (!br->ip4_querier.addr.u.ip4)
/* higher (or equal) address loses the election */
2077 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
2083 br->ip4_querier.addr.u.ip4 = saddr;
2085 /* update protected by general multicast_lock by caller */
2086 rcu_assign_pointer(br->ip4_querier.port, port);
2091 #if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, IPv6 counterpart of the IPv4 version above:
 * lowest source address wins; records the winning address and port.
 */
2092 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
2093 struct net_bridge_port *port,
2094 struct in6_addr *saddr)
2096 if (!timer_pending(&br->ip6_own_query.timer) &&
2097 !timer_pending(&br->ip6_other_query.timer))
2100 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
2106 br->ip6_querier.addr.u.ip6 = *saddr;
2108 /* update protected by general multicast_lock by caller */
2109 rcu_assign_pointer(br->ip6_querier.port, port);
/* Protocol dispatch for querier election based on @saddr->proto. */
2115 static bool br_multicast_select_querier(struct net_bridge *br,
2116 struct net_bridge_port *port,
2117 struct br_ip *saddr)
2119 switch (saddr->proto) {
2120 case htons(ETH_P_IP):
2121 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
2122 #if IS_ENABLED(CONFIG_IPV6)
2123 case htons(ETH_P_IPV6):
2124 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
/* Refresh the "other querier present" timer after hearing a foreign query;
 * also records the delay_time when the timer was not already running.
 */
2132 br_multicast_update_query_timer(struct net_bridge *br,
2133 struct bridge_mcast_other_query *query,
2134 unsigned long max_delay)
2136 if (!timer_pending(&query->timer))
2137 query->delay_time = jiffies + max_delay;
2139 mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
/* Propagate the port's multicast-router state to offloading hardware via a
 * deferred switchdev attribute set.
 */
2142 static void br_port_mc_router_state_change(struct net_bridge_port *p,
2145 struct switchdev_attr attr = {
2147 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
2148 .flags = SWITCHDEV_F_DEFER,
2149 .u.mrouter = is_mc_router,
2152 switchdev_port_attr_set(p->dev, &attr);
2156 * Add port to router_list
2157 * list is maintained ordered by pointer value
2158 * and locked by br->multicast_lock and RCU
/* Insert @port into the bridge's router_list (no-op if already hashed),
 * keeping the list ordered by pointer value, then notify userspace
 * (RTM_NEWMDB) and the switchdev layer.
 */
2160 static void br_multicast_add_router(struct net_bridge *br,
2161 struct net_bridge_port *port)
2163 struct net_bridge_port *p;
2164 struct hlist_node *slot = NULL;
2166 if (!hlist_unhashed(&port->rlist))
/* find the insertion point that preserves pointer ordering */
2169 hlist_for_each_entry(p, &br->router_list, rlist) {
2170 if ((unsigned long) port >= (unsigned long) p)
2176 hlist_add_behind_rcu(&port->rlist, slot);
2178 hlist_add_head_rcu(&port->rlist, &br->router_list);
2179 br_rtr_notify(br->dev, port, RTM_NEWMDB);
2180 br_port_mc_router_state_change(port, true);
/* Mark the bridge itself (@port == NULL path) or @port as a multicast router
 * and (re)arm the corresponding router timer with the querier interval.
 * Ports set to DISABLED or PERM router mode are not touched.
 * NOTE(review): the NULL-port branch/returns sit on dropped lines — confirm.
 */
2183 static void br_multicast_mark_router(struct net_bridge *br,
2184 struct net_bridge_port *port)
2186 unsigned long now = jiffies;
2189 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
2190 if (!timer_pending(&br->multicast_router_timer))
2191 br_mc_router_state_change(br, true);
2192 mod_timer(&br->multicast_router_timer,
2193 now + br->multicast_querier_interval)
2198 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
2199 port->multicast_router == MDB_RTR_TYPE_PERM)
2202 br_multicast_add_router(br, port);
2204 mod_timer(&port->multicast_router_timer,
2205 now + br->multicast_querier_interval);
/* React to a query from another device: if it wins querier election, refresh
 * the other-querier timer and mark the originating port as a router port.
 */
2208 static void br_multicast_query_received(struct net_bridge *br,
2209 struct net_bridge_port *port,
2210 struct bridge_mcast_other_query *query,
2211 struct br_ip *saddr,
2212 unsigned long max_delay)
2214 if (!br_multicast_select_querier(br, port, saddr))
2217 br_multicast_update_query_timer(br, query, max_delay);
2218 br_multicast_mark_router(br, port);
/* Handle a received IGMP query.  Derives max response delay from the IGMP
 * version (v1: fixed 10s, v2: code field, v3: IGMPV3_MRC decoding, honouring
 * the v3 suppress flag), runs querier election for general queries, and for
 * group-specific queries lowers the host and per-port timers of the matching
 * MDB entry.  All under multicast_lock; skipped when the bridge is down or
 * the ingress port is disabled.
 */
2221 static void br_ip4_multicast_query(struct net_bridge *br,
2222 struct net_bridge_port *port,
2223 struct sk_buff *skb,
2226 unsigned int transport_len = ip_transport_len(skb);
2227 const struct iphdr *iph = ip_hdr(skb);
2228 struct igmphdr *ih = igmp_hdr(skb);
2229 struct net_bridge_mdb_entry *mp;
2230 struct igmpv3_query *ih3;
2231 struct net_bridge_port_group *p;
2232 struct net_bridge_port_group __rcu **pp;
2234 unsigned long max_delay;
2235 unsigned long now = jiffies;
2238 spin_lock(&br->multicast_lock);
2239 if (!netif_running(br->dev) ||
2240 (port && port->state == BR_STATE_DISABLED))
/* plain igmphdr-sized packet: IGMPv1 (code==0 -> 10s) or IGMPv2 */
2245 if (transport_len == sizeof(*ih)) {
2246 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
2249 max_delay = 10 * HZ;
2252 } else if (transport_len >= sizeof(*ih3)) {
2253 ih3 = igmpv3_query_hdr(skb);
/* S-flag set on a v3 group query: suppress router-side processing */
2255 (br->multicast_igmp_version == 3 && group && ih3->suppress))
2258 max_delay = ih3->code ?
2259 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
2265 saddr.proto = htons(ETH_P_IP);
2266 saddr.u.ip4 = iph->saddr;
2268 br_multicast_query_received(br, port, &br->ip4_other_query,
2273 mp = br_mdb_ip4_get(br, group, vid);
2277 max_delay *= br->multicast_last_member_count;
2279 if (mp->host_joined &&
2280 (timer_pending(&mp->timer) ?
2281 time_after(mp->timer.expires, now + max_delay) :
2282 try_to_del_timer_sync(&mp->timer) >= 0))
2283 mod_timer(&mp->timer, now + max_delay);
2285 for (pp = &mp->ports;
2286 (p = mlock_dereference(*pp, br)) != NULL;
2288 if (timer_pending(&p->timer) ?
2289 time_after(p->timer.expires, now + max_delay) :
2290 try_to_del_timer_sync(&p->timer) >= 0 &&
2291 (br->multicast_igmp_version == 2 ||
2292 p->filter_mode == MCAST_EXCLUDE))
2293 mod_timer(&p->timer, now + max_delay);
2297 spin_unlock(&br->multicast_lock);
2300 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query, IPv6 counterpart of br_ip4_multicast_query():
 * MLDv1 uses mld_maxdelay, MLDv2 uses mldv2_mrc() (minimum 1 jiffy) and
 * honours the suppress flag.  General queries feed querier election;
 * group-specific queries lower the matching MDB entry's timers.
 */
2301 static int br_ip6_multicast_query(struct net_bridge *br,
2302 struct net_bridge_port *port,
2303 struct sk_buff *skb,
2306 unsigned int transport_len = ipv6_transport_len(skb);
2307 struct mld_msg *mld;
2308 struct net_bridge_mdb_entry *mp;
2309 struct mld2_query *mld2q;
2310 struct net_bridge_port_group *p;
2311 struct net_bridge_port_group __rcu **pp;
2313 unsigned long max_delay;
2314 unsigned long now = jiffies;
2315 unsigned int offset = skb_transport_offset(skb);
2316 const struct in6_addr *group = NULL;
2317 bool is_general_query;
2320 spin_lock(&br->multicast_lock);
2321 if (!netif_running(br->dev) ||
2322 (port && port->state == BR_STATE_DISABLED))
2325 if (transport_len == sizeof(*mld)) {
2326 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
2330 mld = (struct mld_msg *) icmp6_hdr(skb);
2331 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
2333 group = &mld->mld_mca;
2335 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
2339 mld2q = (struct mld2_query *)icmp6_hdr(skb);
2340 if (!mld2q->mld2q_nsrcs)
2341 group = &mld2q->mld2q_mca;
/* MLDv2 group query with the suppress flag: do not process */
2342 if (br->multicast_mld_version == 2 &&
2343 !ipv6_addr_any(&mld2q->mld2q_mca) &&
2344 mld2q->mld2q_suppress)
2347 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
/* the unspecified address as group means a general query */
2350 is_general_query = group && ipv6_addr_any(group);
2352 if (is_general_query) {
2353 saddr.proto = htons(ETH_P_IPV6);
2354 saddr.u.ip6 = ipv6_hdr(skb)->saddr;
2356 br_multicast_query_received(br, port, &br->ip6_other_query,
2359 } else if (!group) {
2363 mp = br_mdb_ip6_get(br, group, vid);
2367 max_delay *= br->multicast_last_member_count;
2368 if (mp->host_joined &&
2369 (timer_pending(&mp->timer) ?
2370 time_after(mp->timer.expires, now + max_delay) :
2371 try_to_del_timer_sync(&mp->timer) >= 0))
2372 mod_timer(&mp->timer, now + max_delay);
2374 for (pp = &mp->ports;
2375 (p = mlock_dereference(*pp, br)) != NULL;
2377 if (timer_pending(&p->timer) ?
2378 time_after(p->timer.expires, now + max_delay) :
2379 try_to_del_timer_sync(&p->timer) >= 0 &&
2380 (br->multicast_mld_version == 1 ||
2381 p->filter_mode == MCAST_EXCLUDE))
2382 mod_timer(&p->timer, now + max_delay);
2386 spin_unlock(&br->multicast_lock);
/* Core leave handling shared by the IPv4 and IPv6 paths.  Under
 * multicast_lock: with fast-leave enabled on the port, the matching
 * non-permanent port group is deleted immediately; otherwise, if no other
 * querier is active and we are querier, a group query is sent and the
 * own-query and per-port timers are lowered to last_member_count *
 * last_member_interval.  Finally the host timer and the leaving port's
 * group timers are lowered the same way.
 */
2392 br_multicast_leave_group(struct net_bridge *br,
2393 struct net_bridge_port *port,
2394 struct br_ip *group,
2395 struct bridge_mcast_other_query *other_query,
2396 struct bridge_mcast_own_query *own_query,
2397 const unsigned char *src)
2399 struct net_bridge_mdb_entry *mp;
2400 struct net_bridge_port_group *p;
2404 spin_lock(&br->multicast_lock);
2405 if (!netif_running(br->dev) ||
2406 (port && port->state == BR_STATE_DISABLED))
2409 mp = br_mdb_ip_get(br, group);
/* fast leave: drop the matching non-permanent entry right away */
2413 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
2414 struct net_bridge_port_group __rcu **pp;
2416 for (pp = &mp->ports;
2417 (p = mlock_dereference(*pp, br)) != NULL;
2419 if (!br_port_group_equal(p, port, src))
2422 if (p->flags & MDB_PG_FLAGS_PERMANENT)
2425 p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2426 br_multicast_del_pg(mp, p, pp);
/* defer to the other querier if its timer is still running */
2431 if (timer_pending(&other_query->timer))
2434 if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
2435 __br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
2438 time = jiffies + br->multicast_last_member_count *
2439 br->multicast_last_member_interval;
2441 mod_timer(&own_query->timer, time);
2443 for (p = mlock_dereference(mp->ports, br);
2445 p = mlock_dereference(p->next, br)) {
2446 if (!br_port_group_equal(p, port, src))
2449 if (!hlist_unhashed(&p->mglist) &&
2450 (timer_pending(&p->timer) ?
2451 time_after(p->timer.expires, time) :
2452 try_to_del_timer_sync(&p->timer) >= 0)) {
2453 mod_timer(&p->timer, time);
2461 time = now + br->multicast_last_member_count *
2462 br->multicast_last_member_interval;
2465 if (mp->host_joined &&
2466 (timer_pending(&mp->timer) ?
2467 time_after(mp->timer.expires, time) :
2468 try_to_del_timer_sync(&mp->timer) >= 0)) {
2469 mod_timer(&mp->timer, time);
2475 for (p = mlock_dereference(mp->ports, br);
2477 p = mlock_dereference(p->next, br)) {
2478 if (p->port != port)
2481 if (!hlist_unhashed(&p->mglist) &&
2482 (timer_pending(&p->timer) ?
2483 time_after(p->timer.expires, time) :
2484 try_to_del_timer_sync(&p->timer) >= 0)) {
2485 mod_timer(&p->timer, time);
2491 spin_unlock(&br->multicast_lock);
/* IPv4 leave wrapper: ignores local-scope groups (224.0.0.x), builds a
 * br_ip and forwards to br_multicast_leave_group() with the IPv4 query
 * state (per-port own query when @port is set, bridge-level otherwise).
 */
2494 static void br_ip4_multicast_leave_group(struct net_bridge *br,
2495 struct net_bridge_port *port,
2498 const unsigned char *src)
2500 struct br_ip br_group;
2501 struct bridge_mcast_own_query *own_query;
2503 if (ipv4_is_local_multicast(group))
2506 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
2508 memset(&br_group, 0, sizeof(br_group));
2509 br_group.u.ip4 = group;
2510 br_group.proto = htons(ETH_P_IP);
2513 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
2517 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 leave wrapper: ignores the link-local all-nodes group, builds a
 * br_ip and forwards to br_multicast_leave_group() with the IPv6 query state.
 */
2518 static void br_ip6_multicast_leave_group(struct net_bridge *br,
2519 struct net_bridge_port *port,
2520 const struct in6_addr *group,
2522 const unsigned char *src)
2524 struct br_ip br_group;
2525 struct bridge_mcast_own_query *own_query;
2527 if (ipv6_addr_is_ll_all_nodes(group))
2530 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
2532 memset(&br_group, 0, sizeof(br_group));
2533 br_group.u.ip6 = *group;
2534 br_group.proto = htons(ETH_P_IPV6);
2537 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
/* Bump the per-CPU IGMP/MLD parse-error counter for @p (or the bridge when
 * no port), guarded by the stats-enabled option and u64_stats sync.
 */
2542 static void br_multicast_err_count(const struct net_bridge *br,
2543 const struct net_bridge_port *p,
2546 struct bridge_mcast_stats __percpu *stats;
2547 struct bridge_mcast_stats *pstats;
2549 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2553 stats = p->mcast_stats;
2555 stats = br->mcast_stats;
2556 if (WARN_ON(!stats))
2559 pstats = this_cpu_ptr(stats);
2561 u64_stats_update_begin(&pstats->syncp);
2563 case htons(ETH_P_IP):
2564 pstats->mstats.igmp_parse_errors++;
2566 #if IS_ENABLED(CONFIG_IPV6)
2567 case htons(ETH_P_IPV6):
2568 pstats->mstats.mld_parse_errors++;
2572 u64_stats_update_end(&pstats->syncp);
/* On a valid PIMv2 Hello, mark the ingress port as a multicast router.
 * Uses skb_header_pointer() so a fragmented header is read safely.
 */
2575 static void br_multicast_pim(struct net_bridge *br,
2576 struct net_bridge_port *port,
2577 const struct sk_buff *skb)
2579 unsigned int offset = skb_transport_offset(skb);
2580 struct pimhdr *pimhdr, _pimhdr;
2582 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
2583 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
2584 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
2587 br_multicast_mark_router(br, port);
/* IPv4 Multicast Router Discovery: an IGMP MRDISC advertisement marks the
 * ingress port as a router port; anything else is ignored.
 */
2590 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
2591 struct net_bridge_port *port,
2592 struct sk_buff *skb)
2594 if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
2595 igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
2598 br_multicast_mark_router(br, port);
2603 static int br_multicast_ipv4_rcv(struct net_bridge *br,
2604 struct net_bridge_port *port,
2605 struct sk_buff *skb,
2608 const unsigned char *src;
2612 err = ip_mc_check_igmp(skb);
2614 if (err == -ENOMSG) {
2615 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
2616 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
2617 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
2618 if (ip_hdr(skb)->protocol == IPPROTO_PIM)
2619 br_multicast_pim(br, port, skb);
2620 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
2621 br_ip4_multicast_mrd_rcv(br, port, skb);
2625 } else if (err < 0) {
2626 br_multicast_err_count(br, port, skb->protocol);
2631 src = eth_hdr(skb)->h_source;
2632 BR_INPUT_SKB_CB(skb)->igmp = ih->type;
2635 case IGMP_HOST_MEMBERSHIP_REPORT:
2636 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2637 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
2638 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
2641 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2642 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
2644 case IGMP_HOST_MEMBERSHIP_QUERY:
2645 br_ip4_multicast_query(br, port, skb, vid);
2647 case IGMP_HOST_LEAVE_MESSAGE:
2648 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
2652 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
2658 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 Multicast Router Discovery: validate the ICMPv6 payload and mark the
 * ingress port as a router port on an MRDISC advertisement.
 */
2659 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
2660 struct net_bridge_port *port,
2661 struct sk_buff *skb)
2665 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
2668 ret = ipv6_mc_check_icmpv6(skb);
2672 if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
2675 br_multicast_mark_router(br, port);
/* IPv6 snooping entry point, mirror of br_multicast_ipv4_rcv(): validate
 * with ipv6_mc_check_mld(); -ENOMSG means "not MLD" (flag mrouters_only
 * except for all-nodes, handle MRD for the all-snoopers group).  Real MLD
 * packets are dispatched by type (report/query/reduction) and counted.
 */
2680 static int br_multicast_ipv6_rcv(struct net_bridge *br,
2681 struct net_bridge_port *port,
2682 struct sk_buff *skb,
2685 const unsigned char *src;
2686 struct mld_msg *mld;
2689 err = ipv6_mc_check_mld(skb);
2691 if (err == -ENOMSG) {
2692 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
2693 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
2695 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
2696 err = br_ip6_multicast_mrd_rcv(br, port, skb);
2698 if (err < 0 && err != -ENOMSG) {
2699 br_multicast_err_count(br, port, skb->protocol);
2705 } else if (err < 0) {
2706 br_multicast_err_count(br, port, skb->protocol);
2710 mld = (struct mld_msg *)skb_transport_header(skb);
2711 BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
2713 switch (mld->mld_type) {
2714 case ICMPV6_MGM_REPORT:
2715 src = eth_hdr(skb)->h_source;
2716 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
2717 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
2720 case ICMPV6_MLD2_REPORT:
2721 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
2723 case ICMPV6_MGM_QUERY:
2724 err = br_ip6_multicast_query(br, port, skb, vid);
2726 case ICMPV6_MGM_REDUCTION:
2727 src = eth_hdr(skb)->h_source;
2728 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
2732 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
/* Public snooping entry: reset the per-skb IGMP/mrouters_only markers, bail
 * out when snooping is disabled, and dispatch by ethertype to the IPv4 or
 * IPv6 handler.
 */
2739 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
2740 struct sk_buff *skb, u16 vid)
2744 BR_INPUT_SKB_CB(skb)->igmp = 0;
2745 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
2747 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2750 switch (skb->protocol) {
2751 case htons(ETH_P_IP):
2752 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
2754 #if IS_ENABLED(CONFIG_IPV6)
2755 case htons(ETH_P_IPV6):
2756 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
/* Own-query timer callback body: advance the startup-query counter while it
 * is below the configured count, clear the recorded querier port, and send
 * the next query — all under multicast_lock.
 */
2764 static void br_multicast_query_expired(struct net_bridge *br,
2765 struct bridge_mcast_own_query *query,
2766 struct bridge_mcast_querier *querier)
2768 spin_lock(&br->multicast_lock);
2769 if (query->startup_sent < br->multicast_startup_query_count)
2770 query->startup_sent++;
2772 RCU_INIT_POINTER(querier->port, NULL);
2773 br_multicast_send_query(br, NULL, query);
2774 spin_unlock(&br->multicast_lock);
/* IPv4 own-query timer: delegate to the shared expiry handler. */
2777 static void br_ip4_multicast_query_expired(struct timer_list *t)
2779 struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
2781 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
2784 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer: delegate to the shared expiry handler. */
2785 static void br_ip6_multicast_query_expired(struct timer_list *t)
2787 struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
2789 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
/* Deferred garbage collection: detach the pending-deletion list under
 * multicast_lock, then free the entries outside the lock.
 */
2793 static void br_multicast_gc_work(struct work_struct *work)
2795 struct net_bridge *br = container_of(work, struct net_bridge,
2797 HLIST_HEAD(deleted_head);
2799 spin_lock_bh(&br->multicast_lock);
2800 hlist_move_list(&br->mcast_gc_list, &deleted_head);
2801 spin_unlock_bh(&br->multicast_lock);
2803 br_multicast_gc(&deleted_head);
/* Initialize per-bridge multicast state: protocol defaults (counts and
 * intervals, IGMPv2/MLDv1), option flags, the multicast lock, all router
 * and query timers, the MDB/gc lists and the gc work item.
 */
2806 void br_multicast_init(struct net_bridge *br)
2808 br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
2810 br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
2811 br->multicast_last_member_count = 2;
2812 br->multicast_startup_query_count = 2;
/* RFC-style default intervals, in jiffies */
2814 br->multicast_last_member_interval = HZ;
2815 br->multicast_query_response_interval = 10 * HZ;
2816 br->multicast_startup_query_interval = 125 * HZ / 4;
2817 br->multicast_query_interval = 125 * HZ;
2818 br->multicast_querier_interval = 255 * HZ;
2819 br->multicast_membership_interval = 260 * HZ;
2821 br->ip4_other_query.delay_time = 0;
2822 br->ip4_querier.port = NULL;
2823 br->multicast_igmp_version = 2;
2824 #if IS_ENABLED(CONFIG_IPV6)
2825 br->multicast_mld_version = 1;
2826 br->ip6_other_query.delay_time = 0;
2827 br->ip6_querier.port = NULL;
2829 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
2830 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
2832 spin_lock_init(&br->multicast_lock);
2833 timer_setup(&br->multicast_router_timer,
2834 br_multicast_local_router_expired, 0);
2835 timer_setup(&br->ip4_other_query.timer,
2836 br_ip4_multicast_querier_expired, 0);
2837 timer_setup(&br->ip4_own_query.timer,
2838 br_ip4_multicast_query_expired, 0);
2839 #if IS_ENABLED(CONFIG_IPV6)
2840 timer_setup(&br->ip6_other_query.timer,
2841 br_ip6_multicast_querier_expired, 0);
2842 timer_setup(&br->ip6_own_query.timer,
2843 br_ip6_multicast_query_expired, 0);
2845 INIT_HLIST_HEAD(&br->mdb_list);
2846 INIT_HLIST_HEAD(&br->mcast_gc_list);
2847 INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
/* Join the IGMP all-snoopers group (224.0.0.106) on the bridge device so
 * snooping switches address us.  NOTE(review): the in_dev NULL check and
 * the in_dev_put() that must pair with in_dev_get() are in lines elided
 * from this listing (numbering jumps 2852->2857).
 */
2850 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
2852 struct in_device *in_dev = in_dev_get(br->dev);
2857 __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2861 #if IS_ENABLED(CONFIG_IPV6)
/* Join the MLD all-snoopers group ff02::6a on the bridge device. */
2862 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
2864 struct in6_addr addr;
2866 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
2867 ipv6_dev_mc_inc(br->dev, &addr);
/* !CONFIG_IPV6 stub: no-op so callers need no ifdefs. */
2870 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
/* Join both all-snoopers groups (IPv4 and, when built in, IPv6). */
2875 static void br_multicast_join_snoopers(struct net_bridge *br)
2877 br_ip4_multicast_join_snoopers(br);
2878 br_ip6_multicast_join_snoopers(br);
/* Leave the IGMP all-snoopers group; WARN if the in_device vanished while
 * we were supposedly still joined.  NOTE(review): the return-on-WARN and
 * the in_dev_put() pairing in_dev_get() sit in elided lines (2886/2889).
 */
2881 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
2883 struct in_device *in_dev = in_dev_get(br->dev);
2885 if (WARN_ON(!in_dev))
2888 __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2892 #if IS_ENABLED(CONFIG_IPV6)
/* Leave the MLD all-snoopers group ff02::6a — inverse of the join above. */
2893 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
2895 struct in6_addr addr;
2897 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
2898 ipv6_dev_mc_dec(br->dev, &addr);
/* !CONFIG_IPV6 stub: no-op so callers need no ifdefs. */
2901 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
/* Leave both all-snoopers groups (IPv4 and, when built in, IPv6). */
2906 static void br_multicast_leave_snoopers(struct net_bridge *br)
2908 br_ip4_multicast_leave_snoopers(br);
2909 br_ip6_multicast_leave_snoopers(br);
/* (Re)start one own-query state machine: reset the startup counter and,
 * if snooping is enabled, arm its timer to fire immediately.
 */
2912 static void __br_multicast_open(struct net_bridge *br,
2913 struct bridge_mcast_own_query *query)
2915 query->startup_sent = 0;
2917 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2920 mod_timer(&query->timer, jiffies);
/* Bridge brought up: join the all-snoopers groups (if snooping is on) and
 * kick off the IPv4 — and, when built in, IPv6 — own-query machinery.
 */
2923 void br_multicast_open(struct net_bridge *br)
2925 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
2926 br_multicast_join_snoopers(br);
2928 __br_multicast_open(br, &br->ip4_own_query);
2929 #if IS_ENABLED(CONFIG_IPV6)
2930 __br_multicast_open(br, &br->ip6_own_query);
/* Bridge taken down: synchronously stop every multicast timer (router,
 * other-querier and own-query for both families) and leave the
 * all-snoopers groups if snooping was enabled.
 */
2934 void br_multicast_stop(struct net_bridge *br)
2936 del_timer_sync(&br->multicast_router_timer);
2937 del_timer_sync(&br->ip4_other_query.timer);
2938 del_timer_sync(&br->ip4_own_query.timer);
2939 #if IS_ENABLED(CONFIG_IPV6)
2940 del_timer_sync(&br->ip6_other_query.timer);
2941 del_timer_sync(&br->ip6_own_query.timer);
2944 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
2945 br_multicast_leave_snoopers(br);
/* Bridge device destruction: delete every mdb entry under the lock, steal
 * the pending GC list, then free everything outside the lock and make
 * sure no GC work is still running before the bridge memory goes away.
 */
2948 void br_multicast_dev_del(struct net_bridge *br)
2950 struct net_bridge_mdb_entry *mp;
2951 HLIST_HEAD(deleted_head);
2952 struct hlist_node *tmp;
2954 spin_lock_bh(&br->multicast_lock);
2955 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
2956 br_multicast_del_mdb_entry(mp);
2957 hlist_move_list(&br->mcast_gc_list, &deleted_head);
2958 spin_unlock_bh(&br->multicast_lock);
2960 br_multicast_gc(&deleted_head);
2961 cancel_work_sync(&br->mcast_gc_work);
/* Set the bridge-wide multicast router mode (netlink/sysfs entry point).
 * DISABLED/PERM stop the router timer and record the state; TEMP_QUERY
 * only notifies a state change when the mode actually changes.
 * NOTE(review): the switch(val) header, err handling, break statements
 * and return are in lines elided from this listing (gaps around
 * 2971-2972, 2978-2979, 2984-2987, 2989-2990).
 */
2966 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
2970 spin_lock_bh(&br->multicast_lock);
2973 case MDB_RTR_TYPE_DISABLED:
2974 case MDB_RTR_TYPE_PERM:
2975 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
2976 del_timer(&br->multicast_router_timer);
2977 br->multicast_router = val;
2980 case MDB_RTR_TYPE_TEMP_QUERY:
2981 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
2982 br_mc_router_state_change(br, false);
2983 br->multicast_router = val;
2988 spin_unlock_bh(&br->multicast_lock);
/* Remove a port from the router-port list (if it is on it), notify
 * userspace (RTM_DELMDB) and switchdev, and downgrade a TEMP router port
 * to TEMP_QUERY so a stale timer cannot re-add it.
 */
2993 static void __del_port_router(struct net_bridge_port *p)
2995 if (hlist_unhashed(&p->rlist))
2997 hlist_del_init_rcu(&p->rlist);
2998 br_rtr_notify(p->br->dev, p, RTM_DELMDB);
2999 br_port_mc_router_state_change(p, false);
3001 /* don't allow timer refresh */
3002 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3003 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
/* Set a single port's multicast router mode.  Setting the current mode
 * again only refreshes the TEMP router timer; otherwise each mode adjusts
 * the router-port list and timer accordingly.  NOTE(review): the
 * switch(val) header, break statements, default/err path and return are
 * in elided lines (gaps around 3018-3021, 3026, 3030, 3035, 3039-3044).
 */
3006 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
3008 struct net_bridge *br = p->br;
3009 unsigned long now = jiffies;
3012 spin_lock(&br->multicast_lock);
3013 if (p->multicast_router == val) {
3014 /* Refresh the temp router port timer */
3015 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
3016 mod_timer(&p->multicast_router_timer,
3017 now + br->multicast_querier_interval);
3022 case MDB_RTR_TYPE_DISABLED:
3023 p->multicast_router = MDB_RTR_TYPE_DISABLED;
3024 __del_port_router(p);
3025 del_timer(&p->multicast_router_timer);
3027 case MDB_RTR_TYPE_TEMP_QUERY:
3028 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
3029 __del_port_router(p);
3031 case MDB_RTR_TYPE_PERM:
3032 p->multicast_router = MDB_RTR_TYPE_PERM;
3033 del_timer(&p->multicast_router_timer);
3034 br_multicast_add_router(br, p);
3036 case MDB_RTR_TYPE_TEMP:
3037 p->multicast_router = MDB_RTR_TYPE_TEMP;
3038 br_multicast_mark_router(br, p);
3045 spin_unlock(&br->multicast_lock);
/* Restart querying: re-open the given own-query state machine, then
 * enable the matching per-port own query on every forwarding port
 * (DISABLED/BLOCKING ports are skipped).  The IPv6 branch's `else` and
 * the surrounding rcu_read_lock/unlock are in elided lines.
 */
3050 static void br_multicast_start_querier(struct net_bridge *br,
3051 struct bridge_mcast_own_query *query)
3053 struct net_bridge_port *port;
3055 __br_multicast_open(br, query);
3058 list_for_each_entry_rcu(port, &br->port_list, list) {
3059 if (port->state == BR_STATE_DISABLED ||
3060 port->state == BR_STATE_BLOCKING)
3063 if (query == &br->ip4_own_query)
3064 br_multicast_enable(&port->ip4_own_query);
3065 #if IS_ENABLED(CONFIG_IPV6)
3067 br_multicast_enable(&port->ip6_own_query);
/* Enable/disable multicast snooping on the bridge.  No-op if the value is
 * unchanged.  Disabling leaves the snoopers groups; enabling (on a
 * running device) re-opens the query machinery and re-enables every port.
 * NOTE(review): the goto-unlock paths and return are in elided lines
 * (gaps around 3079-3080, 3085-3087, 3089-3090, 3094-3095, 3097-3098).
 */
3073 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
3075 struct net_bridge_port *port;
3077 spin_lock_bh(&br->multicast_lock);
3078 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
3081 br_mc_disabled_update(br->dev, val);
3082 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
3083 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
3084 br_multicast_leave_snoopers(br);
3088 if (!netif_running(br->dev))
3091 br_multicast_open(br);
3092 list_for_each_entry(port, &br->port_list, list)
3093 __br_multicast_enable_port(port);
3096 spin_unlock_bh(&br->multicast_lock);
/* Exported helper: report whether snooping is enabled on this bridge. */
3101 bool br_multicast_enabled(const struct net_device *dev)
3103 struct net_bridge *br = netdev_priv(dev);
3105 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
3107 EXPORT_SYMBOL_GPL(br_multicast_enabled);
/* Exported helper: report whether the bridge currently acts as (or knows
 * of) a multicast router; sampled under multicast_lock.
 */
3109 bool br_multicast_router(const struct net_device *dev)
3111 struct net_bridge *br = netdev_priv(dev);
3114 spin_lock_bh(&br->multicast_lock);
3115 is_router = br_multicast_is_router(br);
3116 spin_unlock_bh(&br->multicast_lock);
3119 EXPORT_SYMBOL_GPL(br_multicast_router);
/* Enable/disable acting as IGMP/MLD querier.  When enabling, seed the
 * other-querier delay_time (if that timer isn't already pending) and
 * restart our own queriers for both address families.  NOTE(review):
 * the early-exit when disabling and the goto-unlock/return lines are
 * elided (gaps around 3124-3126, 3129-3130, 3132-3134, 3147-3149,
 * 3151-3152).
 */
3121 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
3123 unsigned long max_delay;
3127 spin_lock_bh(&br->multicast_lock);
3128 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
3131 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
3135 max_delay = br->multicast_query_response_interval;
3137 if (!timer_pending(&br->ip4_other_query.timer))
3138 br->ip4_other_query.delay_time = jiffies + max_delay;
3140 br_multicast_start_querier(br, &br->ip4_own_query);
3142 #if IS_ENABLED(CONFIG_IPV6)
3143 if (!timer_pending(&br->ip6_other_query.timer))
3144 br->ip6_other_query.delay_time = jiffies + max_delay;
3146 br_multicast_start_querier(br, &br->ip6_own_query);
3150 spin_unlock_bh(&br->multicast_lock);
/* Set the IGMP version used by the bridge querier.  The validation
 * rejecting anything but 2 and 3 sits in elided lines (3158-3165).
 */
3155 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
3157 /* Currently we support only version 2 and 3 */
3166 spin_lock_bh(&br->multicast_lock);
3167 br->multicast_igmp_version = val;
3168 spin_unlock_bh(&br->multicast_lock);
3173 #if IS_ENABLED(CONFIG_IPV6)
/* Set the MLD version used by the bridge querier.  The validation
 * rejecting anything but 1 and 2 sits in elided lines (3177-3184).
 */
3174 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
3176 /* Currently we support version 1 and 2 */
3185 spin_lock_bh(&br->multicast_lock);
3186 br->multicast_mld_version = val;
3187 spin_unlock_bh(&br->multicast_lock);
3194  * br_multicast_list_adjacent - Returns snooped multicast addresses
3195  * @dev: The bridge port adjacent to which to retrieve addresses
3196  * @br_ip_list: The list to store found, snooped multicast IP addresses in
3198  * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
3199  * snooping feature on all bridge ports of dev's bridge device, excluding
3200  * the addresses from dev itself.
3202  * Returns the number of items added to br_ip_list.
3205  * - br_ip_list needs to be initialized by caller
3206  * - br_ip_list might contain duplicates in the end
3207  *   (needs to be taken care of by caller)
3208  * - br_ip_list needs to be freed by caller
3210 int br_multicast_list_adjacent(struct net_device *dev,
3211 struct list_head *br_ip_list)
3213 struct net_bridge *br;
3214 struct net_bridge_port *port;
3215 struct net_bridge_port_group *group;
3216 struct br_ip_list *entry;
/* RCU walk over all ports of dev's bridge; the rcu_read_lock/unlock,
 * count accumulation and unlock/return path are in elided lines.
 */
3220 if (!br_ip_list || !netif_is_bridge_port(dev))
3223 port = br_port_get_rcu(dev);
3224 if (!port || !port->br)
3229 list_for_each_entry_rcu(port, &br->port_list, list) {
/* Skip the queried port itself — only adjacent ports' groups count. */
3230 if (!port->dev || port->dev == dev)
3233 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
/* GFP_ATOMIC: we are inside an RCU read-side section, cannot sleep. */
3234 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
3238 entry->addr = group->addr;
3239 list_add(&entry->list, br_ip_list);
3251  * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
3252  * @dev: The bridge port providing the bridge on which to check for a querier
3253  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3255  * Checks whether the given interface has a bridge on top and if so returns
3256  * true if a valid querier exists anywhere on the bridged link layer.
3257  * Otherwise returns false.
3259 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
3261 struct net_bridge *br;
3262 struct net_bridge_port *port;
/* Builds a minimal ethhdr carrying only h_proto so that
 * br_multicast_querier_exists() can pick the right address family.
 * The rcu_read_lock/unlock and return path are in elided lines.
 */
3267 if (!netif_is_bridge_port(dev))
3270 port = br_port_get_rcu(dev);
3271 if (!port || !port->br)
3276 memset(&eth, 0, sizeof(eth));
3277 eth.h_proto = htons(proto);
3279 ret = br_multicast_querier_exists(br, &eth);
3285 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
3288  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
3289  * @dev: The bridge port adjacent to which to check for a querier
3290  * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
3292  * Checks whether the given interface has a bridge on top and if so returns
3293  * true if a selected querier is behind one of the other ports of this
3294  * bridge. Otherwise returns false.
3296 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
3298 struct net_bridge *br;
3299 struct net_bridge_port *port;
/* A querier counts as "adjacent" only if its other-query timer is running
 * AND the selected querier port is not dev's own port.  The switch(proto)
 * header, goto-unlock paths, rcu_read_lock/unlock and return are in
 * elided lines.
 */
3303 if (!netif_is_bridge_port(dev))
3306 port = br_port_get_rcu(dev);
3307 if (!port || !port->br)
3314 if (!timer_pending(&br->ip4_other_query.timer) ||
3315 rcu_dereference(br->ip4_querier.port) == port)
3318 #if IS_ENABLED(CONFIG_IPV6)
3320 if (!timer_pending(&br->ip6_other_query.timer) ||
3321 rcu_dereference(br->ip6_querier.port) == port)
3334 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
/* Classify one IGMP/MLD packet into the per-cpu statistics counters,
 * bucketed by protocol version, message type and direction (RX/TX).
 * IGMP query version is disambiguated by length: an igmphdr-sized query
 * is v1 (code == 0) or v2, anything longer is v3; MLD queries are v1 iff
 * exactly mld_msg-sized.  NOTE(review): the inner switch(type) headers,
 * break statements and the NULL-check on skb_header_pointer() are in
 * elided lines throughout this listing.
 */
3336 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
3337 const struct sk_buff *skb, u8 type, u8 dir)
3339 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
3340 __be16 proto = skb->protocol;
3343 u64_stats_update_begin(&pstats->syncp);
3345 case htons(ETH_P_IP):
/* t_len = IP payload length = transport-layer (IGMP) message length. */
3346 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
3348 case IGMP_HOST_MEMBERSHIP_REPORT:
3349 pstats->mstats.igmp_v1reports[dir]++;
3351 case IGMPV2_HOST_MEMBERSHIP_REPORT:
3352 pstats->mstats.igmp_v2reports[dir]++;
3354 case IGMPV3_HOST_MEMBERSHIP_REPORT:
3355 pstats->mstats.igmp_v3reports[dir]++;
3357 case IGMP_HOST_MEMBERSHIP_QUERY:
3358 if (t_len != sizeof(struct igmphdr)) {
3359 pstats->mstats.igmp_v3queries[dir]++;
3361 unsigned int offset = skb_transport_offset(skb);
3362 struct igmphdr *ih, _ihdr;
/* Copy the header out in case it is not linear in the skb. */
3364 ih = skb_header_pointer(skb, offset,
3365 sizeof(_ihdr), &_ihdr);
3369 pstats->mstats.igmp_v1queries[dir]++;
3371 pstats->mstats.igmp_v2queries[dir]++;
3374 case IGMP_HOST_LEAVE_MESSAGE:
3375 pstats->mstats.igmp_leaves[dir]++;
3379 #if IS_ENABLED(CONFIG_IPV6)
3380 case htons(ETH_P_IPV6):
/* t_len = ICMPv6 (MLD) message length after any extension headers. */
3381 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
3382 sizeof(struct ipv6hdr);
3383 t_len -= skb_network_header_len(skb);
3385 case ICMPV6_MGM_REPORT:
3386 pstats->mstats.mld_v1reports[dir]++;
3388 case ICMPV6_MLD2_REPORT:
3389 pstats->mstats.mld_v2reports[dir]++;
3391 case ICMPV6_MGM_QUERY:
3392 if (t_len != sizeof(struct mld_msg))
3393 pstats->mstats.mld_v2queries[dir]++;
3395 pstats->mstats.mld_v1queries[dir]++;
3397 case ICMPV6_MGM_REDUCTION:
3398 pstats->mstats.mld_leaves[dir]++;
3402 #endif /* CONFIG_IPV6 */
3404 u64_stats_update_end(&pstats->syncp);
/* Account one IGMP/MLD packet: no-op when untyped or when stats are
 * disabled; otherwise charge the port's per-cpu stats if a port is given,
 * else the bridge's own, and WARN if the stats block was never allocated.
 */
3407 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
3408 const struct sk_buff *skb, u8 type, u8 dir)
3410 struct bridge_mcast_stats __percpu *stats;
3412 /* if multicast_disabled is true then igmp type can't be set */
3413 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3417 stats = p->mcast_stats;
3419 stats = br->mcast_stats;
3420 if (WARN_ON(!stats))
3423 br_mcast_stats_add(stats, skb, type, dir);
/* Allocate the bridge's per-cpu multicast statistics; the -ENOMEM and
 * success returns are in lines elided from this listing.
 */
3426 int br_multicast_init_stats(struct net_bridge *br)
3428 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
3429 if (!br->mcast_stats)
/* Free the per-cpu statistics allocated by br_multicast_init_stats(). */
3435 void br_multicast_uninit_stats(struct net_bridge *br)
3437 free_percpu(br->mcast_stats);
3440 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
/* Accumulate one RX/TX counter pair into the destination totals. */
3441 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
3443 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
3444 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
/* Aggregate multicast statistics across all CPUs into *dest.  Reads each
 * CPU's counters under its u64_stats seqlock (retrying on concurrent
 * writers) into a local snapshot before summing, so torn 64-bit reads on
 * 32-bit hosts are avoided.  Uses the port's stats when p is given, else
 * the bridge's.
 */
3447 void br_multicast_get_stats(const struct net_bridge *br,
3448 const struct net_bridge_port *p,
3449 struct br_mcast_stats *dest)
3451 struct bridge_mcast_stats __percpu *stats;
3452 struct br_mcast_stats tdst;
3455 memset(dest, 0, sizeof(*dest));
3457 stats = p->mcast_stats;
3459 stats = br->mcast_stats;
3460 if (WARN_ON(!stats))
3463 memset(&tdst, 0, sizeof(tdst));
3464 for_each_possible_cpu(i) {
3465 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
3466 struct br_mcast_stats temp;
/* Seqcount retry loop: re-read if a writer updated mid-copy. */
3470 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
3471 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
3472 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
3474 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
3475 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
3476 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
3477 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
3478 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
3479 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
3480 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
3481 tdst.igmp_parse_errors += temp.igmp_parse_errors;
3483 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
3484 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
3485 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
3486 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
3487 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
3488 tdst.mld_parse_errors += temp.mld_parse_errors;
3490 memcpy(dest, &tdst, sizeof(*dest));
/* Initialise the bridge's mdb rhashtable with the parameters at the top
 * of this file; returns rhashtable_init()'s result (0 on success).
 */
3493 int br_mdb_hash_init(struct net_bridge *br)
3495 return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
/* Tear down the mdb rhashtable — inverse of br_mdb_hash_init(). */
3498 void br_mdb_hash_fini(struct net_bridge *br)
3500 rhashtable_destroy(&br->mdb_hash_tbl);