1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
35 #include "br_private.h"
/* rhashtable parameters for the bridge multicast database (mdb):
 * entries are hashed by their full struct br_ip address key.
 */
37 static const struct rhashtable_params br_mdb_rht_params = {
38 .head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
39 .key_offset = offsetof(struct net_bridge_mdb_entry, addr),
40 .key_len = sizeof(struct br_ip),
41 .automatic_shrinking = true,
44 static void br_multicast_start_querier(struct net_bridge *br,
45 struct bridge_mcast_own_query *query);
46 static void br_multicast_add_router(struct net_bridge *br,
47 struct net_bridge_port *port);
48 static void br_ip4_multicast_leave_group(struct net_bridge *br,
49 struct net_bridge_port *port,
52 const unsigned char *src);
54 static void __del_port_router(struct net_bridge_port *p);
55 #if IS_ENABLED(CONFIG_IPV6)
56 static void br_ip6_multicast_leave_group(struct net_bridge *br,
57 struct net_bridge_port *port,
58 const struct in6_addr *group,
59 __u16 vid, const unsigned char *src);
/* Lookup an mdb entry by address. RCU-side variant: caller must hold the
 * RCU read lock (no bridge multicast_lock required).
 */
62 static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
65 return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
/* Lookup an mdb entry by address while holding br->multicast_lock
 * (asserted below via lockdep).
 */
68 struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
71 struct net_bridge_mdb_entry *ent;
73 lockdep_assert_held_once(&br->multicast_lock);
76 ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
/* IPv4 convenience wrapper: build a zeroed br_ip key for (dst, vid) with
 * proto ETH_P_IP and delegate to br_mdb_ip_get().
 */
82 static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
83 __be32 dst, __u16 vid)
87 memset(&br_dst, 0, sizeof(br_dst));
89 br_dst.proto = htons(ETH_P_IP);
92 return br_mdb_ip_get(br, &br_dst);
95 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 convenience wrapper: build a zeroed br_ip key for (dst, vid) with
 * proto ETH_P_IPV6 and delegate to br_mdb_ip_get().
 */
96 static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
97 const struct in6_addr *dst,
102 memset(&br_dst, 0, sizeof(br_dst));
104 br_dst.proto = htons(ETH_P_IPV6);
107 return br_mdb_ip_get(br, &br_dst);
/* Fast-path lookup used on frame forwarding: derive the group address from
 * the skb's L3 destination and look it up via the RCU lookup helper.
 * Bails out early when multicast snooping is disabled or when the frame
 * itself is IGMP/MLD control traffic (BR_INPUT_SKB_CB(skb)->igmp set).
 */
111 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
112 struct sk_buff *skb, u16 vid)
116 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
119 if (BR_INPUT_SKB_CB(skb)->igmp)
122 memset(&ip, 0, sizeof(ip));
123 ip.proto = skb->protocol;
126 switch (skb->protocol) {
127 case htons(ETH_P_IP):
128 ip.u.ip4 = ip_hdr(skb)->daddr;
130 #if IS_ENABLED(CONFIG_IPV6)
131 case htons(ETH_P_IPV6):
132 ip.u.ip6 = ipv6_hdr(skb)->daddr;
139 return br_mdb_ip_get_rcu(br, &ip);
/* Group membership timer callback: when the timer fires with no pending
 * re-arm, the host-join state is dropped and the entry is unlinked from
 * both the rhashtable and the mdb list (RCU-deferred free presumed;
 * freeing lines are not visible in this view).
 */
142 static void br_multicast_group_expired(struct timer_list *t)
144 struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
145 struct net_bridge *br = mp->br;
147 spin_lock(&br->multicast_lock);
/* Bail if the bridge is going down or the timer was re-armed meanwhile. */
148 if (!netif_running(br->dev) || timer_pending(&mp->timer))
151 br_multicast_host_leave(mp, true);
156 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
158 hlist_del_rcu(&mp->mdb_node);
163 spin_unlock(&br->multicast_lock);
/* Unlink a per-port group source entry and hand it to the bridge's
 * source garbage-collection work (src_gc_list drained by src_gc_work on
 * system_long_wq), so the actual free happens outside the lock.
 */
166 static void br_multicast_del_group_src(struct net_bridge_group_src *src)
168 struct net_bridge *br = src->pg->port->br;
170 hlist_del_init_rcu(&src->node);
172 hlist_add_head(&src->del_node, &br->src_gc_list);
173 queue_work(system_long_wq, &br->src_gc_work);
/* Remove port group @pg from mdb entry @mp. @pp is the link pointer that
 * currently references @pg; it is spliced over to pg->next. All tracked
 * sources are queued for deletion and an RTM_DELMDB notification is sent.
 * If the entry ends up with no ports and no host join, its timer is fired
 * immediately so br_multicast_group_expired() reclaims it.
 */
176 void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
177 struct net_bridge_port_group *pg,
178 struct net_bridge_port_group __rcu **pp)
180 struct net_bridge *br = pg->port->br;
181 struct net_bridge_group_src *ent;
182 struct hlist_node *tmp;
184 rcu_assign_pointer(*pp, pg->next);
185 hlist_del_init(&pg->mglist);
186 del_timer(&pg->timer);
187 hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
188 br_multicast_del_group_src(ent);
189 br_mdb_notify(br->dev, pg->port, &pg->addr, RTM_DELMDB, pg->flags);
192 if (!mp->ports && !mp->host_joined && netif_running(br->dev))
193 mod_timer(&mp->timer, jiffies);
/* Find @pg's mdb entry and its link pointer in the entry's port list,
 * then delete it via br_multicast_del_pg(). Caller holds multicast_lock
 * (implied by br_mdb_ip_get() and mlock_dereference()).
 */
196 static void br_multicast_find_del_pg(struct net_bridge *br,
197 struct net_bridge_port_group *pg)
199 struct net_bridge_port_group __rcu **pp;
200 struct net_bridge_mdb_entry *mp;
201 struct net_bridge_port_group *p;
203 mp = br_mdb_ip_get(br, &pg->addr);
207 for (pp = &mp->ports;
208 (p = mlock_dereference(*pp, br)) != NULL;
213 br_multicast_del_pg(mp, pg, pp);
/* Port group membership timer callback: drop the port's group membership
 * unless the timer was re-armed, the group was already unhashed, or the
 * entry is marked permanent.
 */
220 static void br_multicast_port_group_expired(struct timer_list *t)
222 struct net_bridge_port_group *pg = from_timer(pg, t, timer);
223 struct net_bridge *br = pg->port->br;
225 spin_lock(&br->multicast_lock);
226 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
227 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
230 br_multicast_find_del_pg(br, pg);
233 spin_unlock(&br->multicast_lock);
/* Build an IGMP query skb (general query when @group is 0, group-specific
 * otherwise). Emits an IGMPv2 header or, when the bridge is configured for
 * IGMPv3, an igmpv3_query header. The IP header carries a Router Alert
 * option (4 bytes appended after the iphdr) and daddr 224.0.0.1.
 * *igmp_type is set for per-type statistics accounting by the caller.
 */
236 static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
240 struct igmpv3_query *ihv3;
241 size_t igmp_hdr_size;
/* Header size depends on the configured IGMP version (2 vs 3). */
247 igmp_hdr_size = sizeof(*ih);
248 if (br->multicast_igmp_version == 3)
249 igmp_hdr_size = sizeof(*ihv3);
250 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
255 skb->protocol = htons(ETH_P_IP);
257 skb_reset_mac_header(skb);
/* Destination MAC: IPv4 multicast mapping 01:00:5e:xx:xx:xx. */
260 ether_addr_copy(eth->h_source, br->dev->dev_addr);
263 eth->h_dest[2] = 0x5e;
267 eth->h_proto = htons(ETH_P_IP);
268 skb_put(skb, sizeof(*eth));
270 skb_set_network_header(skb, skb->len);
/* +4 accounts for the Router Alert IP option below. */
276 iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
278 iph->frag_off = htons(IP_DF);
280 iph->protocol = IPPROTO_IGMP;
/* Source: an interface address if QUERY_USE_IFADDR is set, else 0.0.0.0. */
281 iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
282 inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
283 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
/* Router Alert option: type, length 4, value 0. */
284 ((u8 *)&iph[1])[0] = IPOPT_RA;
285 ((u8 *)&iph[1])[1] = 4;
286 ((u8 *)&iph[1])[2] = 0;
287 ((u8 *)&iph[1])[3] = 0;
291 skb_set_transport_header(skb, skb->len);
292 *igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;
294 switch (br->multicast_igmp_version) {
/* IGMPv2: max response code scaled to 1/10s units. */
297 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
298 ih->code = (group ? br->multicast_last_member_interval :
299 br->multicast_query_response_interval) /
300 (HZ / IGMP_TIMER_SCALE);
303 ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
/* IGMPv3: same response code plus QQIC (query interval in seconds). */
306 ihv3 = igmpv3_query_hdr(skb);
307 ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
308 ihv3->code = (group ? br->multicast_last_member_interval :
309 br->multicast_query_response_interval) /
310 (HZ / IGMP_TIMER_SCALE);
312 ihv3->qqic = br->multicast_query_interval / HZ;
318 ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
322 skb_put(skb, igmp_hdr_size);
/* Strip the ethernet header; bridge xmit path pushes it back. */
323 __skb_pull(skb, sizeof(*eth));
329 #if IS_ENABLED(CONFIG_IPV6)
/* Build an MLD query skb (general query when @grp is ::, group-specific
 * otherwise). Emits an MLDv1 mld_msg or, for multicast_mld_version == 2,
 * an mld2_query. The IPv6 header is followed by an 8-byte Hop-by-Hop
 * extension carrying the Router Alert option. Also tracks whether the
 * bridge device has a usable IPv6 source address (BROPT_HAS_IPV6_ADDR).
 */
330 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
331 const struct in6_addr *grp,
334 struct mld2_query *mld2q;
335 unsigned long interval;
336 struct ipv6hdr *ip6h;
337 struct mld_msg *mldq;
/* Header size depends on the configured MLD version (1 vs 2). */
343 mld_hdr_size = sizeof(*mldq);
344 if (br->multicast_mld_version == 2)
345 mld_hdr_size = sizeof(*mld2q);
346 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
351 skb->protocol = htons(ETH_P_IPV6);
353 /* Ethernet header */
354 skb_reset_mac_header(skb);
357 ether_addr_copy(eth->h_source, br->dev->dev_addr);
358 eth->h_proto = htons(ETH_P_IPV6);
359 skb_put(skb, sizeof(*eth));
361 /* IPv6 header + HbH option */
362 skb_set_network_header(skb, skb->len);
363 ip6h = ipv6_hdr(skb);
/* Version 6, zero traffic class/flow label. */
365 *(__force __be32 *)ip6h = htonl(0x60000000);
366 ip6h->payload_len = htons(8 + mld_hdr_size);
367 ip6h->nexthdr = IPPROTO_HOPOPTS;
/* Destination: ff02::1 (all-nodes, link-local scope). */
369 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
/* No link-local source address available: remember that and give up. */
370 if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
373 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, false);
377 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
378 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
/* Hop-by-Hop header: next-header ICMPv6, Router Alert (MLD), 2x Pad1. */
380 hopopt = (u8 *)(ip6h + 1);
381 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
382 hopopt[1] = 0; /* length of HbH */
383 hopopt[2] = IPV6_TLV_ROUTERALERT; /* Router Alert */
384 hopopt[3] = 2; /* Length of RA Option */
385 hopopt[4] = 0; /* Type = 0x0000 (MLD) */
387 hopopt[6] = IPV6_TLV_PAD1; /* Pad1 */
388 hopopt[7] = IPV6_TLV_PAD1; /* Pad1 */
390 skb_put(skb, sizeof(*ip6h) + 8);
/* ICMPv6 MLD query payload. */
393 skb_set_transport_header(skb, skb->len);
394 interval = ipv6_addr_any(grp) ?
395 br->multicast_query_response_interval :
396 br->multicast_last_member_interval;
397 *igmp_type = ICMPV6_MGM_QUERY;
398 switch (br->multicast_mld_version) {
/* MLDv1 query: max delay in milliseconds plus pseudo-header checksum. */
400 mldq = (struct mld_msg *)icmp6_hdr(skb);
401 mldq->mld_type = ICMPV6_MGM_QUERY;
404 mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
405 mldq->mld_reserved = 0;
406 mldq->mld_mca = *grp;
407 mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
408 sizeof(*mldq), IPPROTO_ICMPV6,
/* MLDv2 query: adds robustness (qrv), QQIC and source count fields. */
414 mld2q = (struct mld2_query *)icmp6_hdr(skb);
415 mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
416 mld2q->mld2q_type = ICMPV6_MGM_QUERY;
417 mld2q->mld2q_code = 0;
418 mld2q->mld2q_cksum = 0;
419 mld2q->mld2q_resv1 = 0;
420 mld2q->mld2q_resv2 = 0;
421 mld2q->mld2q_suppress = 0;
422 mld2q->mld2q_qrv = 2;
423 mld2q->mld2q_nsrcs = 0;
424 mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
425 mld2q->mld2q_mca = *grp;
426 mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
434 skb_put(skb, mld_hdr_size);
/* Strip the ethernet header; bridge xmit path pushes it back. */
436 __skb_pull(skb, sizeof(*eth));
/* Protocol dispatcher: build an IGMP or MLD query skb depending on the
 * address family of @addr.
 */
443 static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
447 switch (addr->proto) {
448 case htons(ETH_P_IP):
449 return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
450 #if IS_ENABLED(CONFIG_IPV6)
451 case htons(ETH_P_IPV6):
452 return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
/* Find or create the mdb entry for @group. If the hash table is already
 * at hash_max, snooping is disabled for the bridge and -E2BIG is
 * returned; allocation failure yields -ENOMEM. On success the new entry
 * is inserted into the rhashtable and linked on br->mdb_list.
 * Called with multicast_lock held (GFP_ATOMIC allocation).
 */
459 struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
462 struct net_bridge_mdb_entry *mp;
465 mp = br_mdb_ip_get(br, group);
469 if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
470 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
471 return ERR_PTR(-E2BIG);
474 mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
476 return ERR_PTR(-ENOMEM);
480 timer_setup(&mp->timer, br_multicast_group_expired, 0);
481 err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
487 hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
/* Source entry timer callback: in INCLUDE mode an expired source is
 * deleted, and when it was the last source the whole port group goes
 * with it (br_multicast_find_del_pg).
 */
493 static void br_multicast_group_src_expired(struct timer_list *t)
495 struct net_bridge_group_src *src = from_timer(src, t, timer);
496 struct net_bridge_port_group *pg;
497 struct net_bridge *br = src->br;
499 spin_lock(&br->multicast_lock);
500 if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
501 timer_pending(&src->timer))
505 if (pg->filter_mode == MCAST_INCLUDE) {
506 br_multicast_del_group_src(src);
507 if (!hlist_empty(&pg->src_list))
509 br_multicast_find_del_pg(br, pg);
512 spin_unlock(&br->multicast_lock);
/* Linear search of @pg's source list for an entry matching @ip,
 * comparing by the appropriate address family.
 */
515 static struct net_bridge_group_src *
516 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
518 struct net_bridge_group_src *ent;
521 case htons(ETH_P_IP):
522 hlist_for_each_entry(ent, &pg->src_list, node)
523 if (ip->u.ip4 == ent->addr.u.ip4)
526 #if IS_ENABLED(CONFIG_IPV6)
527 case htons(ETH_P_IPV6):
528 hlist_for_each_entry(ent, &pg->src_list, node)
529 if (!ipv6_addr_cmp(&ent->addr.u.ip6, &ip->u.ip6))
/* Allocate and link a new source entry for @src_ip under @pg.
 * Rejects: per-group source limit (PG_SRC_ENT_LIMIT) exceeded, and
 * invalid source addresses (IPv4 zeronet/multicast, IPv6 any/multicast).
 * Called under multicast_lock (GFP_ATOMIC).
 */
538 static struct net_bridge_group_src *
539 br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
541 struct net_bridge_group_src *grp_src;
543 if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
546 switch (src_ip->proto) {
547 case htons(ETH_P_IP):
548 if (ipv4_is_zeronet(src_ip->u.ip4) ||
549 ipv4_is_multicast(src_ip->u.ip4))
552 #if IS_ENABLED(CONFIG_IPV6)
553 case htons(ETH_P_IPV6):
554 if (ipv6_addr_any(&src_ip->u.ip6) ||
555 ipv6_addr_is_multicast(&src_ip->u.ip6))
561 grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
562 if (unlikely(!grp_src))
566 grp_src->br = pg->port->br;
567 grp_src->addr = *src_ip;
568 timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
570 hlist_add_head_rcu(&grp_src->node, &pg->src_list);
/* Allocate a new port group entry, link it after @next in the mdb chain
 * and onto the port's mglist. @src, when non-NULL, records the host MAC
 * for multicast-to-unicast delivery; otherwise the broadcast address is
 * stored as a "match any" sentinel.
 */
576 struct net_bridge_port_group *br_multicast_new_port_group(
577 struct net_bridge_port *port,
579 struct net_bridge_port_group __rcu *next,
581 const unsigned char *src,
584 struct net_bridge_port_group *p;
586 p = kzalloc(sizeof(*p), GFP_ATOMIC);
593 p->filter_mode = filter_mode;
594 INIT_HLIST_HEAD(&p->src_list);
595 rcu_assign_pointer(p->next, next);
596 hlist_add_head(&p->mglist, &port->mglist);
597 timer_setup(&p->timer, br_multicast_port_group_expired, 0);
600 memcpy(p->eth_addr, src, ETH_ALEN);
602 eth_broadcast_addr(p->eth_addr);
/* Return true when @p matches (@port, @src). Without the
 * BR_MULTICAST_TO_UNICAST port flag the port match alone decides;
 * with it, the recorded host MAC must also match @src.
 */
607 static bool br_port_group_equal(struct net_bridge_port_group *p,
608 struct net_bridge_port *port,
609 const unsigned char *src)
614 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
617 return ether_addr_equal(src, p->eth_addr);
/* Mark the bridge device itself as a member of @mp's group (idempotent),
 * optionally sending an RTM_NEWMDB notification, and refresh the group's
 * membership timer.
 */
620 void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
622 if (!mp->host_joined) {
623 mp->host_joined = true;
625 br_mdb_notify(mp->br->dev, NULL, &mp->addr,
628 mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
/* Clear the bridge device's own membership of @mp's group and send an
 * RTM_DELMDB notification. No-op if the host was not joined.
 */
631 void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
633 if (!mp->host_joined)
636 mp->host_joined = false;
638 br_mdb_notify(mp->br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
/* Core join handler: record that (@port, @src) is a member of @group.
 * A NULL @port means the bridge device itself (host join). Otherwise the
 * port list is kept sorted by port pointer value; an existing matching
 * entry just has its timer refreshed, a new one is created with
 * br_multicast_new_port_group() and announced via RTM_NEWMDB.
 */
641 static int br_multicast_add_group(struct net_bridge *br,
642 struct net_bridge_port *port,
644 const unsigned char *src,
647 struct net_bridge_port_group __rcu **pp;
648 struct net_bridge_port_group *p;
649 struct net_bridge_mdb_entry *mp;
650 unsigned long now = jiffies;
653 spin_lock(&br->multicast_lock);
654 if (!netif_running(br->dev) ||
655 (port && port->state == BR_STATE_DISABLED))
658 mp = br_multicast_new_group(br, group);
664 br_multicast_host_join(mp, true);
/* Walk the sorted port list: refresh on match, else find insert slot. */
668 for (pp = &mp->ports;
669 (p = mlock_dereference(*pp, br)) != NULL;
671 if (br_port_group_equal(p, port, src))
673 if ((unsigned long)p->port < (unsigned long)port)
677 p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode);
680 rcu_assign_pointer(*pp, p);
681 br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
684 mod_timer(&p->timer, now + br->multicast_membership_interval);
689 spin_unlock(&br->multicast_lock);
/* IPv4 join wrapper: ignore link-local groups (224.0.0.x), build the
 * br_ip key and derive the filter mode from the report version
 * (IGMPv2 => EXCLUDE, IGMPv3 => INCLUDE).
 */
693 static int br_ip4_multicast_add_group(struct net_bridge *br,
694 struct net_bridge_port *port,
697 const unsigned char *src,
700 struct br_ip br_group;
703 if (ipv4_is_local_multicast(group))
706 memset(&br_group, 0, sizeof(br_group));
707 br_group.u.ip4 = group;
708 br_group.proto = htons(ETH_P_IP);
710 filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
712 return br_multicast_add_group(br, port, &br_group, src, filter_mode);
715 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 join wrapper: ignore the link-local all-nodes group (ff02::1),
 * build the br_ip key and derive the filter mode from the report version
 * (MLDv1 => EXCLUDE, MLDv2 => INCLUDE).
 */
716 static int br_ip6_multicast_add_group(struct net_bridge *br,
717 struct net_bridge_port *port,
718 const struct in6_addr *group,
720 const unsigned char *src,
723 struct br_ip br_group;
726 if (ipv6_addr_is_ll_all_nodes(group))
729 memset(&br_group, 0, sizeof(br_group));
730 br_group.u.ip6 = *group;
731 br_group.proto = htons(ETH_P_IPV6);
733 filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
735 return br_multicast_add_group(br, port, &br_group, src, filter_mode);
/* Per-port router timer callback: when a learned (temporary) router
 * entry times out, remove the port from the router list. Disabled and
 * permanent router modes are left alone.
 */
739 static void br_multicast_router_expired(struct timer_list *t)
741 struct net_bridge_port *port =
742 from_timer(port, t, multicast_router_timer);
743 struct net_bridge *br = port->br;
745 spin_lock(&br->multicast_lock);
746 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
747 port->multicast_router == MDB_RTR_TYPE_PERM ||
748 timer_pending(&port->multicast_router_timer))
751 __del_port_router(port);
753 spin_unlock(&br->multicast_lock);
/* Propagate the bridge-level mrouter state to offloading hardware via a
 * deferred switchdev attribute notification.
 */
756 static void br_mc_router_state_change(struct net_bridge *p,
759 struct switchdev_attr attr = {
761 .id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
762 .flags = SWITCHDEV_F_DEFER,
763 .u.mrouter = is_mc_router,
766 switchdev_port_attr_set(p->dev, &attr);
/* Bridge-level router timer callback: the bridge itself is no longer
 * acting as a learned multicast router; tell switchdev. Disabled and
 * permanent modes are left alone.
 */
769 static void br_multicast_local_router_expired(struct timer_list *t)
771 struct net_bridge *br = from_timer(br, t, multicast_router_timer);
773 spin_lock(&br->multicast_lock);
774 if (br->multicast_router == MDB_RTR_TYPE_DISABLED ||
775 br->multicast_router == MDB_RTR_TYPE_PERM ||
776 timer_pending(&br->multicast_router_timer))
779 br_mc_router_state_change(br, false);
781 spin_unlock(&br->multicast_lock);
/* The other querier on the segment went silent: take over by starting
 * our own query cycle, unless the bridge is down or snooping disabled.
 */
784 static void br_multicast_querier_expired(struct net_bridge *br,
785 struct bridge_mcast_own_query *query)
787 spin_lock(&br->multicast_lock);
788 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
791 br_multicast_start_querier(br, query);
794 spin_unlock(&br->multicast_lock);
/* IPv4 other-querier timer: hand over to the common expiry handler. */
797 static void br_ip4_multicast_querier_expired(struct timer_list *t)
799 struct net_bridge *br = from_timer(br, t, ip4_other_query.timer);
801 br_multicast_querier_expired(br, &br->ip4_own_query);
804 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 other-querier timer: hand over to the common expiry handler. */
805 static void br_ip6_multicast_querier_expired(struct timer_list *t)
807 struct net_bridge *br = from_timer(br, t, ip6_other_query.timer);
809 br_multicast_querier_expired(br, &br->ip6_own_query);
/* Record the source address of a query we sent ourselves as the current
 * querier address, per address family.
 */
813 static void br_multicast_select_own_querier(struct net_bridge *br,
817 if (ip->proto == htons(ETH_P_IP))
818 br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
819 #if IS_ENABLED(CONFIG_IPV6)
821 br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
/* Allocate and transmit a query for @ip. With a @port the skb is pushed
 * through NF_BR_LOCAL_OUT towards that port; without one the query is
 * delivered locally and we record ourselves as the segment querier.
 * Statistics are accounted either way via br_multicast_count().
 */
825 static void __br_multicast_send_query(struct net_bridge *br,
826 struct net_bridge_port *port,
832 skb = br_multicast_alloc_query(br, ip, &igmp_type);
837 skb->dev = port->dev;
838 br_multicast_count(br, port, skb, igmp_type,
840 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
841 dev_net(port->dev), NULL, skb, NULL, skb->dev,
842 br_dev_queue_push_xmit);
844 br_multicast_select_own_querier(br, ip, skb);
845 br_multicast_count(br, port, skb, igmp_type,
/* Send a general query for the family that @own_query belongs to, but
 * only when we are allowed to act as querier: bridge up, snooping on,
 * querier option enabled, and no other querier currently active
 * (other_query timer not pending). Re-arms own_query->timer with the
 * startup interval until multicast_startup_query_count queries have been
 * sent, then with the regular query interval.
 */
851 static void br_multicast_send_query(struct net_bridge *br,
852 struct net_bridge_port *port,
853 struct bridge_mcast_own_query *own_query)
855 struct bridge_mcast_other_query *other_query = NULL;
856 struct br_ip br_group;
859 if (!netif_running(br->dev) ||
860 !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
861 !br_opt_get(br, BROPT_MULTICAST_QUERIER))
864 memset(&br_group.u, 0, sizeof(br_group.u));
/* Pick the family by which own_query struct was handed in. */
866 if (port ? (own_query == &port->ip4_own_query) :
867 (own_query == &br->ip4_own_query)) {
868 other_query = &br->ip4_other_query;
869 br_group.proto = htons(ETH_P_IP);
870 #if IS_ENABLED(CONFIG_IPV6)
872 other_query = &br->ip6_other_query;
873 br_group.proto = htons(ETH_P_IPV6);
877 if (!other_query || timer_pending(&other_query->timer))
880 __br_multicast_send_query(br, port, &br_group);
883 time += own_query->startup_sent < br->multicast_startup_query_count ?
884 br->multicast_startup_query_interval :
885 br->multicast_query_interval;
886 mod_timer(&own_query->timer, time);
/* Per-port own-query timer handler: skip ports that cannot forward
 * (disabled/blocking), count startup queries, and send the next query.
 */
890 br_multicast_port_query_expired(struct net_bridge_port *port,
891 struct bridge_mcast_own_query *query)
893 struct net_bridge *br = port->br;
895 spin_lock(&br->multicast_lock);
896 if (port->state == BR_STATE_DISABLED ||
897 port->state == BR_STATE_BLOCKING)
900 if (query->startup_sent < br->multicast_startup_query_count)
901 query->startup_sent++;
903 br_multicast_send_query(port->br, port, query);
906 spin_unlock(&br->multicast_lock);
/* IPv4 per-port own-query timer thunk. */
909 static void br_ip4_multicast_port_query_expired(struct timer_list *t)
911 struct net_bridge_port *port = from_timer(port, t, ip4_own_query.timer);
913 br_multicast_port_query_expired(port, &port->ip4_own_query);
916 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 per-port own-query timer thunk. */
917 static void br_ip6_multicast_port_query_expired(struct timer_list *t)
919 struct net_bridge_port *port = from_timer(port, t, ip6_own_query.timer);
921 br_multicast_port_query_expired(port, &port->ip6_own_query);
/* Inform offloading hardware whether multicast snooping is disabled.
 * Note the inversion: @value is "enabled", the attribute is mc_disabled.
 */
925 static void br_mc_disabled_update(struct net_device *dev, bool value)
927 struct switchdev_attr attr = {
929 .id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
930 .flags = SWITCHDEV_F_DEFER,
931 .u.mc_disabled = !value,
934 switchdev_port_attr_set(dev, &attr);
/* Initialize per-port multicast state when a port joins the bridge:
 * default router mode, router and own-query timers, switchdev sync of
 * the snooping state, and per-CPU statistics. Returns -ENOMEM (implied
 * by the visible NULL check) when the stats allocation fails.
 */
937 int br_multicast_add_port(struct net_bridge_port *port)
939 port->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
941 timer_setup(&port->multicast_router_timer,
942 br_multicast_router_expired, 0);
943 timer_setup(&port->ip4_own_query.timer,
944 br_ip4_multicast_port_query_expired, 0);
945 #if IS_ENABLED(CONFIG_IPV6)
946 timer_setup(&port->ip6_own_query.timer,
947 br_ip6_multicast_port_query_expired, 0);
949 br_mc_disabled_update(port->dev,
950 br_opt_get(port->br, BROPT_MULTICAST_ENABLED));
952 port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
953 if (!port->mcast_stats)
/* Tear down per-port multicast state when a port leaves the bridge:
 * delete all remaining port groups, stop the router timer and release
 * the per-CPU statistics.
 */
959 void br_multicast_del_port(struct net_bridge_port *port)
961 struct net_bridge *br = port->br;
962 struct net_bridge_port_group *pg;
963 struct hlist_node *n;
965 /* Take care of the remaining groups, only perm ones should be left */
966 spin_lock_bh(&br->multicast_lock);
967 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
968 br_multicast_find_del_pg(br, pg);
969 spin_unlock_bh(&br->multicast_lock);
970 del_timer_sync(&port->multicast_router_timer);
971 free_percpu(port->mcast_stats);
/* Restart an own-query cycle: reset the startup counter and fire the
 * query timer immediately (only if we can cancel any pending run first).
 */
974 static void br_multicast_enable(struct bridge_mcast_own_query *query)
976 query->startup_sent = 0;
978 if (try_to_del_timer_sync(&query->timer) >= 0 ||
979 del_timer(&query->timer))
980 mod_timer(&query->timer, jiffies);
/* Enable multicast on a port (lock held by caller): kick off the own
 * query cycles and, for a permanently-configured router port not yet on
 * the router list, add it.
 */
983 static void __br_multicast_enable_port(struct net_bridge_port *port)
985 struct net_bridge *br = port->br;
987 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) || !netif_running(br->dev))
990 br_multicast_enable(&port->ip4_own_query);
991 #if IS_ENABLED(CONFIG_IPV6)
992 br_multicast_enable(&port->ip6_own_query);
994 if (port->multicast_router == MDB_RTR_TYPE_PERM &&
995 hlist_unhashed(&port->rlist))
996 br_multicast_add_router(br, port);
/* Locked wrapper around __br_multicast_enable_port(). */
999 void br_multicast_enable_port(struct net_bridge_port *port)
1001 struct net_bridge *br = port->br;
1003 spin_lock(&br->multicast_lock);
1004 __br_multicast_enable_port(port);
1005 spin_unlock(&br->multicast_lock);
/* Disable multicast on a port: drop all non-permanent port groups,
 * remove the port from the router list and stop its router and
 * own-query timers.
 */
1008 void br_multicast_disable_port(struct net_bridge_port *port)
1010 struct net_bridge *br = port->br;
1011 struct net_bridge_port_group *pg;
1012 struct hlist_node *n;
1014 spin_lock(&br->multicast_lock);
1015 hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
1016 if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
1017 br_multicast_find_del_pg(br, pg);
1019 __del_port_router(port);
1021 del_timer(&port->multicast_router_timer);
1022 del_timer(&port->ip4_own_query.timer);
1023 #if IS_ENABLED(CONFIG_IPV6)
1024 del_timer(&port->ip6_own_query.timer);
1026 spin_unlock(&br->multicast_lock);
/* Parse an IGMPv3 membership report: iterate the group records,
 * validating each record length with ip_mc_may_pull() before touching
 * it. Records are treated as plain IGMPv2-style join/leave for now (see
 * in-body comment): CHANGE_TO_INCLUDE / MODE_IS_INCLUDE with zero
 * sources means leave, everything else means join.
 */
1029 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1030 struct net_bridge_port *port,
1031 struct sk_buff *skb,
1034 const unsigned char *src;
1035 struct igmpv3_report *ih;
1036 struct igmpv3_grec *grec;
1045 ih = igmpv3_report_hdr(skb);
1046 num = ntohs(ih->ngrec);
1047 len = skb_transport_offset(skb) + sizeof(*ih);
1049 for (i = 0; i < num; i++) {
/* First make sure the fixed part of the record is in the skb. */
1050 len += sizeof(*grec);
1051 if (!ip_mc_may_pull(skb, len))
1054 grec = (void *)(skb->data + len - sizeof(*grec));
1055 group = grec->grec_mca;
1056 type = grec->grec_type;
1057 nsrcs = ntohs(grec->grec_nsrcs);
/* Then the variable-length source list. */
1060 if (!ip_mc_may_pull(skb, len))
1063 /* We treat this as an IGMPv2 report for now. */
1065 case IGMPV3_MODE_IS_INCLUDE:
1066 case IGMPV3_MODE_IS_EXCLUDE:
1067 case IGMPV3_CHANGE_TO_INCLUDE:
1068 case IGMPV3_CHANGE_TO_EXCLUDE:
1069 case IGMPV3_ALLOW_NEW_SOURCES:
1070 case IGMPV3_BLOCK_OLD_SOURCES:
1077 src = eth_hdr(skb)->h_source;
/* INCLUDE with no sources == leave; otherwise join. */
1078 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1079 type == IGMPV3_MODE_IS_INCLUDE) &&
1081 br_ip4_multicast_leave_group(br, port, group, vid, src);
1083 err = br_ip4_multicast_add_group(br, port, group, vid,
1093 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 report: for each record, read grec_nsrcs safely via
 * skb_header_pointer() (the record may not be linear), bound-check the
 * full record with ipv6_mc_may_pull(), then treat it as an MLDv1-style
 * join/leave: CHANGE_TO_INCLUDE / MODE_IS_INCLUDE with zero sources
 * means leave, everything else means join.
 */
1094 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1095 struct net_bridge_port *port,
1096 struct sk_buff *skb,
1099 unsigned int nsrcs_offset;
1100 const unsigned char *src;
1101 struct icmp6hdr *icmp6h;
1102 struct mld2_grec *grec;
1103 unsigned int grec_len;
1109 if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
1112 icmp6h = icmp6_hdr(skb);
/* Number of group records lives in the second 16-bit data word. */
1113 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
1114 len = skb_transport_offset(skb) + sizeof(*icmp6h);
1116 for (i = 0; i < num; i++) {
1117 __be16 *_nsrcs, __nsrcs;
1120 nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);
/* Make sure grec_nsrcs itself lies within the transport payload. */
1122 if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
1123 nsrcs_offset + sizeof(__nsrcs))
1126 _nsrcs = skb_header_pointer(skb, nsrcs_offset,
1127 sizeof(__nsrcs), &__nsrcs);
1131 nsrcs = ntohs(*_nsrcs);
1132 grec_len = struct_size(grec, grec_src, nsrcs);
1134 if (!ipv6_mc_may_pull(skb, len + grec_len))
1137 grec = (struct mld2_grec *)(skb->data + len);
1140 /* We treat these as MLDv1 reports for now. */
1141 switch (grec->grec_type) {
1142 case MLD2_MODE_IS_INCLUDE:
1143 case MLD2_MODE_IS_EXCLUDE:
1144 case MLD2_CHANGE_TO_INCLUDE:
1145 case MLD2_CHANGE_TO_EXCLUDE:
1146 case MLD2_ALLOW_NEW_SOURCES:
1147 case MLD2_BLOCK_OLD_SOURCES:
1154 src = eth_hdr(skb)->h_source;
/* INCLUDE with no sources == leave; otherwise join. */
1155 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1156 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1158 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1161 err = br_ip6_multicast_add_group(br, port,
1162 &grec->grec_mca, vid,
/* IGMP querier election: the numerically lowest source address wins.
 * A query from @saddr takes over only when it is strictly lower than the
 * currently recorded querier address (or when none is recorded).
 */
1173 static bool br_ip4_multicast_select_querier(struct net_bridge *br,
1174 struct net_bridge_port *port,
1177 if (!timer_pending(&br->ip4_own_query.timer) &&
1178 !timer_pending(&br->ip4_other_query.timer))
1181 if (!br->ip4_querier.addr.u.ip4)
1184 if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
1190 br->ip4_querier.addr.u.ip4 = saddr;
1192 /* update protected by general multicast_lock by caller */
1193 rcu_assign_pointer(br->ip4_querier.port, port);
1198 #if IS_ENABLED(CONFIG_IPV6)
/* MLD querier election, mirroring the IPv4 variant: the lowest source
 * address wins; record the winner's address and originating port.
 */
1199 static bool br_ip6_multicast_select_querier(struct net_bridge *br,
1200 struct net_bridge_port *port,
1201 struct in6_addr *saddr)
1203 if (!timer_pending(&br->ip6_own_query.timer) &&
1204 !timer_pending(&br->ip6_other_query.timer))
1207 if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
1213 br->ip6_querier.addr.u.ip6 = *saddr;
1215 /* update protected by general multicast_lock by caller */
1216 rcu_assign_pointer(br->ip6_querier.port, port);
/* Family dispatcher for the querier election helpers above. */
1222 static bool br_multicast_select_querier(struct net_bridge *br,
1223 struct net_bridge_port *port,
1224 struct br_ip *saddr)
1226 switch (saddr->proto) {
1227 case htons(ETH_P_IP):
1228 return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
1229 #if IS_ENABLED(CONFIG_IPV6)
1230 case htons(ETH_P_IPV6):
1231 return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
/* Refresh the other-querier presence timer after hearing a query; also
 * note the earliest time we could take over (delay_time) when the timer
 * was not already running.
 */
1239 br_multicast_update_query_timer(struct net_bridge *br,
1240 struct bridge_mcast_other_query *query,
1241 unsigned long max_delay)
1243 if (!timer_pending(&query->timer))
1244 query->delay_time = jiffies + max_delay;
1246 mod_timer(&query->timer, jiffies + br->multicast_querier_interval);
/* Propagate a port's mrouter state to offloading hardware via a
 * deferred switchdev attribute notification.
 */
1249 static void br_port_mc_router_state_change(struct net_bridge_port *p,
1252 struct switchdev_attr attr = {
1254 .id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
1255 .flags = SWITCHDEV_F_DEFER,
1256 .u.mrouter = is_mc_router,
1259 switchdev_port_attr_set(p->dev, &attr);
/*
 * Add port to router_list
 * list is maintained ordered by pointer value
 * and locked by br->multicast_lock and RCU
 */
1267 static void br_multicast_add_router(struct net_bridge *br,
1268 struct net_bridge_port *port)
1270 struct net_bridge_port *p;
1271 struct hlist_node *slot = NULL;
/* Already on the router list — nothing to do. */
1273 if (!hlist_unhashed(&port->rlist))
/* Find the last node whose pointer value is <= port (insertion slot). */
1276 hlist_for_each_entry(p, &br->router_list, rlist) {
1277 if ((unsigned long) port >= (unsigned long) p)
1283 hlist_add_behind_rcu(&port->rlist, slot);
1285 hlist_add_head_rcu(&port->rlist, &br->router_list);
1286 br_rtr_notify(br->dev, port, RTM_NEWMDB);
1287 br_port_mc_router_state_change(port, true);
/* Note router activity: with a NULL @port the bridge itself is the
 * router (refresh the bridge-level timer, notifying switchdev on the
 * idle->active edge); otherwise add the port to the router list and
 * refresh its timer. Disabled/permanent port modes are not touched.
 */
1290 static void br_multicast_mark_router(struct net_bridge *br,
1291 struct net_bridge_port *port)
1293 unsigned long now = jiffies;
1296 if (br->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
1297 if (!timer_pending(&br->multicast_router_timer))
1298 br_mc_router_state_change(br, true);
1299 mod_timer(&br->multicast_router_timer,
1300 now + br->multicast_querier_interval);
1305 if (port->multicast_router == MDB_RTR_TYPE_DISABLED ||
1306 port->multicast_router == MDB_RTR_TYPE_PERM)
1309 br_multicast_add_router(br, port);
1311 mod_timer(&port->multicast_router_timer,
1312 now + br->multicast_querier_interval);
/* Common handling for a received query: run the querier election for
 * @saddr; if the sender wins, refresh the other-querier timer and mark
 * the ingress port (or the bridge) as leading towards a router.
 */
1315 static void br_multicast_query_received(struct net_bridge *br,
1316 struct net_bridge_port *port,
1317 struct bridge_mcast_other_query *query,
1318 struct br_ip *saddr,
1319 unsigned long max_delay)
1321 if (!br_multicast_select_querier(br, port, saddr))
1324 br_multicast_update_query_timer(br, query, max_delay);
1325 br_multicast_mark_router(br, port);
/* Handle a received IGMP query. Derive max_delay from the header form
 * (plain IGMPv2 igmphdr vs igmpv3_query with its mantissa/exponent MRC
 * encoding), run election/timer bookkeeping for general queries, and for
 * group-specific queries shorten the membership timers of the queried
 * group's host join and port memberships to last_member_count * delay.
 */
1328 static void br_ip4_multicast_query(struct net_bridge *br,
1329 struct net_bridge_port *port,
1330 struct sk_buff *skb,
1333 unsigned int transport_len = ip_transport_len(skb);
1334 const struct iphdr *iph = ip_hdr(skb);
1335 struct igmphdr *ih = igmp_hdr(skb);
1336 struct net_bridge_mdb_entry *mp;
1337 struct igmpv3_query *ih3;
1338 struct net_bridge_port_group *p;
1339 struct net_bridge_port_group __rcu **pp;
1341 unsigned long max_delay;
1342 unsigned long now = jiffies;
1345 spin_lock(&br->multicast_lock);
1346 if (!netif_running(br->dev) ||
1347 (port && port->state == BR_STATE_DISABLED))
/* IGMPv2-sized query: code field in 1/10s units (0 => v1, 10s default). */
1352 if (transport_len == sizeof(*ih)) {
1353 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
1356 max_delay = 10 * HZ;
/* IGMPv3-sized query: decode the Max Resp Code via IGMPV3_MRC(). */
1359 } else if (transport_len >= sizeof(*ih3)) {
1360 ih3 = igmpv3_query_hdr(skb);
1364 max_delay = ih3->code ?
1365 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
1371 saddr.proto = htons(ETH_P_IP);
1372 saddr.u.ip4 = iph->saddr;
1374 br_multicast_query_received(br, port, &br->ip4_other_query,
/* Group-specific query: shorten timers for the queried group. */
1379 mp = br_mdb_ip4_get(br, group, vid);
1383 max_delay *= br->multicast_last_member_count;
1385 if (mp->host_joined &&
1386 (timer_pending(&mp->timer) ?
1387 time_after(mp->timer.expires, now + max_delay) :
1388 try_to_del_timer_sync(&mp->timer) >= 0))
1389 mod_timer(&mp->timer, now + max_delay);
1391 for (pp = &mp->ports;
1392 (p = mlock_dereference(*pp, br)) != NULL;
1394 if (timer_pending(&p->timer) ?
1395 time_after(p->timer.expires, now + max_delay) :
1396 try_to_del_timer_sync(&p->timer) >= 0)
1397 mod_timer(&p->timer, now + max_delay);
1401 spin_unlock(&br->multicast_lock);
1404 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a received MLD query, the IPv6 mirror of the IGMP path above:
 * derive max_delay from MLDv1 mld_maxdelay or MLDv2 mldv2_mrc(), run the
 * election for general queries (group == ::), and for group-specific
 * queries shorten the matching group's membership timers. Both header
 * forms are bounds-checked with pskb_may_pull() before access.
 */
1405 static int br_ip6_multicast_query(struct net_bridge *br,
1406 struct net_bridge_port *port,
1407 struct sk_buff *skb,
1410 unsigned int transport_len = ipv6_transport_len(skb);
1411 struct mld_msg *mld;
1412 struct net_bridge_mdb_entry *mp;
1413 struct mld2_query *mld2q;
1414 struct net_bridge_port_group *p;
1415 struct net_bridge_port_group __rcu **pp;
1417 unsigned long max_delay;
1418 unsigned long now = jiffies;
1419 unsigned int offset = skb_transport_offset(skb);
1420 const struct in6_addr *group = NULL;
1421 bool is_general_query;
1424 spin_lock(&br->multicast_lock);
1425 if (!netif_running(br->dev) ||
1426 (port && port->state == BR_STATE_DISABLED))
/* MLDv1-sized query. */
1429 if (transport_len == sizeof(*mld)) {
1430 if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
1434 mld = (struct mld_msg *) icmp6_hdr(skb);
1435 max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
1437 group = &mld->mld_mca;
/* MLDv2 query. */
1439 if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
1443 mld2q = (struct mld2_query *)icmp6_hdr(skb);
1444 if (!mld2q->mld2q_nsrcs)
1445 group = &mld2q->mld2q_mca;
1447 max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
1450 is_general_query = group && ipv6_addr_any(group);
1452 if (is_general_query) {
1453 saddr.proto = htons(ETH_P_IPV6);
1454 saddr.u.ip6 = ipv6_hdr(skb)->saddr;
1456 br_multicast_query_received(br, port, &br->ip6_other_query,
1459 } else if (!group) {
/* Group-specific query: shorten timers for the queried group. */
1463 mp = br_mdb_ip6_get(br, group, vid);
1467 max_delay *= br->multicast_last_member_count;
1468 if (mp->host_joined &&
1469 (timer_pending(&mp->timer) ?
1470 time_after(mp->timer.expires, now + max_delay) :
1471 try_to_del_timer_sync(&mp->timer) >= 0))
1472 mod_timer(&mp->timer, now + max_delay);
1474 for (pp = &mp->ports;
1475 (p = mlock_dereference(*pp, br)) != NULL;
1477 if (timer_pending(&p->timer) ?
1478 time_after(p->timer.expires, now + max_delay) :
1479 try_to_del_timer_sync(&p->timer) >= 0)
1480 mod_timer(&p->timer, now + max_delay);
1484 spin_unlock(&br->multicast_lock);
/* Protocol-independent leave handling shared by IGMP and MLD.
 * Under br->multicast_lock: with fast-leave enabled the port group is
 * removed immediately; otherwise membership timers are shortened so the
 * entry expires unless another member answers our last-member queries.
 * NOTE(review): locals such as "time"/"now", early-return checks, goto
 * labels and closing braces are elided in this extract.
 */
1490 br_multicast_leave_group(struct net_bridge *br,
1491 struct net_bridge_port *port,
1492 struct br_ip *group,
1493 struct bridge_mcast_other_query *other_query,
1494 struct bridge_mcast_own_query *own_query,
1495 const unsigned char *src)
1497 struct net_bridge_mdb_entry *mp;
1498 struct net_bridge_port_group *p;
1502 spin_lock(&br->multicast_lock);
1503 if (!netif_running(br->dev) ||
1504 (port && port->state == BR_STATE_DISABLED))
1507 mp = br_mdb_ip_get(br, group);
/* Fast leave: drop the matching port group right away, except for
 * permanent (user-configured) entries.
 */
1511 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1512 struct net_bridge_port_group __rcu **pp;
1514 for (pp = &mp->ports;
1515 (p = mlock_dereference(*pp, br)) != NULL;
1517 if (!br_port_group_equal(p, port, src))
1520 if (p->flags & MDB_PG_FLAGS_PERMANENT)
1523 p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
1524 br_multicast_del_pg(mp, p, pp);
/* Defer to a foreign querier if one is currently active. */
1529 if (timer_pending(&other_query->timer))
/* As the active querier, send a last-member query ourselves and
 * rearm our own query timer for the follow-up rounds.
 */
1532 if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
1533 __br_multicast_send_query(br, port, &mp->addr);
1535 time = jiffies + br->multicast_last_member_count *
1536 br->multicast_last_member_interval;
1538 mod_timer(&own_query->timer, time);
/* Shorten the timers of the leaving port's group entries. */
1540 for (p = mlock_dereference(mp->ports, br);
1542 p = mlock_dereference(p->next, br)) {
1543 if (!br_port_group_equal(p, port, src))
1546 if (!hlist_unhashed(&p->mglist) &&
1547 (timer_pending(&p->timer) ?
1548 time_after(p->timer.expires, time) :
1549 try_to_del_timer_sync(&p->timer) >= 0)) {
1550 mod_timer(&p->timer, time);
/* Non-querier path: only lower timers, never raise them. */
1558 time = now + br->multicast_last_member_count *
1559 br->multicast_last_member_interval;
/* Host-joined case (the bridge device itself is a member). */
1562 if (mp->host_joined &&
1563 (timer_pending(&mp->timer) ?
1564 time_after(mp->timer.expires, time) :
1565 try_to_del_timer_sync(&mp->timer) >= 0)) {
1566 mod_timer(&mp->timer, time);
1572 for (p = mlock_dereference(mp->ports, br);
1574 p = mlock_dereference(p->next, br)) {
1575 if (p->port != port)
1578 if (!hlist_unhashed(&p->mglist) &&
1579 (timer_pending(&p->timer) ?
1580 time_after(p->timer.expires, time) :
1581 try_to_del_timer_sync(&p->timer) >= 0)) {
1582 mod_timer(&p->timer, time);
1588 spin_unlock(&br->multicast_lock);
1591 static void br_ip4_multicast_leave_group(struct net_bridge *br,
1592 struct net_bridge_port *port,
1595 const unsigned char *src)
1597 struct br_ip br_group;
1598 struct bridge_mcast_own_query *own_query;
1600 if (ipv4_is_local_multicast(group))
1603 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1605 memset(&br_group, 0, sizeof(br_group));
1606 br_group.u.ip4 = group;
1607 br_group.proto = htons(ETH_P_IP);
1610 br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
1614 #if IS_ENABLED(CONFIG_IPV6)
1615 static void br_ip6_multicast_leave_group(struct net_bridge *br,
1616 struct net_bridge_port *port,
1617 const struct in6_addr *group,
1619 const unsigned char *src)
1621 struct br_ip br_group;
1622 struct bridge_mcast_own_query *own_query;
1624 if (ipv6_addr_is_ll_all_nodes(group))
1627 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1629 memset(&br_group, 0, sizeof(br_group));
1630 br_group.u.ip6 = *group;
1631 br_group.proto = htons(ETH_P_IPV6);
1634 br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
1639 static void br_multicast_err_count(const struct net_bridge *br,
1640 const struct net_bridge_port *p,
1643 struct bridge_mcast_stats __percpu *stats;
1644 struct bridge_mcast_stats *pstats;
1646 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
1650 stats = p->mcast_stats;
1652 stats = br->mcast_stats;
1653 if (WARN_ON(!stats))
1656 pstats = this_cpu_ptr(stats);
1658 u64_stats_update_begin(&pstats->syncp);
1660 case htons(ETH_P_IP):
1661 pstats->mstats.igmp_parse_errors++;
1663 #if IS_ENABLED(CONFIG_IPV6)
1664 case htons(ETH_P_IPV6):
1665 pstats->mstats.mld_parse_errors++;
1669 u64_stats_update_end(&pstats->syncp);
1672 static void br_multicast_pim(struct net_bridge *br,
1673 struct net_bridge_port *port,
1674 const struct sk_buff *skb)
1676 unsigned int offset = skb_transport_offset(skb);
1677 struct pimhdr *pimhdr, _pimhdr;
1679 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
1680 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
1681 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
1684 br_multicast_mark_router(br, port);
1687 static int br_ip4_multicast_mrd_rcv(struct net_bridge *br,
1688 struct net_bridge_port *port,
1689 struct sk_buff *skb)
1691 if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
1692 igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
1695 br_multicast_mark_router(br, port);
1700 static int br_multicast_ipv4_rcv(struct net_bridge *br,
1701 struct net_bridge_port *port,
1702 struct sk_buff *skb,
1705 const unsigned char *src;
1709 err = ip_mc_check_igmp(skb);
1711 if (err == -ENOMSG) {
1712 if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
1713 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1714 } else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
1715 if (ip_hdr(skb)->protocol == IPPROTO_PIM)
1716 br_multicast_pim(br, port, skb);
1717 } else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
1718 br_ip4_multicast_mrd_rcv(br, port, skb);
1722 } else if (err < 0) {
1723 br_multicast_err_count(br, port, skb->protocol);
1728 src = eth_hdr(skb)->h_source;
1729 BR_INPUT_SKB_CB(skb)->igmp = ih->type;
1732 case IGMP_HOST_MEMBERSHIP_REPORT:
1733 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1734 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1735 err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
1738 case IGMPV3_HOST_MEMBERSHIP_REPORT:
1739 err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
1741 case IGMP_HOST_MEMBERSHIP_QUERY:
1742 br_ip4_multicast_query(br, port, skb, vid);
1744 case IGMP_HOST_LEAVE_MESSAGE:
1745 br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
1749 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1755 #if IS_ENABLED(CONFIG_IPV6)
1756 static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
1757 struct net_bridge_port *port,
1758 struct sk_buff *skb)
1762 if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
1765 ret = ipv6_mc_check_icmpv6(skb);
1769 if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
1772 br_multicast_mark_router(br, port);
1777 static int br_multicast_ipv6_rcv(struct net_bridge *br,
1778 struct net_bridge_port *port,
1779 struct sk_buff *skb,
1782 const unsigned char *src;
1783 struct mld_msg *mld;
1786 err = ipv6_mc_check_mld(skb);
1788 if (err == -ENOMSG) {
1789 if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
1790 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1792 if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
1793 err = br_ip6_multicast_mrd_rcv(br, port, skb);
1795 if (err < 0 && err != -ENOMSG) {
1796 br_multicast_err_count(br, port, skb->protocol);
1802 } else if (err < 0) {
1803 br_multicast_err_count(br, port, skb->protocol);
1807 mld = (struct mld_msg *)skb_transport_header(skb);
1808 BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;
1810 switch (mld->mld_type) {
1811 case ICMPV6_MGM_REPORT:
1812 src = eth_hdr(skb)->h_source;
1813 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1814 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
1817 case ICMPV6_MLD2_REPORT:
1818 err = br_ip6_multicast_mld2_report(br, port, skb, vid);
1820 case ICMPV6_MGM_QUERY:
1821 err = br_ip6_multicast_query(br, port, skb, vid);
1823 case ICMPV6_MGM_REDUCTION:
1824 src = eth_hdr(skb)->h_source;
1825 br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
1829 br_multicast_count(br, port, skb, BR_INPUT_SKB_CB(skb)->igmp,
1836 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1837 struct sk_buff *skb, u16 vid)
1841 BR_INPUT_SKB_CB(skb)->igmp = 0;
1842 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1844 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
1847 switch (skb->protocol) {
1848 case htons(ETH_P_IP):
1849 ret = br_multicast_ipv4_rcv(br, port, skb, vid);
1851 #if IS_ENABLED(CONFIG_IPV6)
1852 case htons(ETH_P_IPV6):
1853 ret = br_multicast_ipv6_rcv(br, port, skb, vid);
1861 static void br_multicast_query_expired(struct net_bridge *br,
1862 struct bridge_mcast_own_query *query,
1863 struct bridge_mcast_querier *querier)
1865 spin_lock(&br->multicast_lock);
1866 if (query->startup_sent < br->multicast_startup_query_count)
1867 query->startup_sent++;
1869 RCU_INIT_POINTER(querier->port, NULL);
1870 br_multicast_send_query(br, NULL, query);
1871 spin_unlock(&br->multicast_lock);
1874 static void br_ip4_multicast_query_expired(struct timer_list *t)
1876 struct net_bridge *br = from_timer(br, t, ip4_own_query.timer);
1878 br_multicast_query_expired(br, &br->ip4_own_query, &br->ip4_querier);
1881 #if IS_ENABLED(CONFIG_IPV6)
1882 static void br_ip6_multicast_query_expired(struct timer_list *t)
1884 struct net_bridge *br = from_timer(br, t, ip6_own_query.timer);
1886 br_multicast_query_expired(br, &br->ip6_own_query, &br->ip6_querier);
1890 static void __grp_src_gc(struct hlist_head *head)
1892 struct net_bridge_group_src *ent;
1893 struct hlist_node *tmp;
1895 hlist_for_each_entry_safe(ent, tmp, head, del_node) {
1896 hlist_del_init(&ent->del_node);
1897 del_timer_sync(&ent->timer);
1898 kfree_rcu(ent, rcu);
1902 static void br_multicast_src_gc(struct work_struct *work)
1904 struct net_bridge *br = container_of(work, struct net_bridge,
1906 HLIST_HEAD(deleted_head);
1908 spin_lock_bh(&br->multicast_lock);
1909 hlist_move_list(&br->src_gc_list, &deleted_head);
1910 spin_unlock_bh(&br->multicast_lock);
1912 __grp_src_gc(&deleted_head);
1915 void br_multicast_init(struct net_bridge *br)
1917 br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
1919 br->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
1920 br->multicast_last_member_count = 2;
1921 br->multicast_startup_query_count = 2;
1923 br->multicast_last_member_interval = HZ;
1924 br->multicast_query_response_interval = 10 * HZ;
1925 br->multicast_startup_query_interval = 125 * HZ / 4;
1926 br->multicast_query_interval = 125 * HZ;
1927 br->multicast_querier_interval = 255 * HZ;
1928 br->multicast_membership_interval = 260 * HZ;
1930 br->ip4_other_query.delay_time = 0;
1931 br->ip4_querier.port = NULL;
1932 br->multicast_igmp_version = 2;
1933 #if IS_ENABLED(CONFIG_IPV6)
1934 br->multicast_mld_version = 1;
1935 br->ip6_other_query.delay_time = 0;
1936 br->ip6_querier.port = NULL;
1938 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
1939 br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);
1941 spin_lock_init(&br->multicast_lock);
1942 timer_setup(&br->multicast_router_timer,
1943 br_multicast_local_router_expired, 0);
1944 timer_setup(&br->ip4_other_query.timer,
1945 br_ip4_multicast_querier_expired, 0);
1946 timer_setup(&br->ip4_own_query.timer,
1947 br_ip4_multicast_query_expired, 0);
1948 #if IS_ENABLED(CONFIG_IPV6)
1949 timer_setup(&br->ip6_other_query.timer,
1950 br_ip6_multicast_querier_expired, 0);
1951 timer_setup(&br->ip6_own_query.timer,
1952 br_ip6_multicast_query_expired, 0);
1954 INIT_HLIST_HEAD(&br->mdb_list);
1955 INIT_HLIST_HEAD(&br->src_gc_list);
1956 INIT_WORK(&br->src_gc_work, br_multicast_src_gc);
1959 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
1961 struct in_device *in_dev = in_dev_get(br->dev);
1966 __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
1970 #if IS_ENABLED(CONFIG_IPV6)
1971 static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
1973 struct in6_addr addr;
1975 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
1976 ipv6_dev_mc_inc(br->dev, &addr);
1979 static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
/* Join the all-snoopers groups for both address families. */
static void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
1990 static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
1992 struct in_device *in_dev = in_dev_get(br->dev);
1994 if (WARN_ON(!in_dev))
1997 __ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
2001 #if IS_ENABLED(CONFIG_IPV6)
2002 static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
2004 struct in6_addr addr;
2006 ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
2007 ipv6_dev_mc_dec(br->dev, &addr);
2010 static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
/* Leave the all-snoopers groups for both address families. */
static void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
2021 static void __br_multicast_open(struct net_bridge *br,
2022 struct bridge_mcast_own_query *query)
2024 query->startup_sent = 0;
2026 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
2029 mod_timer(&query->timer, jiffies);
2032 void br_multicast_open(struct net_bridge *br)
2034 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
2035 br_multicast_join_snoopers(br);
2037 __br_multicast_open(br, &br->ip4_own_query);
2038 #if IS_ENABLED(CONFIG_IPV6)
2039 __br_multicast_open(br, &br->ip6_own_query);
2043 void br_multicast_stop(struct net_bridge *br)
2045 del_timer_sync(&br->multicast_router_timer);
2046 del_timer_sync(&br->ip4_other_query.timer);
2047 del_timer_sync(&br->ip4_own_query.timer);
2048 #if IS_ENABLED(CONFIG_IPV6)
2049 del_timer_sync(&br->ip6_other_query.timer);
2050 del_timer_sync(&br->ip6_own_query.timer);
2053 if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
2054 br_multicast_leave_snoopers(br);
/* Final multicast teardown when the bridge device is deleted: remove
 * every MDB entry, flush the pending source GC list and make sure the
 * deferred GC work cannot run afterwards.
 * NOTE(review): the per-entry free call inside the loop is not visible
 * in this extract.
 */
2057 void br_multicast_dev_del(struct net_bridge *br)
2059 struct net_bridge_mdb_entry *mp;
2060 HLIST_HEAD(deleted_head);
2061 struct hlist_node *tmp;
/* Unlink all MDB entries under the multicast lock. */
2063 spin_lock_bh(&br->multicast_lock);
2064 hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
2065 del_timer(&mp->timer);
2066 rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
2068 hlist_del_rcu(&mp->mdb_node);
/* Steal the pending source entries and free them outside the lock. */
2071 hlist_move_list(&br->src_gc_list, &deleted_head);
2072 spin_unlock_bh(&br->multicast_lock);
2074 __grp_src_gc(&deleted_head);
/* No new GC work can be queued at this point; wait out any in flight. */
2075 cancel_work_sync(&br->src_gc_work);
2080 int br_multicast_set_router(struct net_bridge *br, unsigned long val)
2084 spin_lock_bh(&br->multicast_lock);
2087 case MDB_RTR_TYPE_DISABLED:
2088 case MDB_RTR_TYPE_PERM:
2089 br_mc_router_state_change(br, val == MDB_RTR_TYPE_PERM);
2090 del_timer(&br->multicast_router_timer);
2091 br->multicast_router = val;
2094 case MDB_RTR_TYPE_TEMP_QUERY:
2095 if (br->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
2096 br_mc_router_state_change(br, false);
2097 br->multicast_router = val;
2102 spin_unlock_bh(&br->multicast_lock);
2107 static void __del_port_router(struct net_bridge_port *p)
2109 if (hlist_unhashed(&p->rlist))
2111 hlist_del_init_rcu(&p->rlist);
2112 br_rtr_notify(p->br->dev, p, RTM_DELMDB);
2113 br_port_mc_router_state_change(p, false);
2115 /* don't allow timer refresh */
2116 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
2117 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
2120 int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
2122 struct net_bridge *br = p->br;
2123 unsigned long now = jiffies;
2126 spin_lock(&br->multicast_lock);
2127 if (p->multicast_router == val) {
2128 /* Refresh the temp router port timer */
2129 if (p->multicast_router == MDB_RTR_TYPE_TEMP)
2130 mod_timer(&p->multicast_router_timer,
2131 now + br->multicast_querier_interval);
2136 case MDB_RTR_TYPE_DISABLED:
2137 p->multicast_router = MDB_RTR_TYPE_DISABLED;
2138 __del_port_router(p);
2139 del_timer(&p->multicast_router_timer);
2141 case MDB_RTR_TYPE_TEMP_QUERY:
2142 p->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
2143 __del_port_router(p);
2145 case MDB_RTR_TYPE_PERM:
2146 p->multicast_router = MDB_RTR_TYPE_PERM;
2147 del_timer(&p->multicast_router_timer);
2148 br_multicast_add_router(br, p);
2150 case MDB_RTR_TYPE_TEMP:
2151 p->multicast_router = MDB_RTR_TYPE_TEMP;
2152 br_multicast_mark_router(br, p);
2159 spin_unlock(&br->multicast_lock);
2164 static void br_multicast_start_querier(struct net_bridge *br,
2165 struct bridge_mcast_own_query *query)
2167 struct net_bridge_port *port;
2169 __br_multicast_open(br, query);
2172 list_for_each_entry_rcu(port, &br->port_list, list) {
2173 if (port->state == BR_STATE_DISABLED ||
2174 port->state == BR_STATE_BLOCKING)
2177 if (query == &br->ip4_own_query)
2178 br_multicast_enable(&port->ip4_own_query);
2179 #if IS_ENABLED(CONFIG_IPV6)
2181 br_multicast_enable(&port->ip6_own_query);
2187 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
2189 struct net_bridge_port *port;
2191 spin_lock_bh(&br->multicast_lock);
2192 if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
2195 br_mc_disabled_update(br->dev, val);
2196 br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
2197 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
2198 br_multicast_leave_snoopers(br);
2202 if (!netif_running(br->dev))
2205 br_multicast_open(br);
2206 list_for_each_entry(port, &br->port_list, list)
2207 __br_multicast_enable_port(port);
2210 spin_unlock_bh(&br->multicast_lock);
2215 bool br_multicast_enabled(const struct net_device *dev)
2217 struct net_bridge *br = netdev_priv(dev);
2219 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
2221 EXPORT_SYMBOL_GPL(br_multicast_enabled);
2223 bool br_multicast_router(const struct net_device *dev)
2225 struct net_bridge *br = netdev_priv(dev);
2228 spin_lock_bh(&br->multicast_lock);
2229 is_router = br_multicast_is_router(br);
2230 spin_unlock_bh(&br->multicast_lock);
2233 EXPORT_SYMBOL_GPL(br_multicast_router);
2235 int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
2237 unsigned long max_delay;
2241 spin_lock_bh(&br->multicast_lock);
2242 if (br_opt_get(br, BROPT_MULTICAST_QUERIER) == val)
2245 br_opt_toggle(br, BROPT_MULTICAST_QUERIER, !!val);
2249 max_delay = br->multicast_query_response_interval;
2251 if (!timer_pending(&br->ip4_other_query.timer))
2252 br->ip4_other_query.delay_time = jiffies + max_delay;
2254 br_multicast_start_querier(br, &br->ip4_own_query);
2256 #if IS_ENABLED(CONFIG_IPV6)
2257 if (!timer_pending(&br->ip6_other_query.timer))
2258 br->ip6_other_query.delay_time = jiffies + max_delay;
2260 br_multicast_start_querier(br, &br->ip6_own_query);
2264 spin_unlock_bh(&br->multicast_lock);
2269 int br_multicast_set_igmp_version(struct net_bridge *br, unsigned long val)
2271 /* Currently we support only version 2 and 3 */
2280 spin_lock_bh(&br->multicast_lock);
2281 br->multicast_igmp_version = val;
2282 spin_unlock_bh(&br->multicast_lock);
2287 #if IS_ENABLED(CONFIG_IPV6)
2288 int br_multicast_set_mld_version(struct net_bridge *br, unsigned long val)
2290 /* Currently we support version 1 and 2 */
2299 spin_lock_bh(&br->multicast_lock);
2300 br->multicast_mld_version = val;
2301 spin_unlock_bh(&br->multicast_lock);
2308 * br_multicast_list_adjacent - Returns snooped multicast addresses
2309 * @dev: The bridge port adjacent to which to retrieve addresses
2310 * @br_ip_list: The list to store found, snooped multicast IP addresses in
2312 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
2313 * snooping feature on all bridge ports of dev's bridge device, excluding
2314 * the addresses from dev itself.
2316 * Returns the number of items added to br_ip_list.
2319 * - br_ip_list needs to be initialized by caller
2320 * - br_ip_list might contain duplicates in the end
2321 * (needs to be taken care of by caller)
2322 * - br_ip_list needs to be freed by caller
2324 int br_multicast_list_adjacent(struct net_device *dev,
2325 struct list_head *br_ip_list)
2327 struct net_bridge *br;
2328 struct net_bridge_port *port;
2329 struct net_bridge_port_group *group;
2330 struct br_ip_list *entry;
2334 if (!br_ip_list || !netif_is_bridge_port(dev))
2337 port = br_port_get_rcu(dev);
2338 if (!port || !port->br)
2343 list_for_each_entry_rcu(port, &br->port_list, list) {
2344 if (!port->dev || port->dev == dev)
2347 hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
2348 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
2352 entry->addr = group->addr;
2353 list_add(&entry->list, br_ip_list);
2362 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
2365 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
2366 * @dev: The bridge port providing the bridge on which to check for a querier
2367 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2369 * Checks whether the given interface has a bridge on top and if so returns
2370 * true if a valid querier exists anywhere on the bridged link layer.
2371 * Otherwise returns false.
2373 bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
2375 struct net_bridge *br;
2376 struct net_bridge_port *port;
2381 if (!netif_is_bridge_port(dev))
2384 port = br_port_get_rcu(dev);
2385 if (!port || !port->br)
2390 memset(ð, 0, sizeof(eth));
2391 eth.h_proto = htons(proto);
2393 ret = br_multicast_querier_exists(br, ð);
2399 EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
2402 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
2403 * @dev: The bridge port adjacent to which to check for a querier
2404 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
2406 * Checks whether the given interface has a bridge on top and if so returns
2407 * true if a selected querier is behind one of the other ports of this
2408 * bridge. Otherwise returns false.
2410 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
2412 struct net_bridge *br;
2413 struct net_bridge_port *port;
2417 if (!netif_is_bridge_port(dev))
2420 port = br_port_get_rcu(dev);
2421 if (!port || !port->br)
2428 if (!timer_pending(&br->ip4_other_query.timer) ||
2429 rcu_dereference(br->ip4_querier.port) == port)
2432 #if IS_ENABLED(CONFIG_IPV6)
2434 if (!timer_pending(&br->ip6_other_query.timer) ||
2435 rcu_dereference(br->ip6_querier.port) == port)
2448 EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
2450 static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
2451 const struct sk_buff *skb, u8 type, u8 dir)
2453 struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
2454 __be16 proto = skb->protocol;
2457 u64_stats_update_begin(&pstats->syncp);
2459 case htons(ETH_P_IP):
2460 t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
2462 case IGMP_HOST_MEMBERSHIP_REPORT:
2463 pstats->mstats.igmp_v1reports[dir]++;
2465 case IGMPV2_HOST_MEMBERSHIP_REPORT:
2466 pstats->mstats.igmp_v2reports[dir]++;
2468 case IGMPV3_HOST_MEMBERSHIP_REPORT:
2469 pstats->mstats.igmp_v3reports[dir]++;
2471 case IGMP_HOST_MEMBERSHIP_QUERY:
2472 if (t_len != sizeof(struct igmphdr)) {
2473 pstats->mstats.igmp_v3queries[dir]++;
2475 unsigned int offset = skb_transport_offset(skb);
2476 struct igmphdr *ih, _ihdr;
2478 ih = skb_header_pointer(skb, offset,
2479 sizeof(_ihdr), &_ihdr);
2483 pstats->mstats.igmp_v1queries[dir]++;
2485 pstats->mstats.igmp_v2queries[dir]++;
2488 case IGMP_HOST_LEAVE_MESSAGE:
2489 pstats->mstats.igmp_leaves[dir]++;
2493 #if IS_ENABLED(CONFIG_IPV6)
2494 case htons(ETH_P_IPV6):
2495 t_len = ntohs(ipv6_hdr(skb)->payload_len) +
2496 sizeof(struct ipv6hdr);
2497 t_len -= skb_network_header_len(skb);
2499 case ICMPV6_MGM_REPORT:
2500 pstats->mstats.mld_v1reports[dir]++;
2502 case ICMPV6_MLD2_REPORT:
2503 pstats->mstats.mld_v2reports[dir]++;
2505 case ICMPV6_MGM_QUERY:
2506 if (t_len != sizeof(struct mld_msg))
2507 pstats->mstats.mld_v2queries[dir]++;
2509 pstats->mstats.mld_v1queries[dir]++;
2511 case ICMPV6_MGM_REDUCTION:
2512 pstats->mstats.mld_leaves[dir]++;
2516 #endif /* CONFIG_IPV6 */
2518 u64_stats_update_end(&pstats->syncp);
2521 void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
2522 const struct sk_buff *skb, u8 type, u8 dir)
2524 struct bridge_mcast_stats __percpu *stats;
2526 /* if multicast_disabled is true then igmp type can't be set */
2527 if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
2531 stats = p->mcast_stats;
2533 stats = br->mcast_stats;
2534 if (WARN_ON(!stats))
2537 br_mcast_stats_add(stats, skb, type, dir);
2540 int br_multicast_init_stats(struct net_bridge *br)
2542 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
2543 if (!br->mcast_stats)
2549 void br_multicast_uninit_stats(struct net_bridge *br)
2551 free_percpu(br->mcast_stats);
2554 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
2555 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
2557 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
2558 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
2561 void br_multicast_get_stats(const struct net_bridge *br,
2562 const struct net_bridge_port *p,
2563 struct br_mcast_stats *dest)
2565 struct bridge_mcast_stats __percpu *stats;
2566 struct br_mcast_stats tdst;
2569 memset(dest, 0, sizeof(*dest));
2571 stats = p->mcast_stats;
2573 stats = br->mcast_stats;
2574 if (WARN_ON(!stats))
2577 memset(&tdst, 0, sizeof(tdst));
2578 for_each_possible_cpu(i) {
2579 struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
2580 struct br_mcast_stats temp;
2584 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
2585 memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
2586 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
2588 mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
2589 mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
2590 mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
2591 mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
2592 mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
2593 mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
2594 mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
2595 tdst.igmp_parse_errors += temp.igmp_parse_errors;
2597 mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
2598 mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
2599 mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
2600 mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
2601 mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
2602 tdst.mld_parse_errors += temp.mld_parse_errors;
2604 memcpy(dest, &tdst, sizeof(*dest));
2607 int br_mdb_hash_init(struct net_bridge *br)
2609 return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
2612 void br_mdb_hash_fini(struct net_bridge *br)
2614 rhashtable_destroy(&br->mdb_hash_tbl);