1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/igmp.h>
4 #include <linux/kernel.h>
5 #include <linux/netdevice.h>
6 #include <linux/rculist.h>
7 #include <linux/skbuff.h>
8 #include <linux/if_ether.h>
10 #include <net/netlink.h>
11 #include <net/switchdev.h>
12 #if IS_ENABLED(CONFIG_IPV6)
14 #include <net/addrconf.h>
17 #include "br_private.h"
/* Dump the bridge's multicast router port list as an MDBA_ROUTER nest
 * into an RTM_GETMDB reply.  Each router port becomes an
 * MDBA_ROUTER_PORT nest carrying its ifindex, the remaining router
 * timer and the port's multicast_router mode.
 *
 * NOTE(review): several lines (braces, early returns, error labels)
 * are elided from this excerpt; comments describe only visible logic.
 */
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	/* Nothing to report when router discovery is disabled or no
	 * router port has been learned/configured.
	 */
	if (!br->multicast_router || hlist_empty(&br->router_list))

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);

	/* RCU list walk — presumably under rcu_read_lock() held by the
	 * dump path; TODO confirm against caller.
	 */
	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			/* Partial attribute set — drop this port's nest. */
			nla_nest_cancel(skb, port_nest);
		nla_nest_end(skb, port_nest);

	nla_nest_end(skb, nest);
	/* error path: cancel the outer nest */
	nla_nest_cancel(skb, nest);
57 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
59 e->state = flags & MDB_PG_FLAGS_PERMANENT;
61 if (flags & MDB_PG_FLAGS_OFFLOAD)
62 e->flags |= MDB_FLAGS_OFFLOAD;
63 if (flags & MDB_PG_FLAGS_FAST_LEAVE)
64 e->flags |= MDB_FLAGS_FAST_LEAVE;
65 if (flags & MDB_PG_FLAGS_STAR_EXCL)
66 e->flags |= MDB_FLAGS_STAR_EXCL;
67 if (flags & MDB_PG_FLAGS_BLOCKED)
68 e->flags |= MDB_FLAGS_BLOCKED;
/* Convert a UAPI br_mdb_entry (plus optional MDBE_ATTR_SOURCE netlink
 * attribute) into the bridge's internal br_ip lookup key.
 *
 * NOTE(review): the switch header/breaks/braces are elided from this
 * excerpt; the visible cases cover IPv4 and (if enabled) IPv6.
 */
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
	memset(ip, 0, sizeof(struct br_ip));

	ip->proto = entry->addr.proto;
		/* IPv4: group address, optional source for (S,G) entries */
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* IPv6: group address, optional source for (S,G) entries */
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
/* Emit the source list of a port group as an MDBA_MDB_EATTR_SRC_LIST
 * nest; each source becomes an MDBA_MDB_SRCLIST_ENTRY nest with its
 * address (MDBA_MDB_SRCATTR_ADDRESS) and remaining source timer
 * (MDBA_MDB_SRCATTR_TIMER).
 *
 * NOTE(review): returns, gotos and several braces are elided from this
 * excerpt.
 */
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	/* Nothing to report for a group without explicit sources. */
	if (hlist_empty(&p->src_list))

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);

	/* Traversal is valid under either RCU or the bridge multicast
	 * lock, per the lockdep expression below.
	 */
	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
			/* unknown protocol: drop this entry's nest */
			nla_nest_cancel(skb, nest_ent);
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
		nla_nest_end(skb, nest_ent);

	nla_nest_end(skb, nest);
	/* error path: cancel the whole source-list nest */
	nla_nest_cancel(skb, nest);
/* Fill one MDBA_MDB_ENTRY_INFO nest describing either a port group
 * (p != NULL) or, when p is NULL, the host-joined state of mp itself.
 *
 * NOTE(review): this excerpt elides several lines (flags/ifindex/
 * mtimer selection, error gotos, braces); comments describe only the
 * visible logic.
 */
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;

	memset(&e, 0, sizeof(e));
	/* Port-group entry: report the member port's ifindex... */
	ifindex = p->key.port->dev->ifindex;
	/* ...host-joined entry: report the bridge device itself. */
	ifindex = mp->br->dev->ifindex;

	__mdb_entry_fill_flags(&e, flags);
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    MDBA_MDB_EATTR_TIMER,
	    br_timer_value(mtimer)))

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		/* Per-source data is only meaningful with IGMPv3. */
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* Per-source data is only meaningful with MLDv2. */
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
	if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
	if (dump_srcs_mode &&
	    (__mdb_fill_srcs(skb, p) ||
	     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
	nla_nest_end(skb, nest_ent);
	/* error path: drop the entry-info nest */
	nla_nest_cancel(skb, nest_ent);
/* Dump one bridge's entire MDB (host-joined entries plus per-port
 * groups) into an RTM_GETMDB reply, resuming a partial dump from
 * cb->args[1] (entry index) and cb->args[2] (port-group index).
 *
 * NOTE(review): skip/resume bookkeeping and error labels are elided
 * from this excerpt.
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	/* Nothing to dump when multicast snooping is disabled. */
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))

	nest = nla_nest_start_noflag(skb, MDBA_MDB);

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		/* Host join is emitted first, only when not resuming
		 * mid-entry (s_pidx == 0).
		 */
		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
				nla_nest_cancel(skb, nest2);
		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
			err = __mdb_fill_info(skb, mp, p);
				/* close the partial entry before bailing */
				nla_nest_end(skb, nest2);
		nla_nest_end(skb, nest2);

	nla_nest_end(skb, nest);
297 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
298 struct netlink_ext_ack *extack)
300 struct br_port_msg *bpm;
302 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
303 NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
307 bpm = nlmsg_data(nlh);
309 NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
312 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
313 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
/* RTM_GETMDB dump handler: walk every bridge device in the netns and
 * emit one NLM_F_MULTI message per bridge containing its MDB entries
 * and router port list.
 *
 * NOTE(review): rcu locking, idx bookkeeping, nlmsg_end and the out
 * labels are elided from this excerpt.
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;

	/* Strict-checking requesters get the header validated first. */
	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

	/* Remember the generation so userspace can detect changes. */
	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
			if (br_rports_fill_info(skb, cb, dev) < 0)
/* Build a single RTM_NEWMDB/RTM_DELMDB notification message for entry
 * mp / port group pg into skb.
 *
 * NOTE(review): the trailing "int type" parameter, returns and goto
 * labels are elided from this excerpt; the second nla_nest_end +
 * nlmsg_cancel pair at the bottom is the cancellation path.
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (__mdb_fill_info(skb, mp, pg))

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	/* error path: close the open nest, then cancel the message */
	nla_nest_end(skb, nest);
	nlmsg_cancel(skb, nlh);
/* Upper-bound size estimate for an MDB notification about port group
 * pg; used by br_mdb_notify() to size the nlmsg allocation.
 *
 * NOTE(review): the "goto out"-style skips for IGMPv2/MLDv1 and the
 * out label / final return are elided from this excerpt.
 */
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		/* IGMPv2 has no source lists — skip per-source sizing. */
		if (pg->key.port->br->multicast_igmp_version == 2)
		addr_size = sizeof(__be32);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		/* MLDv1 has no source lists — skip per-source sizing. */
		if (pg->key.port->br->multicast_mld_version == 1)
		addr_size = sizeof(struct in6_addr);

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
470 struct br_mdb_complete_info {
471 struct net_bridge_port *port;
/* Switchdev completion callback for a deferred MDB add: on success,
 * find the matching port group under the multicast lock and flag it
 * MDB_PG_FLAGS_OFFLOAD so dumps report the entry as offloaded.
 *
 * NOTE(review): the err check, NULL-mp bailout, loop braces and the
 * kfree(priv) at the end are elided from this excerpt.
 */
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	/* Walk the entry's port list looking for this port's group. */
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
		if (p->key.port != port)
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	spin_unlock_bh(&br->multicast_lock);
/* Propagate a host-joined MDB entry to one switchdev lower device as a
 * deferred SWITCHDEV_OBJ_ID_HOST_MDB object — add on RTM_NEWMDB,
 * delete on RTM_DELMDB.
 *
 * NOTE(review): the "int type" parameter, remaining initializer
 * members, the switch on type and braces are elided from this excerpt.
 */
static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
	struct switchdev_obj_port_mdb mdb = {
		.id = SWITCHDEV_OBJ_ID_HOST_MDB,
		.flags = SWITCHDEV_F_DEFER,

	/* Map the IP group address to its multicast MAC. */
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);

	mdb.obj.orig_dev = dev;
	switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
	switchdev_port_obj_del(lower_dev, &mdb.obj);
534 static void br_mdb_switchdev_host(struct net_device *dev,
535 struct net_bridge_mdb_entry *mp, int type)
537 struct net_device *lower_dev;
538 struct list_head *iter;
540 netdev_for_each_lower_dev(dev, lower_dev, iter)
541 br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
/* Notify both hardware (switchdev, deferred) and user space
 * (RTNLGRP_MDB netlink group) about an MDB change.  pg == NULL means a
 * host-joined entry (handled via br_mdb_switchdev_host()).
 *
 * NOTE(review): the "int type" parameter, switch on type, skb/err
 * declarations, allocation-failure branches and error labels are
 * elided from this excerpt.
 */
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.id = SWITCHDEV_OBJ_ID_PORT_MDB,
		.flags = SWITCHDEV_F_DEFER,
	struct net *net = dev_net(dev);

	/* Map the IP group address to its multicast MAC. */
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
	mdb.obj.orig_dev = pg->key.port->dev;
	/* GFP_ATOMIC — may be called from atomic context. */
	complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
	complete_info->port = pg->key.port;
	complete_info->ip = mp->addr;
	mdb.obj.complete_priv = complete_info;
	mdb.obj.complete = br_mdb_complete;
	/* On immediate failure the completion never runs — free here. */
	if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
		kfree(complete_info);
	switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
	br_mdb_switchdev_host(dev, mp, type);
	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	/* error path: report the failure to RTNLGRP_MDB listeners */
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
/* Build a router-port notification: a br_port_msg header plus an
 * MDBA_ROUTER nest carrying the router port's ifindex.
 *
 * NOTE(review): the nest declaration, returns and goto labels are
 * elided from this excerpt; the second nla_nest_end + nlmsg_cancel at
 * the bottom is the cancellation path.
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
	nla_nest_end(skb, nest);
	/* error path: close the nest, then cancel the message */
	nla_nest_end(skb, nest);
	nlmsg_cancel(skb, nlh);
640 static inline size_t rtnl_rtr_nlmsg_size(void)
642 return NLMSG_ALIGN(sizeof(struct br_port_msg))
643 + nla_total_size(sizeof(__u32));
/* Notify RTNLGRP_MDB listeners that a port gained or lost multicast
 * router status.  port == NULL refers to the bridge itself (ifindex 0).
 *
 * NOTE(review): the "int type" parameter, skb/err declarations,
 * allocation-failure handling and error labels are elided from this
 * excerpt.
 */
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
	struct net *net = dev_net(dev);

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	/* error path: report the failure to RTNLGRP_MDB listeners */
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
/* Validate a user-supplied br_mdb_entry: non-zero target ifindex, a
 * routable (non-reserved) multicast group address, a known state and a
 * valid VLAN id.  Sets an extack message on each failure.
 *
 * NOTE(review): the "return false" statements, braces and the
 * catch-all else branch header are elided from this excerpt.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
		/* 224.0.0.0/24 is always flooded — not a valid MDB group. */
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		/* ff02::1 is always flooded — not a valid MDB group. */
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
/* Validate an MDBE_ATTR_SOURCE attribute against the entry's protocol:
 * the address length must match the family and the source itself must
 * not be a multicast address.  Sets an extack message on failure.
 *
 * NOTE(review): the switch header, src declaration, returns and braces
 * are elided from this excerpt; the final message is the default
 * (unknown protocol) case.
 */
static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
	NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
751 static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
752 [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
753 sizeof(struct in_addr),
754 sizeof(struct in6_addr)),
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: resolve the
 * bridge device from bpm->ifindex, require and validate the
 * MDBA_SET_ENTRY payload, and parse the optional MDBA_SET_ENTRY_ATTRS
 * nest (e.g. source address) into mdb_attrs.  On success *pdev and
 * *pentry point at the resolved device and entry.
 *
 * NOTE(review): the err declaration, error returns/braces and the
 * start of the memset that zeroes mdb_attrs in the no-attrs branch are
 * elided from this excerpt.
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");

	dev = __dev_get_by_index(net, bpm->ifindex);
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
	/* Entry must be the exact UAPI struct size. */
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
		/* no-attrs branch: zero mdb_attrs so callers can test */
		sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
/* Add a static MDB group entry under the bridge multicast lock: a host
 * join when the target is the bridge device itself (port handling
 * headers elided here), otherwise a new port group inserted into the
 * entry's sorted port list.
 *
 * NOTE(review): several lines are elided from this excerpt — error
 * returns, the host-join branch header, filter_mode/err declarations,
 * NULL checks and the switch case labels at the bottom; comments
 * describe only the visible logic.
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	/* don't allow any flags for host-joined groups */
	NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
	if (!br_multicast_is_star_g(&group)) {
		NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");

	mp = br_mdb_ip_get(br, &group);
		/* Entry doesn't exist yet — create it. */
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);

	/* host join path */
	if (mp->host_joined) {
		NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
	br_multicast_host_join(mp, false);
	br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

	/* Port-group path: walk the sorted port list, rejecting
	 * duplicates and finding the insertion point.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
		if ((unsigned long)p->key.port < (unsigned long)port)

	/* (*,G) defaults to EXCLUDE, (S,G) to INCLUDE. */
	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :

	p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
					filter_mode, RTPROT_STATIC);
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			/* Look up the corresponding (*,G) entry. */
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
				br_multicast_sg_add_exclude_ports(star_mp, p);
920 static int __br_mdb_add(struct net *net, struct net_bridge *br,
921 struct net_bridge_port *p,
922 struct br_mdb_entry *entry,
923 struct nlattr **mdb_attrs,
924 struct netlink_ext_ack *extack)
928 spin_lock_bh(&br->multicast_lock);
929 ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
930 spin_unlock_bh(&br->multicast_lock);
/* RTM_NEWMDB handler: parse and validate the request, resolve the
 * bridge and (optionally) the member port, then add the entry — once
 * per configured VLAN when vlan filtering is on and no VID was given,
 * otherwise a single add.
 *
 * NOTE(review): the err declaration, error returns, the
 * "entry->vid = v->vid" assignment inside the VLAN loop and several
 * braces are elided from this excerpt.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");

	/* Target is a member port unless the entry names the bridge
	 * device itself (host join).
	 */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");

		p = br_port_get_rtnl(pdev);
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
		vg = nbp_vlan_group(p);
		/* bridge-device target: use the bridge's VLAN group */
		vg = br_vlan_group(br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
/* Delete one MDB entry under the multicast lock: a host leave when the
 * entry's ifindex is the bridge's own and the host is joined,
 * otherwise the matching port group (skipping disabled ports).
 *
 * NOTE(review): the err/ip declarations, unlock goto label and several
 * braces/returns are elided from this excerpt.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		/* No members left — let the entry expire immediately. */
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
		if (p->key.port->state == BR_STATE_DISABLED)
		br_multicast_del_pg(mp, p, pp);
	spin_unlock_bh(&br->multicast_lock);
/* RTM_DELMDB handler — mirror of br_mdb_add(): parse the request,
 * resolve the bridge/port, then delete the entry from every configured
 * VLAN (when vlan filtering is on and no VID was given) or just once.
 *
 * NOTE(review): the err declaration, error returns and several braces
 * are elided from this excerpt.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);

	br = netdev_priv(dev);

	/* Target is a member port unless the entry names the bridge. */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
		vg = nbp_vlan_group(p);
		/* bridge-device target: use the bridge's VLAN group */
		vg = br_vlan_group(br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		err = __br_mdb_del(br, entry, mdb_attrs);
1102 void br_mdb_init(void)
1104 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
1105 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
1106 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
1109 void br_mdb_uninit(void)
1111 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
1112 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
1113 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);