// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
19 static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
20 struct net_device *dev)
22 struct net_bridge *br = netdev_priv(dev);
23 struct net_bridge_port *p;
24 struct nlattr *nest, *port_nest;
26 if (!br->multicast_router || hlist_empty(&br->router_list))
29 nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
33 hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
36 port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
39 if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
40 nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
41 br_timer_value(&p->multicast_router_timer)) ||
42 nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
43 p->multicast_router)) {
44 nla_nest_cancel(skb, port_nest);
47 nla_nest_end(skb, port_nest);
50 nla_nest_end(skb, nest);
53 nla_nest_cancel(skb, nest);
57 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
59 e->state = flags & MDB_PG_FLAGS_PERMANENT;
61 if (flags & MDB_PG_FLAGS_OFFLOAD)
62 e->flags |= MDB_FLAGS_OFFLOAD;
63 if (flags & MDB_PG_FLAGS_FAST_LEAVE)
64 e->flags |= MDB_FLAGS_FAST_LEAVE;
65 if (flags & MDB_PG_FLAGS_STAR_EXCL)
66 e->flags |= MDB_FLAGS_STAR_EXCL;
67 if (flags & MDB_PG_FLAGS_BLOCKED)
68 e->flags |= MDB_FLAGS_BLOCKED;
71 static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
72 struct nlattr **mdb_attrs)
74 memset(ip, 0, sizeof(struct br_ip));
76 ip->proto = entry->addr.proto;
79 ip->dst.ip4 = entry->addr.u.ip4;
80 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
81 ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
83 #if IS_ENABLED(CONFIG_IPV6)
84 case htons(ETH_P_IPV6):
85 ip->dst.ip6 = entry->addr.u.ip6;
86 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
87 ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
91 ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
96 static int __mdb_fill_srcs(struct sk_buff *skb,
97 struct net_bridge_port_group *p)
99 struct net_bridge_group_src *ent;
100 struct nlattr *nest, *nest_ent;
102 if (hlist_empty(&p->src_list))
105 nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
109 hlist_for_each_entry_rcu(ent, &p->src_list, node,
110 lockdep_is_held(&p->key.port->br->multicast_lock)) {
111 nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
114 switch (ent->addr.proto) {
115 case htons(ETH_P_IP):
116 if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
117 ent->addr.src.ip4)) {
118 nla_nest_cancel(skb, nest_ent);
122 #if IS_ENABLED(CONFIG_IPV6)
123 case htons(ETH_P_IPV6):
124 if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
125 &ent->addr.src.ip6)) {
126 nla_nest_cancel(skb, nest_ent);
132 nla_nest_cancel(skb, nest_ent);
135 if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
136 br_timer_value(&ent->timer))) {
137 nla_nest_cancel(skb, nest_ent);
140 nla_nest_end(skb, nest_ent);
143 nla_nest_end(skb, nest);
148 nla_nest_cancel(skb, nest);
152 static int __mdb_fill_info(struct sk_buff *skb,
153 struct net_bridge_mdb_entry *mp,
154 struct net_bridge_port_group *p)
156 bool dump_srcs_mode = false;
157 struct timer_list *mtimer;
158 struct nlattr *nest_ent;
159 struct br_mdb_entry e;
163 memset(&e, 0, sizeof(e));
165 ifindex = p->key.port->dev->ifindex;
169 ifindex = mp->br->dev->ifindex;
173 __mdb_entry_fill_flags(&e, flags);
175 e.vid = mp->addr.vid;
176 if (mp->addr.proto == htons(ETH_P_IP))
177 e.addr.u.ip4 = mp->addr.dst.ip4;
178 #if IS_ENABLED(CONFIG_IPV6)
179 else if (mp->addr.proto == htons(ETH_P_IPV6))
180 e.addr.u.ip6 = mp->addr.dst.ip6;
183 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
184 e.addr.proto = mp->addr.proto;
185 nest_ent = nla_nest_start_noflag(skb,
186 MDBA_MDB_ENTRY_INFO);
190 if (nla_put_nohdr(skb, sizeof(e), &e) ||
192 MDBA_MDB_EATTR_TIMER,
193 br_timer_value(mtimer)))
196 switch (mp->addr.proto) {
197 case htons(ETH_P_IP):
198 dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
199 if (mp->addr.src.ip4) {
200 if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
206 #if IS_ENABLED(CONFIG_IPV6)
207 case htons(ETH_P_IPV6):
208 dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
209 if (!ipv6_addr_any(&mp->addr.src.ip6)) {
210 if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
218 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
221 if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
223 if (dump_srcs_mode &&
224 (__mdb_fill_srcs(skb, p) ||
225 nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
229 nla_nest_end(skb, nest_ent);
234 nla_nest_cancel(skb, nest_ent);
238 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
239 struct net_device *dev)
241 int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
242 struct net_bridge *br = netdev_priv(dev);
243 struct net_bridge_mdb_entry *mp;
244 struct nlattr *nest, *nest2;
246 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
249 nest = nla_nest_start_noflag(skb, MDBA_MDB);
253 hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
254 struct net_bridge_port_group *p;
255 struct net_bridge_port_group __rcu **pp;
260 nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
266 if (!s_pidx && mp->host_joined) {
267 err = __mdb_fill_info(skb, mp, NULL);
269 nla_nest_cancel(skb, nest2);
274 for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
281 err = __mdb_fill_info(skb, mp, p);
283 nla_nest_end(skb, nest2);
291 nla_nest_end(skb, nest2);
299 nla_nest_end(skb, nest);
303 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
304 struct netlink_ext_ack *extack)
306 struct br_port_msg *bpm;
308 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
309 NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
313 bpm = nlmsg_data(nlh);
315 NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
318 if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
319 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
326 static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
328 struct net_device *dev;
329 struct net *net = sock_net(skb->sk);
330 struct nlmsghdr *nlh = NULL;
333 if (cb->strict_check) {
334 int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);
344 cb->seq = net->dev_base_seq;
346 for_each_netdev_rcu(net, dev) {
347 if (dev->priv_flags & IFF_EBRIDGE) {
348 struct br_port_msg *bpm;
353 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
354 cb->nlh->nlmsg_seq, RTM_GETMDB,
355 sizeof(*bpm), NLM_F_MULTI);
359 bpm = nlmsg_data(nlh);
360 memset(bpm, 0, sizeof(*bpm));
361 bpm->ifindex = dev->ifindex;
362 if (br_mdb_fill_info(skb, cb, dev) < 0)
364 if (br_rports_fill_info(skb, cb, dev) < 0)
382 static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
383 struct net_device *dev,
384 struct net_bridge_mdb_entry *mp,
385 struct net_bridge_port_group *pg,
388 struct nlmsghdr *nlh;
389 struct br_port_msg *bpm;
390 struct nlattr *nest, *nest2;
392 nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
396 bpm = nlmsg_data(nlh);
397 memset(bpm, 0, sizeof(*bpm));
398 bpm->family = AF_BRIDGE;
399 bpm->ifindex = dev->ifindex;
400 nest = nla_nest_start_noflag(skb, MDBA_MDB);
403 nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
407 if (__mdb_fill_info(skb, mp, pg))
410 nla_nest_end(skb, nest2);
411 nla_nest_end(skb, nest);
416 nla_nest_end(skb, nest);
418 nlmsg_cancel(skb, nlh);
422 static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
424 size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
425 nla_total_size(sizeof(struct br_mdb_entry)) +
426 nla_total_size(sizeof(u32));
427 struct net_bridge_group_src *ent;
428 size_t addr_size = 0;
433 /* MDBA_MDB_EATTR_RTPROT */
434 nlmsg_size += nla_total_size(sizeof(u8));
436 switch (pg->key.addr.proto) {
437 case htons(ETH_P_IP):
438 /* MDBA_MDB_EATTR_SOURCE */
439 if (pg->key.addr.src.ip4)
440 nlmsg_size += nla_total_size(sizeof(__be32));
441 if (pg->key.port->br->multicast_igmp_version == 2)
443 addr_size = sizeof(__be32);
445 #if IS_ENABLED(CONFIG_IPV6)
446 case htons(ETH_P_IPV6):
447 /* MDBA_MDB_EATTR_SOURCE */
448 if (!ipv6_addr_any(&pg->key.addr.src.ip6))
449 nlmsg_size += nla_total_size(sizeof(struct in6_addr));
450 if (pg->key.port->br->multicast_mld_version == 1)
452 addr_size = sizeof(struct in6_addr);
457 /* MDBA_MDB_EATTR_GROUP_MODE */
458 nlmsg_size += nla_total_size(sizeof(u8));
460 /* MDBA_MDB_EATTR_SRC_LIST nested attr */
461 if (!hlist_empty(&pg->src_list))
462 nlmsg_size += nla_total_size(0);
464 hlist_for_each_entry(ent, &pg->src_list, node) {
465 /* MDBA_MDB_SRCLIST_ENTRY nested attr +
466 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
468 nlmsg_size += nla_total_size(0) +
469 nla_total_size(addr_size) +
470 nla_total_size(sizeof(u32));
476 struct br_mdb_complete_info {
477 struct net_bridge_port *port;
481 static void br_mdb_complete(struct net_device *dev, int err, void *priv)
483 struct br_mdb_complete_info *data = priv;
484 struct net_bridge_port_group __rcu **pp;
485 struct net_bridge_port_group *p;
486 struct net_bridge_mdb_entry *mp;
487 struct net_bridge_port *port = data->port;
488 struct net_bridge *br = port->br;
493 spin_lock_bh(&br->multicast_lock);
494 mp = br_mdb_ip_get(br, &data->ip);
497 for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
499 if (p->key.port != port)
501 p->flags |= MDB_PG_FLAGS_OFFLOAD;
504 spin_unlock_bh(&br->multicast_lock);
509 static void br_mdb_switchdev_host_port(struct net_device *dev,
510 struct net_device *lower_dev,
511 struct net_bridge_mdb_entry *mp,
514 struct switchdev_obj_port_mdb mdb = {
516 .id = SWITCHDEV_OBJ_ID_HOST_MDB,
517 .flags = SWITCHDEV_F_DEFER,
522 if (mp->addr.proto == htons(ETH_P_IP))
523 ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
524 #if IS_ENABLED(CONFIG_IPV6)
526 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
529 mdb.obj.orig_dev = dev;
532 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
535 switchdev_port_obj_del(lower_dev, &mdb.obj);
540 static void br_mdb_switchdev_host(struct net_device *dev,
541 struct net_bridge_mdb_entry *mp, int type)
543 struct net_device *lower_dev;
544 struct list_head *iter;
546 netdev_for_each_lower_dev(dev, lower_dev, iter)
547 br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
550 void br_mdb_notify(struct net_device *dev,
551 struct net_bridge_mdb_entry *mp,
552 struct net_bridge_port_group *pg,
555 struct br_mdb_complete_info *complete_info;
556 struct switchdev_obj_port_mdb mdb = {
558 .id = SWITCHDEV_OBJ_ID_PORT_MDB,
559 .flags = SWITCHDEV_F_DEFER,
563 struct net *net = dev_net(dev);
568 if (mp->addr.proto == htons(ETH_P_IP))
569 ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
570 #if IS_ENABLED(CONFIG_IPV6)
571 else if (mp->addr.proto == htons(ETH_P_IPV6))
572 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
575 ether_addr_copy(mdb.addr, mp->addr.dst.mac_addr);
577 mdb.obj.orig_dev = pg->key.port->dev;
580 complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
583 complete_info->port = pg->key.port;
584 complete_info->ip = mp->addr;
585 mdb.obj.complete_priv = complete_info;
586 mdb.obj.complete = br_mdb_complete;
587 if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
588 kfree(complete_info);
591 switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
595 br_mdb_switchdev_host(dev, mp, type);
598 skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
602 err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
608 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
611 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
614 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
615 struct net_device *dev,
616 int ifindex, u32 pid,
617 u32 seq, int type, unsigned int flags)
619 struct br_port_msg *bpm;
620 struct nlmsghdr *nlh;
623 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
627 bpm = nlmsg_data(nlh);
628 memset(bpm, 0, sizeof(*bpm));
629 bpm->family = AF_BRIDGE;
630 bpm->ifindex = dev->ifindex;
631 nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
635 if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
638 nla_nest_end(skb, nest);
643 nla_nest_end(skb, nest);
645 nlmsg_cancel(skb, nlh);
649 static inline size_t rtnl_rtr_nlmsg_size(void)
651 return NLMSG_ALIGN(sizeof(struct br_port_msg))
652 + nla_total_size(sizeof(__u32));
655 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
658 struct net *net = dev_net(dev);
663 ifindex = port ? port->dev->ifindex : 0;
664 skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
668 err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
674 rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
678 rtnl_set_sk_err(net, RTNLGRP_MDB, err);
681 static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
682 struct netlink_ext_ack *extack)
684 if (entry->ifindex == 0) {
685 NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
689 if (entry->addr.proto == htons(ETH_P_IP)) {
690 if (!ipv4_is_multicast(entry->addr.u.ip4)) {
691 NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
694 if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
695 NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
698 #if IS_ENABLED(CONFIG_IPV6)
699 } else if (entry->addr.proto == htons(ETH_P_IPV6)) {
700 if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
701 NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
705 } else if (entry->addr.proto == 0) {
707 if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
708 NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
712 NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
716 if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
717 NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
720 if (entry->vid >= VLAN_VID_MASK) {
721 NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
728 static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
729 struct netlink_ext_ack *extack)
732 case htons(ETH_P_IP):
733 if (nla_len(attr) != sizeof(struct in_addr)) {
734 NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
737 if (ipv4_is_multicast(nla_get_in_addr(attr))) {
738 NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
742 #if IS_ENABLED(CONFIG_IPV6)
743 case htons(ETH_P_IPV6): {
746 if (nla_len(attr) != sizeof(struct in6_addr)) {
747 NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
750 src = nla_get_in6_addr(attr);
751 if (ipv6_addr_is_multicast(&src)) {
752 NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
759 NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
766 static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
767 [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
768 sizeof(struct in_addr),
769 sizeof(struct in6_addr)),
772 static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
773 struct net_device **pdev, struct br_mdb_entry **pentry,
774 struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
776 struct net *net = sock_net(skb->sk);
777 struct br_mdb_entry *entry;
778 struct br_port_msg *bpm;
779 struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
780 struct net_device *dev;
783 err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
784 MDBA_SET_ENTRY_MAX, NULL, NULL);
788 bpm = nlmsg_data(nlh);
789 if (bpm->ifindex == 0) {
790 NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
794 dev = __dev_get_by_index(net, bpm->ifindex);
796 NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
800 if (!(dev->priv_flags & IFF_EBRIDGE)) {
801 NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
807 if (!tb[MDBA_SET_ENTRY]) {
808 NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
811 if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
812 NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
816 entry = nla_data(tb[MDBA_SET_ENTRY]);
817 if (!is_valid_mdb_entry(entry, extack))
821 if (tb[MDBA_SET_ENTRY_ATTRS]) {
822 err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
823 tb[MDBA_SET_ENTRY_ATTRS],
824 br_mdbe_attrs_pol, extack);
827 if (mdb_attrs[MDBE_ATTR_SOURCE] &&
828 !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
829 entry->addr.proto, extack))
833 sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
839 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
840 struct br_mdb_entry *entry,
841 struct nlattr **mdb_attrs,
842 struct netlink_ext_ack *extack)
844 struct net_bridge_mdb_entry *mp, *star_mp;
845 struct net_bridge_port_group *p;
846 struct net_bridge_port_group __rcu **pp;
847 struct br_ip group, star_group;
848 unsigned long now = jiffies;
849 unsigned char flags = 0;
853 __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
855 /* host join errors which can happen before creating the group */
857 /* don't allow any flags for host-joined groups */
859 NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
862 if (!br_multicast_is_star_g(&group)) {
863 NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
868 if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
869 NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
873 mp = br_mdb_ip_get(br, &group);
875 mp = br_multicast_new_group(br, &group);
876 err = PTR_ERR_OR_ZERO(mp);
883 if (mp->host_joined) {
884 NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
888 br_multicast_host_join(mp, false);
889 br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
894 for (pp = &mp->ports;
895 (p = mlock_dereference(*pp, br)) != NULL;
897 if (p->key.port == port) {
898 NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
901 if ((unsigned long)p->key.port < (unsigned long)port)
905 filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
908 if (entry->state == MDB_PERMANENT)
909 flags |= MDB_PG_FLAGS_PERMANENT;
911 p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
912 filter_mode, RTPROT_STATIC);
914 NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
917 rcu_assign_pointer(*pp, p);
918 if (entry->state == MDB_TEMPORARY)
919 mod_timer(&p->timer, now + br->multicast_membership_interval);
920 br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
921 /* if we are adding a new EXCLUDE port group (*,G) it needs to be also
922 * added to all S,G entries for proper replication, if we are adding
923 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
924 * added to it for proper replication
926 if (br_multicast_should_handle_mode(br, group.proto)) {
927 switch (filter_mode) {
929 br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
932 star_group = p->key.addr;
933 memset(&star_group.src, 0, sizeof(star_group.src));
934 star_mp = br_mdb_ip_get(br, &star_group);
936 br_multicast_sg_add_exclude_ports(star_mp, p);
944 static int __br_mdb_add(struct net *net, struct net_bridge *br,
945 struct net_bridge_port *p,
946 struct br_mdb_entry *entry,
947 struct nlattr **mdb_attrs,
948 struct netlink_ext_ack *extack)
952 spin_lock_bh(&br->multicast_lock);
953 ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
954 spin_unlock_bh(&br->multicast_lock);
959 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
960 struct netlink_ext_ack *extack)
962 struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
963 struct net *net = sock_net(skb->sk);
964 struct net_bridge_vlan_group *vg;
965 struct net_bridge_port *p = NULL;
966 struct net_device *dev, *pdev;
967 struct br_mdb_entry *entry;
968 struct net_bridge_vlan *v;
969 struct net_bridge *br;
972 err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
976 br = netdev_priv(dev);
978 if (!netif_running(br->dev)) {
979 NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
983 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
984 NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
988 if (entry->ifindex != br->dev->ifindex) {
989 pdev = __dev_get_by_index(net, entry->ifindex);
991 NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
995 p = br_port_get_rtnl(pdev);
997 NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
1002 NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
1005 if (p->state == BR_STATE_DISABLED) {
1006 NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
1009 vg = nbp_vlan_group(p);
1011 vg = br_vlan_group(br);
1014 /* If vlan filtering is enabled and VLAN is not specified
1015 * install mdb entry on all vlans configured on the port.
1017 if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1018 list_for_each_entry(v, &vg->vlan_list, vlist) {
1019 entry->vid = v->vid;
1020 err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
1025 err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
1031 static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
1032 struct nlattr **mdb_attrs)
1034 struct net_bridge_mdb_entry *mp;
1035 struct net_bridge_port_group *p;
1036 struct net_bridge_port_group __rcu **pp;
1040 if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
1043 __mdb_entry_to_br_ip(entry, &ip, mdb_attrs);
1045 spin_lock_bh(&br->multicast_lock);
1046 mp = br_mdb_ip_get(br, &ip);
1051 if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
1052 br_multicast_host_leave(mp, false);
1054 br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
1055 if (!mp->ports && netif_running(br->dev))
1056 mod_timer(&mp->timer, jiffies);
1060 for (pp = &mp->ports;
1061 (p = mlock_dereference(*pp, br)) != NULL;
1063 if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
1066 if (p->key.port->state == BR_STATE_DISABLED)
1069 br_multicast_del_pg(mp, p, pp);
1075 spin_unlock_bh(&br->multicast_lock);
1079 static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
1080 struct netlink_ext_ack *extack)
1082 struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
1083 struct net *net = sock_net(skb->sk);
1084 struct net_bridge_vlan_group *vg;
1085 struct net_bridge_port *p = NULL;
1086 struct net_device *dev, *pdev;
1087 struct br_mdb_entry *entry;
1088 struct net_bridge_vlan *v;
1089 struct net_bridge *br;
1092 err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
1096 br = netdev_priv(dev);
1098 if (entry->ifindex != br->dev->ifindex) {
1099 pdev = __dev_get_by_index(net, entry->ifindex);
1103 p = br_port_get_rtnl(pdev);
1104 if (!p || p->br != br || p->state == BR_STATE_DISABLED)
1106 vg = nbp_vlan_group(p);
1108 vg = br_vlan_group(br);
1111 /* If vlan filtering is enabled and VLAN is not specified
1112 * delete mdb entry on all vlans configured on the port.
1114 if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
1115 list_for_each_entry(v, &vg->vlan_list, vlist) {
1116 entry->vid = v->vid;
1117 err = __br_mdb_del(br, entry, mdb_attrs);
1120 err = __br_mdb_del(br, entry, mdb_attrs);
1126 void br_mdb_init(void)
1128 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
1129 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
1130 rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
1133 void br_mdb_uninit(void)
1135 rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
1136 rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
1137 rtnl_unregister(PF_BRIDGE, RTM_DELMDB);