// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
static bool br_rports_have_mc_router(struct net_bridge *br)
{
#if IS_ENABLED(CONFIG_IPV6)
	return !hlist_empty(&br->ip4_mc_router_list) ||
	       !hlist_empty(&br->ip6_mc_router_list);
#else
	return !hlist_empty(&br->ip4_mc_router_list);
#endif
}

static bool
br_ip4_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
{
	*timer = br_timer_value(&port->ip4_mc_router_timer);
	return !hlist_unhashed(&port->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&port->ip6_mc_router_timer);
	return !hlist_unhashed(&port->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}
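
/* Dump the bridge's multicast router ports as an MDBA_ROUTER nested
 * attribute: one MDBA_ROUTER_PORT nest per port, carrying the larger of
 * the IPv4/IPv6 router timers, the configured router type, and the
 * per-family timers that are actually running.
 */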
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!br->multicast_router)
		return 0;

	if (!br_rports_have_mc_router(br))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		have_ip4_mc_rtr = br_ip4_rports_get_timer(p, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(p, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}

		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
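
/* Translate internal MDB_PG_FLAGS_* port group flags into the UAPI
 * state and MDB_FLAGS_* bits reported in struct br_mdb_entry.
 */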
static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT ?
		   MDB_PERMANENT : MDB_TEMPORARY;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}
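
/* Dump a port group's source list as MDBA_MDB_EATTR_SRC_LIST, one nested
 * MDBA_MDB_SRCLIST_ENTRY (address + remaining timer) per source. Callers
 * hold either rcu_read_lock() or br->multicast_lock, hence the lockdep
 * annotation on the hlist walk.
 */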
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
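
/* Fill one MDBA_MDB_ENTRY_INFO, for either a port group (p != NULL) or
 * the host-joined entry (p == NULL). The source list and group filter
 * mode are only dumped when the running IGMP/MLD version supports them.
 */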
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	else
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
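
/* Walk br->mdb_list under RCU and dump all entries, resuming from
 * cb->args[] so that a partial dump can continue on the next call.
 */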
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				goto out;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}
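
/* RTM_GETMDB handler: iterate over all bridges in the netns and emit one
 * NLM_F_MULTI message per bridge with its MDB entries and router ports.
 */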
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}

out:
	return nlmsg_size;
}
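
/* Context handed to the switchdev completion callback so the offloaded
 * flag can be set on the right port group once the driver has programmed
 * the entry.
 */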
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
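
/* Map an MDB entry to its switchdev object representation: IPv4/IPv6
 * groups are converted to the corresponding multicast MAC address, L2
 * groups are copied verbatim.
 */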
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			     const struct switchdev_obj_port_mdb *mdb,
			     unsigned long action, const void *ctx,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_mdb_queue_one(struct list_head *mdb_list,
			    enum switchdev_obj_id id,
			    const struct net_bridge_mdb_entry *mp,
			    struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}
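
/* Replay the current MDB state (host-joined groups and port groups of
 * @dev) towards @nb as SWITCHDEV_PORT_OBJ_ADD or _DEL events. Used by
 * switchdev drivers whose port joins or leaves a bridge after MDB
 * entries already exist.
 */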
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
		  const void *ctx, bool adding, struct notifier_block *nb,
		  struct netlink_ext_ack *extack)
{
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_HOST_MDB,
					       mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_PORT_MDB,
					       mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
					action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	return err;
}
EXPORT_SYMBOL_GPL(br_mdb_replay);

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}
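
/* Notify both switchdev (so that offloading drivers can program the
 * entry) and userspace listeners on RTNLGRP_MDB about an MDB change.
 */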
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		br_switchdev_mdb_populate(&mdb, mp);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb entry */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}
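
/* Add a new port group, or a host-joined group when @port is NULL, for
 * @entry. Runs under br->multicast_lock; validation that depends only on
 * the entry itself is done before the group is created.
 */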
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
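
/* Register the RTM_{GET,NEW,DEL}MDB handlers. Userspace reaches these
 * through AF_BRIDGE rtnetlink, e.g. with iproute2 (illustrative):
 *
 *   bridge mdb add dev br0 port swp1 grp 239.1.1.1 permanent vid 10
 *   bridge mdb show dev br0
 */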
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}