1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Linux ethernet bridge
7 * Lennert Buytenhek <buytenh@gnu.org>
10 #include <linux/kernel.h>
11 #include <linux/netdevice.h>
12 #include <linux/etherdevice.h>
13 #include <linux/netpoll.h>
14 #include <linux/ethtool.h>
15 #include <linux/if_arp.h>
16 #include <linux/module.h>
17 #include <linux/init.h>
18 #include <linux/rtnetlink.h>
19 #include <linux/if_ether.h>
20 #include <linux/slab.h>
23 #include <linux/if_vlan.h>
24 #include <net/switchdev.h>
25 #include <net/net_namespace.h>
27 #include "br_private.h"
30 * Determine initial path cost based on speed.
31 * using recommendations from 802.1d standard
33 * Since driver might sleep need to not be holding any locks.
35 static int port_cost(struct net_device *dev)
37 	struct ethtool_link_ksettings ecmd;
	/* Prefer the driver-reported link speed when ethtool succeeds. */
39 	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
40 		switch (ecmd.base.speed) {
	/* NOTE(review): per-speed cost cases are elided in this view. */
56 		if (ecmd.base.speed > SPEED_10000)
	/* No usable speed info: fall back to interface-name heuristics. */
61 	/* Old silly heuristics based on name */
62 	if (!strncmp(dev->name, "lec", 3))
65 	if (!strncmp(dev->name, "plip", 4))
	/* Default cost when nothing else matched. */
68 	return 100;	/* assume old 10Mbps */
72 /* Check for port carrier transitions. */
73 void br_port_carrier_check(struct net_bridge_port *p, bool *notified)
75 	struct net_device *dev = p->dev;
76 	struct net_bridge *br = p->br;
	/* Refresh the path cost from link speed unless the admin pinned it
	 * (BR_ADMIN_COST) and only while the link is actually up.
	 */
78 	if (!(p->flags & BR_ADMIN_COST) &&
79 	    netif_running(dev) && netif_oper_up(dev))
80 		p->path_cost = port_cost(dev);
	/* Nothing to do while the bridge device itself is down. */
83 	if (!netif_running(br->dev))
	/* STP state changes are serialized by the bridge lock. */
86 	spin_lock_bh(&br->lock);
87 	if (netif_running(dev) && netif_oper_up(dev)) {
88 		if (p->state == BR_STATE_DISABLED) {
89 			br_stp_enable_port(p);
	/* NOTE(review): else-branch header and *notified updates are elided;
	 * presumably *notified is set when a notification is emitted — verify
	 * against the full source.
	 */
93 		if (p->state != BR_STATE_DISABLED) {
94 			br_stp_disable_port(p);
98 	spin_unlock_bh(&br->lock);
/* Put a port into promiscuous mode and stop syncing the bridge's static
 * FDB entries to its unicast filter (promisc makes the sync unnecessary).
 */
101 static void br_port_set_promisc(struct net_bridge_port *p)
	/* Already promiscuous: nothing to do. */
105 	if (br_promisc_port(p))
108 	err = dev_set_promiscuity(p->dev, 1);
	/* NOTE(review): the err check between these lines is elided. */
112 	br_fdb_unsync_static(p->br, p);
113 	p->flags |= BR_PROMISC;
/* Take a port out of promiscuous mode, first programming the bridge's
 * static FDB entries into the port's unicast filter so traffic is not
 * interrupted while the mode flips.
 */
116 static void br_port_clear_promisc(struct net_bridge_port *p)
120 	/* Check if the port is already non-promisc or if it doesn't
121 	 * support UNICAST filtering. Without unicast filtering support
122 	 * we'll end up re-enabling promisc mode anyway, so just check for
125 	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
128 	/* Since we'll be clearing the promisc mode, program the port
129 	 * first so that we don't have interruption in traffic.
131 	err = br_fdb_sync_static(p->br, p);
	/* NOTE(review): err handling between these lines is elided. */
135 	dev_set_promiscuity(p->dev, -1);
136 	p->flags &= ~BR_PROMISC;
139 /* When a port is added or removed or when certain port flags
140 * change, this function is called to automatically manage
141 * promiscuity setting of all the bridge ports. We are always called
142 * under RTNL so can skip using rcu primitives.
144 void br_manage_promisc(struct net_bridge *br)
146 	struct net_bridge_port *p;
147 	bool set_all = false;
149 	/* If vlan filtering is disabled or bridge interface is placed
150 	 * into promiscuous mode, place all ports in promiscuous mode.
152 	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br->dev))
	/* NOTE(review): set_all assignment is elided here. */
155 	list_for_each_entry(p, &br->port_list, list) {
		/* set_all branch: force promisc on every port. */
157 			br_port_set_promisc(p);
159 			/* If the number of auto-ports is <= 1, then all other
160 			 * ports will have their output configuration
161 			 * statically specified through fdbs. Since ingress
162 			 * on the auto-port becomes forwarding/egress to other
163 			 * ports and egress configuration is statically known,
164 			 * we can say that ingress configuration of the
165 			 * auto-port is also statically known.
166 			 * This lets us disable promiscuous mode and write
169 			if (br->auto_cnt == 0 ||
170 			    (br->auto_cnt == 1 && br_auto_port(p)))
171 				br_port_clear_promisc(p);
	/* Otherwise more than one auto-port exists: keep promisc on. */
173 				br_port_set_promisc(p);
/* Change (or clear, when backup_dev is NULL) the backup port of @p.
 * Called under RTNL; backup_redirected_cnt tracks how many ports
 * currently redirect to a given backup port.
 */
178 int nbp_backup_change(struct net_bridge_port *p,
179 		      struct net_device *backup_dev)
181 	struct net_bridge_port *old_backup = rtnl_dereference(p->backup_port);
182 	struct net_bridge_port *backup_p = NULL;
	/* The backup device must itself be a bridge port... */
187 		if (!netif_is_bridge_port(backup_dev))
190 		backup_p = br_port_get_rtnl(backup_dev);
	/* ...and belong to the same bridge as @p. */
191 		if (backup_p->br != p->br)
	/* No-op if the backup is unchanged. */
198 	if (old_backup == backup_p)
201 	/* if the backup link is already set, clear it */
203 		old_backup->backup_redirected_cnt--;
	/* NOTE(review): the NULL-check around the new backup_p is elided. */
206 		backup_p->backup_redirected_cnt++;
207 	rcu_assign_pointer(p->backup_port, backup_p);
/* Called when a port is being deleted: drop its own backup link and
 * unlink every sibling port that was using @p as its backup.
 */
212 static void nbp_backup_clear(struct net_bridge_port *p)
214 	nbp_backup_change(p, NULL);
215 	if (p->backup_redirected_cnt) {
216 		struct net_bridge_port *cur_p;
218 		list_for_each_entry(cur_p, &p->br->port_list, list) {
219 			struct net_bridge_port *backup_p;
221 			backup_p = rtnl_dereference(cur_p->backup_port);
	/* NOTE(review): the backup_p == p comparison is elided here. */
223 				nbp_backup_change(cur_p, NULL);
	/* By now nothing may still point at @p as a backup. */
227 	WARN_ON(rcu_access_pointer(p->backup_port) || p->backup_redirected_cnt);
/* Recount the bridge's "auto" ports (those learning via promisc) and
 * re-run promiscuity management when the count changed.
 */
230 static void nbp_update_port_count(struct net_bridge *br)
232 	struct net_bridge_port *p;
	/* NOTE(review): cnt declaration and the br_auto_port() increment in
	 * the loop body are elided in this view.
	 */
235 	list_for_each_entry(p, &br->port_list, list) {
239 	if (br->auto_cnt != cnt) {
241 		br_manage_promisc(br);
/* Undo a port's multicast/promiscuity setup when it leaves the bridge. */
245 static void nbp_delete_promisc(struct net_bridge_port *p)
247 	/* If port is currently promiscuous, unset promiscuity.
248 	 * Otherwise, it is a static port so remove all addresses
	/* Drop the allmulti reference taken in br_add_if(). */
251 	dev_set_allmulti(p->dev, -1);
252 	if (br_promisc_port(p))
253 		dev_set_promiscuity(p->dev, -1);
	/* Static (non-promisc) port: remove the synced FDB addresses. */
255 		br_fdb_unsync_static(p->br, p);
/* kobject release callback: final free of the port structure
 * (the kfree itself is elided in this view — presumably kfree(p)).
 */
258 static void release_nbp(struct kobject *kobj)
260 	struct net_bridge_port *p
261 		= container_of(kobj, struct net_bridge_port, kobj);
/* sysfs ownership of brport attributes follows the port device's
 * network namespace (so containers own their own bridge-port files).
 */
265 static void brport_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t *gid)
267 	struct net_bridge_port *p = kobj_to_brport(kobj);
269 	net_ns_get_ownership(dev_net(p->dev), uid, gid);
/* kobject type backing each bridge port's sysfs directory. */
272 static const struct kobj_type brport_ktype = {
274 	.sysfs_ops = &brport_sysfs_ops,
276 	.release = release_nbp,
277 	.get_ownership = brport_get_ownership,
/* Final teardown of a port after the RCU grace period: drop the tracked
 * device reference, then the kobject ref (which ends in release_nbp()).
 */
280 static void destroy_nbp(struct net_bridge_port *p)
282 	struct net_device *dev = p->dev;
286 	netdev_put(dev, &p->dev_tracker);
288 	kobject_put(&p->kobj);
/* RCU callback trampoline; presumably calls destroy_nbp(p) on the
 * port embedded around @head (the call itself is elided in this view).
 */
291 static void destroy_nbp_rcu(struct rcu_head *head)
293 	struct net_bridge_port *p =
294 			container_of(head, struct net_bridge_port, rcu);
/* Return the maximum forwarding headroom required by any enslaved port
 * (used to size the bridge device's needed_headroom).
 */
298 static unsigned get_max_headroom(struct net_bridge *br)
300 	unsigned max_headroom = 0;
301 	struct net_bridge_port *p;
303 	list_for_each_entry(p, &br->port_list, list) {
304 		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);
306 		if (dev_headroom > max_headroom)
307 			max_headroom = dev_headroom;
/* Propagate a new headroom value to every port and to the bridge
 * device itself.
 */
313 static void update_headroom(struct net_bridge *br, int new_hr)
315 	struct net_bridge_port *p;
317 	list_for_each_entry(p, &br->port_list, list)
318 		netdev_set_rx_headroom(p->dev, new_hr);
320 	br->dev->needed_headroom = new_hr;
323 /* Delete port(interface) from bridge is done in two steps.
324 * via RCU. First step, marks device as down. That deletes
325 * all the timers and stops new packets from flowing through.
327 * Final cleanup doesn't occur until after all CPU's finished
328 * processing packets.
330 * Protected from multiple admin operations by RTNL mutex
332 static void del_nbp(struct net_bridge_port *p)
334 	struct net_bridge *br = p->br;
335 	struct net_device *dev = p->dev;
	/* Remove the brif/<port> sysfs symlink first. */
337 	sysfs_remove_link(br->ifobj, p->dev->name);
339 	nbp_delete_promisc(p);
	/* Stop STP on the port under the bridge lock. */
341 	spin_lock_bh(&br->lock);
342 	br_stp_disable_port(p);
343 	spin_unlock_bh(&br->lock);
	/* Tear down MRP/CFM protocol state tied to this port. */
345 	br_mrp_port_del(br, p);
346 	br_cfm_port_del(br, p);
348 	br_ifinfo_notify(RTM_DELLINK, NULL, p);
	/* Unlink from the port list; readers may still see it until RCU. */
350 	list_del_rcu(&p->list);
	/* Shrink headroom if this port was the one demanding the maximum. */
351 	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
352 		update_headroom(br, get_max_headroom(br));
353 	netdev_reset_rx_headroom(dev);
356 	br_fdb_delete_by_port(br, p, 0, 1);
357 	switchdev_deferred_process();
360 	nbp_update_port_count(br);
362 	netdev_upper_dev_unlink(dev, br->dev);
364 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
366 	netdev_rx_handler_unregister(dev);
368 	br_multicast_del_port(p);
370 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
371 	kobject_del(&p->kobj);
373 	br_netpoll_disable(p);
	/* Actual free is deferred until all CPUs stop seeing the port. */
375 	call_rcu(&p->rcu, destroy_nbp_rcu);
378 /* Delete bridge device */
379 void br_dev_delete(struct net_device *dev, struct list_head *head)
381 	struct net_bridge *br = netdev_priv(dev);
382 	struct net_bridge_port *p, *n;
	/* Detach every port (safe iteration — del_nbp unlinks entries).
	 * NOTE(review): the del_nbp(p) call inside the loop is elided.
	 */
384 	list_for_each_entry_safe(p, n, &br->port_list, list) {
388 	br_recalculate_neigh_suppress_enabled(br);
	/* Flush all remaining FDB entries and stop the GC worker. */
390 	br_fdb_delete_by_port(br, NULL, 0, 1);
392 	cancel_delayed_work_sync(&br->gc_work);
394 	br_sysfs_delbr(br->dev);
395 	unregister_netdevice_queue(br->dev, head);
398 /* find an available port number */
399 static int find_portno(struct net_bridge *br)
402 	struct net_bridge_port *p;
403 	unsigned long *inuse;
405 	inuse = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
	/* NOTE(review): the NULL check on inuse is elided here. */
409 	__set_bit(0, inuse);	/* zero is reserved */
410 	list_for_each_entry(p, &br->port_list, list)
411 		__set_bit(p->port_no, inuse);
413 	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	/* NOTE(review): bitmap_free(inuse) is elided before the return. */
416 	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
419 /* called with RTNL but without bridge lock */
420 static struct net_bridge_port *new_nbp(struct net_bridge *br,
421 				       struct net_device *dev)
423 	struct net_bridge_port *p;
	/* Pick a free port number; propagate -EXFULL etc. as ERR_PTR. */
426 	index = find_portno(br);
428 		return ERR_PTR(index);
430 	p = kzalloc(sizeof(*p), GFP_KERNEL);
432 		return ERR_PTR(-ENOMEM);
	/* Hold the device (tracked) for the lifetime of the port. */
435 	netdev_hold(dev, &p->dev_tracker, GFP_KERNEL);
437 	p->path_cost = port_cost(dev);
	/* Default STP priority (upper bits of the port id). */
438 	p->priority = 0x8000 >> BR_PORT_BITS;
440 	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
442 	br_set_state(p, BR_STATE_DISABLED);
443 	br_stp_port_timer_init(p);
444 	err = br_multicast_add_port(p);
	/* NOTE(review): the err check and kfree on failure are elided. */
446 		netdev_put(dev, &p->dev_tracker);
/* Create and register a new bridge device named @name in @net.
 * NOTE(review): allocation-failure check, setup callback argument and
 * the error/return path are elided in this view.
 */
454 int br_add_bridge(struct net *net, const char *name)
456 	struct net_device *dev;
459 	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
465 	dev_net_set(dev, net);
466 	dev->rtnl_link_ops = &br_link_ops;
468 	res = register_netdevice(dev);
/* Delete the bridge named @name: refuse for non-bridge devices and for
 * bridges that are still up.
 */
474 int br_del_bridge(struct net *net, const char *name)
476 	struct net_device *dev;
479 	dev = __dev_get_by_name(net, name);
481 		ret = -ENXIO;	/* Could not find device */
483 	else if (!netif_is_bridge_master(dev)) {
484 		/* Attempt to delete non bridge device! */
	/* NOTE(review): the -EPERM assignment is elided here. */
488 	else if (dev->flags & IFF_UP) {
489 		/* Not shutdown yet. */
	/* NOTE(review): the -EBUSY assignment is elided here. */
494 		br_dev_delete(dev, NULL);
499 /* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
500 static int br_mtu_min(const struct net_bridge *br)
502 	const struct net_bridge_port *p;
	/* NOTE(review): ret_mtu's declaration/initialization is elided. */
505 	list_for_each_entry(p, &br->port_list, list)
506 		if (!ret_mtu || ret_mtu > p->dev->mtu)
507 			ret_mtu = p->dev->mtu;
	/* No ports (ret_mtu still 0): fall back to the Ethernet default. */
509 	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
/* Track the minimum port MTU automatically, but never override an MTU
 * the administrator set by hand.
 */
512 void br_mtu_auto_adjust(struct net_bridge *br)
516 	/* if the bridge MTU was manually configured don't mess with it */
517 	if (br_opt_get(br, BROPT_MTU_SET_BY_USER))
520 	/* change to the minimum MTU and clear the flag which was set by
521 	 * the bridge ndo_change_mtu callback
523 	dev_set_mtu(br->dev, br_mtu_min(br));
524 	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, false);
/* Clamp the bridge device's TSO limits to the most restrictive of its
 * ports so segmentation offload never exceeds what any port can do.
 */
527 static void br_set_gso_limits(struct net_bridge *br)
529 	unsigned int tso_max_size = TSO_MAX_SIZE;
530 	const struct net_bridge_port *p;
531 	u16 tso_max_segs = TSO_MAX_SEGS;
533 	list_for_each_entry(p, &br->port_list, list) {
534 		tso_max_size = min(tso_max_size, p->dev->tso_max_size);
535 		tso_max_segs = min(tso_max_segs, p->dev->tso_max_segs);
537 	netif_set_tso_max_size(br->dev, tso_max_size);
538 	netif_set_tso_max_segs(br->dev, tso_max_segs);
542 * Recomputes features using slave's features
544 netdev_features_t br_features_recompute(struct net_bridge *br,
545 					netdev_features_t features)
547 	struct net_bridge_port *p;
548 	netdev_features_t mask;
	/* No ports yet: features pass through unchanged (return elided). */
550 	if (list_empty(&br->port_list))
	/* NOTE(review): mask initialization is elided in this view. */
554 	features &= ~NETIF_F_ONE_FOR_ALL;
	/* Intersect/accumulate each port's feature set into the result. */
556 	list_for_each_entry(p, &br->port_list, list) {
557 		features = netdev_increment_features(features,
558 						     p->dev->features, mask);
560 	features = netdev_add_tso_features(features, mask);
565 /* called with RTNL */
566 int br_add_if(struct net_bridge *br, struct net_device *dev,
567 	      struct netlink_ext_ack *extack)
569 	struct net_bridge_port *p;
571 	unsigned br_hr, dev_hr;
572 	bool changed_addr, fdb_synced = false;
574 	/* Don't allow bridging non-ethernet like devices. */
575 	if ((dev->flags & IFF_LOOPBACK) ||
576 	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
577 	    !is_valid_ether_addr(dev->dev_addr))
580 	/* No bridging of bridges */
581 	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) {
582 		NL_SET_ERR_MSG(extack,
583 			       "Can not enslave a bridge to a bridge");
587 	/* Device has master upper dev */
588 	if (netdev_master_upper_dev_get(dev))
591 	/* No bridging devices that dislike that (e.g. wireless) */
592 	if (dev->priv_flags & IFF_DONT_BRIDGE) {
593 		NL_SET_ERR_MSG(extack,
594 			       "Device does not allow enslaving to a bridge");
	/* Allocate and initialize the port structure. */
598 	p = new_nbp(br, dev);
602 	call_netdevice_notifiers(NETDEV_JOIN, dev);
	/* Take an allmulti reference; undone in nbp_delete_promisc(). */
604 	err = dev_set_allmulti(dev, 1);
	/* Failure here: kobject is not initialized yet, so free by hand. */
606 		br_multicast_del_port(p);
607 		netdev_put(dev, &p->dev_tracker);
608 		kfree(p);	/* kobject not yet init'd, manually free */
	/* sysfs: create brport directory and attributes, then netpoll. */
612 	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
613 				   SYSFS_BRIDGE_PORT_ATTR);
617 	err = br_sysfs_addif(p);
621 	err = br_netpoll_enable(p);
	/* Hook the bridge's receive handler into the device. */
625 	err = netdev_rx_handler_register(dev, br_get_rx_handler(dev), p);
629 	dev->priv_flags |= IFF_BRIDGE_PORT;
631 	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, extack);
	/* LRO must be off on bridged devices (breaks forwarding). */
635 	dev_disable_lro(dev);
637 	list_add_rcu(&p->list, &br->port_list);
639 	nbp_update_port_count(br);
640 	if (!br_promisc_port(p) && (p->dev->priv_flags & IFF_UNICAST_FLT)) {
641 		/* When updating the port count we also update all ports'
643 		 * A port leaving promiscuous mode normally gets the bridge's
644 		 * fdb synced to the unicast filter (if supported), however,
645 		 * `br_port_clear_promisc` does not distinguish between
646 		 * non-promiscuous ports and *new* ports, so we need to
647 		 * sync explicitly here.
649 		fdb_synced = br_fdb_sync_static(br, p) == 0;
651 			netdev_err(dev, "failed to sync bridge static fdb addresses to this port\n");
654 	netdev_update_features(br->dev);
	/* Grow headroom to the larger of bridge vs. new port. */
656 	br_hr = br->dev->needed_headroom;
657 	dev_hr = netdev_get_fwd_headroom(dev);
659 		update_headroom(br, dev_hr);
661 		netdev_set_rx_headroom(dev, br_hr);
663 	if (br_fdb_add_local(br, p, dev->dev_addr, 0))
664 		netdev_err(dev, "failed insert local address bridge forwarding table\n");
666 	if (br->dev->addr_assign_type != NET_ADDR_SET) {
667 		/* Ask for permission to use this MAC address now, even if we
668 		 * don't end up choosing it below.
670 		err = dev_pre_changeaddr_notify(br->dev, dev->dev_addr, extack);
675 	err = nbp_vlan_init(p, extack);
677 		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
	/* Recompute bridge id and possibly start STP on the new port. */
681 	spin_lock_bh(&br->lock);
682 	changed_addr = br_stp_recalculate_bridge_id(br);
684 	if (netif_running(dev) && netif_oper_up(dev) &&
685 	    (br->dev->flags & IFF_UP))
686 		br_stp_enable_port(p);
687 	spin_unlock_bh(&br->lock);
689 	br_ifinfo_notify(RTM_NEWLINK, NULL, p);
	/* changed_addr guard appears elided around this notifier call. */
692 		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
694 	br_mtu_auto_adjust(br);
695 	br_set_gso_limits(br);
697 	kobject_uevent(&p->kobj, KOBJ_ADD);
	/* Error unwind: undo everything above in reverse order.
	 * NOTE(review): the goto labels themselves are elided in this view.
	 */
703 		br_fdb_unsync_static(br, p);
704 	list_del_rcu(&p->list);
705 	br_fdb_delete_by_port(br, p, 0, 1);
706 	nbp_update_port_count(br);
707 	netdev_upper_dev_unlink(dev, br->dev);
709 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
710 	netdev_rx_handler_unregister(dev);
712 	br_netpoll_disable(p);
714 	sysfs_remove_link(br->ifobj, p->dev->name);
716 	br_multicast_del_port(p);
717 	netdev_put(dev, &p->dev_tracker);
718 	kobject_put(&p->kobj);
719 	dev_set_allmulti(dev, -1);
724 /* called with RTNL */
725 int br_del_if(struct net_bridge *br, struct net_device *dev)
727 	struct net_bridge_port *p;
	/* The device must actually be a port of *this* bridge. */
730 	p = br_port_get_rtnl(dev);
731 	if (!p || p->br != br)
734 	/* Since more than one interface can be attached to a bridge,
735 	 * there still maybe an alternate path for netconsole to use;
736 	 * therefore there is no reason for a NETDEV_RELEASE event.
	/* NOTE(review): the del_nbp(p) call is elided between these lines. */
740 	br_mtu_auto_adjust(br);
741 	br_set_gso_limits(br);
742	/* Removing a port may change the elected bridge address. */
743 	spin_lock_bh(&br->lock);
744 	changed_addr = br_stp_recalculate_bridge_id(br);
745 	spin_unlock_bh(&br->lock);
	/* changed_addr guard appears elided around this notifier call. */
748 		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
750 	netdev_update_features(br->dev);
/* React to changes of port flags: flags in BR_AUTO_MASK affect the
 * auto-port count (and thus promiscuity), BR_NEIGH_SUPPRESS affects
 * the bridge-wide neigh-suppress state.
 */
755 void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
757 	struct net_bridge *br = p->br;
759 	if (mask & BR_AUTO_MASK)
760 		nbp_update_port_count(br);
762 	if (mask & BR_NEIGH_SUPPRESS)
763 		br_recalculate_neigh_suppress_enabled(br);
/* Test a bridge-port flag on @dev; callable under RTNL or RCU.
 * NOTE(review): the NULL-port early return is elided in this view.
 */
766 bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
768 	struct net_bridge_port *p;
770 	p = br_port_get_rtnl_rcu(dev);
774 	return p->flags & flag;
776 EXPORT_SYMBOL_GPL(br_port_flag_is_set);