// SPDX-License-Identifier: GPL-1.0+
/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to a Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will setup a network device, with an ip address.  No mac address
 *	will be assigned at this time.  The hw mac address will come from
 *	the first slave bonded to the channel.  All slaves will then use
 *	this hw mac address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave.  eth0 hw mac address will either
 *	a: be used as initial mac address
 *	b: if a hw mac address already is there, eth0's hw mac address
 *	   will then be set from bond0.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>
#include <net/xdp.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds	= BOND_DEFAULT_MAX_BONDS;
static int tx_queues	= BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier	= 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slaves peer switch. The default is 1.");

/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

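/* bond_sk_check - modes for which a socket's egress device can be pinned
 * to a single slave (used by the socket/TLS lower-device lookup): 802.3ad
 * and balance-xor, provided the xmit policy is layer3+4.
 */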
static bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

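/* bond_xdp_check - modes under which an XDP program may be attached to
 * the bond: round-robin and active-backup unconditionally, 802.3ad and
 * balance-xor unless the vlan+srcmac hash policy is in use.
 */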
static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the 802.1q
		 * payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
 * we don't protect the slave list iteration with a lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 * @extack: extack point to fill failure reason
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs,
			     struct netlink_ext_ack *extack)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves.  If any slaves are up, the master is up.  In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

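/* Map a BOND_LINK_* state to the human-readable string used in logs
 * and /proc/net/bonding output.
 */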
const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* if <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them).  If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per team member
		 *       bases to make this more efficient. that is, once
		 *       we determine the correct ioctl, we will always
		 *       call it and not the others for that team
		 *       member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status).  If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any).  Modes that are not using primary keep all
 * slaves up to date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
		}
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

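/* Find the slave, other than @new_active, that currently carries the
 * bond's MAC address; used by fail_over_mac=follow to pick the device
 * whose address must be swapped.
 */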
static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}

/**
 * bond_choose_primary_or_current - select the primary or high priority slave
 * @bond: our bonding struct
 *
 * - Check if there is a primary link. If the primary link was set and is up,
 *   go on and do link reselection.
 *
 * - If primary link is not set or down, find the highest priority link.
 *   If the highest priority link is not current slave, set it as primary
 *   link and do link reselection.
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	struct slave *slave, *hprio = NULL;
	struct list_head *iter;

	if (!prim || prim->link != BOND_LINK_UP) {
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link == BOND_LINK_UP) {
				hprio = hprio ?: slave;
				if (slave->prio > hprio->prio)
					hprio = slave;
			}
		}

		if (hprio && hprio != curr) {
			prim = hprio;
			goto link_reselect;
		}

		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

link_reselect:
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

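/* Decide whether a gratuitous ARP/unsolicited NA burst should go out now:
 * there must be an active slave, notifications must still be pending, the
 * peer_notif_delay interval must have elapsed and carrier must be up.
 */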
static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
 * because it is apparently the best available slave we have, even though its
 * updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
			    SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

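/* Feature masks below bound what bond_compute_features() may advertise;
 * each is intersected with the corresponding per-slave feature set.
 */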
#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features  = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features  = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features  = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int tso_max_size = TSO_MAX_SIZE;
	u16 tso_max_segs = TSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
			slave->dev->vlan_features, BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops	    = slave_dev->header_ops;

	bond_dev->type		    = slave_dev->type;
	bond_dev->hard_header_len   = slave_dev->hard_header_len;
	bond_dev->needed_headroom   = slave_dev->needed_headroom;
	bond_dev->addr_len	    = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
		slave_dev->addr_len);
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

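/* Translate the bonding mode into the LAG tx type and hash type exposed
 * to lower devices via netdev_lag_upper_info, so switchdev drivers can
 * offload the aggregation.
 */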
static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;
	int err;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					   &lag_upper_info, extack);
	if (err)
		return err;

	slave->dev->flags |= IFF_SLAVE;
	return 0;
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

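/* Slave kobject release: runs once the sysfs "bonding_slave" directory
 * drops its last reference; stops the pending notify work and frees the
 * per-mode 802.3ad state along with the slave itself.
 */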
static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

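/* Fill the ifbond/ifslave structures reported through the legacy
 * SIOCBONDINFOQUERY/SIOCBONDSLAVEINFOQUERY ioctls and through netlink
 * bonding_info notifications.
 */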
static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

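/* Deferred bonding_info notification; retries later if RTNL is currently
 * contended.
 */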
static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)

/* The bonding driver uses ether_setup() to convert a master bond device
 * to ARPHRD_ETHER, that resets the target netdevice's flags so we always
 * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
 * if they were set.
 */
static void bond_ether_setup(struct net_device *bond_dev)
{
	unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);

	ether_setup(bond_dev);
	bond_dev->flags |= IFF_MASTER | flags;
	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}

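/* Recompute the bond's advertised xdp_features as the intersection of all
 * slaves' features, or clear them entirely when the current mode cannot
 * run XDP.
 */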
void bond_xdp_set_features(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	xdp_features_t val = NETDEV_XDP_ACT_MASK;
	struct list_head *iter;
	struct slave *slave;

	ASSERT_RTNL();

	if (!bond_xdp_check(bond)) {
		xdp_clear_features_flag(bond_dev);
		return;
	}

	bond_for_each_slave(bond, slave, iter)
		val &= slave->dev->xdp_features;

	xdp_set_features_flag(bond_dev, val);
}

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
				     "Can not enslave VLAN challenged device to VLAN enabled bond");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported.  These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device can not be enslaved while up");
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else
				bond_ether_setup(bond_dev);

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device type is different from other slaves");
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Only active-backup mode is supported for infiniband slaves");
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond, slave_dev);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	/* Set the new_slave's queue_id to be zero.  Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
		goto err_free;
	}

	/* Save slave's original ("permanent") mac address for modes
	 * that need it, and for restoring it upon release, and then
	 * set it to the master's address
	 */
	bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
			  slave_dev->addr_len);

	if (!bond->params.fail_over_mac ||
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		/* Set slave to master's mac address.  The application already
		 * set the master's mac address to that of the first slave
		 */
		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
		ss.ss_family = slave_dev->type;
		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
					  extack);
		if (res) {
			slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
			goto err_restore_mtu;
		}
	}

	/* set no_addrconf flag before open to prevent IPv6 addrconf */
	slave_dev->priv_flags |= IFF_NO_ADDRCONF;

	/* open the slave since the application closed it */
	res = dev_open(slave_dev, extack);
	if (res) {
		slave_err(bond_dev, slave_dev, "Opening slave failed\n");
		goto err_restore_mac;
	}

	slave_dev->priv_flags |= IFF_BONDING;
	/* initialize slave stats */
	dev_get_stats(new_slave->dev, &new_slave->slave_stats);

	if (bond_is_lb(bond)) {
		/* bond_alb_init_slave() must be called before all other stages since
		 * it might fail and we do not want to have to undo everything
		 */
		res = bond_alb_init_slave(bond, new_slave);
		if (res)
			goto err_close;
	}

	res = vlan_vids_add_by_dev(slave_dev, bond_dev);
	if (res) {
		slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
		goto err_close;
	}

	prev_slave = bond_last_slave(bond);

	new_slave->delay = 0;
	new_slave->link_failure_count = 0;

	if (bond_update_speed_duplex(new_slave) &&
	    bond_needs_speed_duplex(bond))
		new_slave->link = BOND_LINK_DOWN;

	new_slave->last_rx = jiffies -
		(msecs_to_jiffies(bond->params.arp_interval) + 1);
	for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
		new_slave->target_last_arp_rx[i] = new_slave->last_rx;

	new_slave->last_tx = new_slave->last_rx;

	if (bond->params.miimon && !bond->params.use_carrier) {
		link_reporting = bond_check_dev_link(bond, slave_dev, 1);

		if ((link_reporting == -1) && !bond->params.arp_interval) {
			/* miimon is set but a bonded network driver
			 * does not support ETHTOOL/MII and
			 * arp_interval is not set.  Note: if
			 * use_carrier is enabled, we will never go
			 * here (because netif_carrier is always
			 * supported); thus, we don't need to change
			 * the messages for netif_carrier.
			 */
			slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
		} else if (link_reporting == -1) {
			/* unable to get link status using mii/ethtool */
			slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
		}
	}

	/* check for initial state */
	new_slave->link = BOND_LINK_NOCHANGE;
	if (bond->params.miimon) {
		if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
			if (bond->params.updelay) {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_BACK,
							  BOND_SLAVE_NOTIFY_NOW);
				new_slave->delay = bond->params.updelay;
			} else {
				bond_set_slave_link_state(new_slave,
							  BOND_LINK_UP,
							  BOND_SLAVE_NOTIFY_NOW);
			}
		} else {
			bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
						  BOND_SLAVE_NOTIFY_NOW);
		}
	} else if (bond->params.arp_interval) {
		bond_set_slave_link_state(new_slave,
					  (netif_carrier_ok(slave_dev) ?
					  BOND_LINK_UP : BOND_LINK_DOWN),
					  BOND_SLAVE_NOTIFY_NOW);
	} else {
		bond_set_slave_link_state(new_slave, BOND_LINK_UP,
					  BOND_SLAVE_NOTIFY_NOW);
	}

	if (new_slave->link != BOND_LINK_DOWN)
		new_slave->last_link_up = jiffies;
	slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
		  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
		  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));

	if (bond_uses_primary(bond) && bond->params.primary[0]) {
		/* if there is a primary slave, remember it */
		if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
			rcu_assign_pointer(bond->primary_slave, new_slave);
			bond->force_primary = true;
		}
	}

	switch (BOND_MODE(bond)) {
	case BOND_MODE_ACTIVEBACKUP:
		bond_set_slave_inactive_flags(new_slave,
					      BOND_SLAVE_NOTIFY_NOW);
		break;
	case BOND_MODE_8023AD:
		/* in 802.3ad mode, the internal mechanism
		 * will activate the slaves in the selected
		 * aggregator
		 */
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		/* if this is the first slave */
		if (!prev_slave) {
			SLAVE_AD_INFO(new_slave)->id = 1;
			/* Initialize AD with the number of times that the AD timer is called in 1 second
			 * can be called only after the mac address of the bond is set
			 */
			bond_3ad_initialize(bond);
		} else {
			SLAVE_AD_INFO(new_slave)->id =
				SLAVE_AD_INFO(prev_slave)->id + 1;
		}

		bond_3ad_bind_slave(new_slave);
		break;
	case BOND_MODE_TLB:
	case BOND_MODE_ALB:
		bond_set_active_slave(new_slave);
		bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
		break;
	default:
		slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");

		/* always active in trunk mode */
		bond_set_active_slave(new_slave);

		/* In trunking mode there is little meaning to curr_active_slave
		 * anyway (it holds no special properties of the bond device),
		 * so we can change it without calling change_active_interface()
		 */
		if (!rcu_access_pointer(bond->curr_active_slave) &&
		    new_slave->link == BOND_LINK_UP)
			rcu_assign_pointer(bond->curr_active_slave, new_slave);

		break;
	} /* switch(bond_mode) */

2147 #ifdef CONFIG_NET_POLL_CONTROLLER
2148 if (bond->dev->npinfo) {
2149 if (slave_enable_netpoll(new_slave)) {
2150 slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2157 if (!(bond_dev->features & NETIF_F_LRO))
2158 dev_disable_lro(slave_dev);
2160 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2161 new_slave);
2162 if (res) {
2163 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2167 res = bond_master_upper_dev_link(bond, new_slave, extack);
2168 if (res) {
2169 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2170 goto err_unregister;
2173 bond_lower_state_changed(new_slave);
2175 res = bond_sysfs_slave_add(new_slave);
2176 if (res) {
2177 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2178 goto err_upper_unlink;
2181 /* If the mode uses primary, then the following is handled by
2182 * bond_change_active_slave().
2184 if (!bond_uses_primary(bond)) {
2185 /* set promiscuity level to new slave */
2186 if (bond_dev->flags & IFF_PROMISC) {
2187 res = dev_set_promiscuity(slave_dev, 1);
2192 /* set allmulti level to new slave */
2193 if (bond_dev->flags & IFF_ALLMULTI) {
2194 res = dev_set_allmulti(slave_dev, 1);
2195 if (res) {
2196 if (bond_dev->flags & IFF_PROMISC)
2197 dev_set_promiscuity(slave_dev, -1);
2202 if (bond_dev->flags & IFF_UP) {
2203 netif_addr_lock_bh(bond_dev);
2204 dev_mc_sync_multiple(slave_dev, bond_dev);
2205 dev_uc_sync_multiple(slave_dev, bond_dev);
2206 netif_addr_unlock_bh(bond_dev);
2208 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2209 dev_mc_add(slave_dev, lacpdu_mcast_addr);
2214 bond_compute_features(bond);
2215 bond_set_carrier(bond);
2217 if (bond_uses_primary(bond)) {
2218 block_netpoll_tx();
2219 bond_select_active_slave(bond);
2220 unblock_netpoll_tx();
2223 if (bond_mode_can_use_xmit_hash(bond))
2224 bond_update_slave_arr(bond, NULL);
2227 if (!slave_dev->netdev_ops->ndo_bpf ||
2228 !slave_dev->netdev_ops->ndo_xdp_xmit) {
2229 if (bond->xdp_prog) {
2230 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2231 "Slave does not support XDP");
2235 } else if (bond->xdp_prog) {
2236 struct netdev_bpf xdp = {
2237 .command = XDP_SETUP_PROG,
2238 .flags = 0,
2239 .prog = bond->xdp_prog,
2240 .extack = extack,
2241 };
2243 if (dev_xdp_prog_count(slave_dev) > 0) {
2244 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2245 "Slave has XDP program loaded, please unload before enslaving");
2250 res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2251 if (res < 0) {
2252 /* ndo_bpf() sets extack error message */
2253 slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2257 bpf_prog_inc(bond->xdp_prog);
2260 bond_xdp_set_features(bond_dev);
2262 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2263 bond_is_active_slave(new_slave) ? "an active" : "a backup",
2264 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2266 /* enslave is successful */
2267 bond_queue_slave_event(new_slave);
2270 /* Undo stages on error */
2272 bond_sysfs_slave_del(new_slave);
2275 bond_upper_dev_unlink(bond, new_slave);
2278 netdev_rx_handler_unregister(slave_dev);
2281 vlan_vids_del_by_dev(slave_dev, bond_dev);
2282 if (rcu_access_pointer(bond->primary_slave) == new_slave)
2283 RCU_INIT_POINTER(bond->primary_slave, NULL);
2284 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2285 block_netpoll_tx();
2286 bond_change_active_slave(bond, NULL);
2287 bond_select_active_slave(bond);
2288 unblock_netpoll_tx();
2290 /* either primary_slave or curr_active_slave might've changed */
2292 slave_disable_netpoll(new_slave);
2295 if (!netif_is_bond_master(slave_dev))
2296 slave_dev->priv_flags &= ~IFF_BONDING;
2297 dev_close(slave_dev);
2300 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2301 if (!bond->params.fail_over_mac ||
2302 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2303 /* XXX TODO - fom follow mode needs to change master's
2304 * MAC if this slave's MAC is in use by the bond, or at
2305 * least print a warning.
2307 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2308 new_slave->dev->addr_len);
2309 ss.ss_family = slave_dev->type;
2310 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2314 dev_set_mtu(slave_dev, new_slave->original_mtu);
2317 kobject_put(&new_slave->kobj);
2320 /* Enslaving the first slave has failed and we need to fix the master's MAC */
2321 if (!bond_has_slaves(bond)) {
2322 if (ether_addr_equal_64bits(bond_dev->dev_addr,
2323 slave_dev->dev_addr))
2324 eth_hw_addr_random(bond_dev);
2325 if (bond_dev->type != ARPHRD_ETHER) {
2326 dev_close(bond_dev);
2327 bond_ether_setup(bond_dev);
2334 /* Try to release the slave device <slave> from the bond device <master>
2335 * It is legal to access curr_active_slave without a lock because the entire function
2336 * is RTNL-locked. If "all" is true, the function is being called
2337 * while destroying a bond interface and all slaves are being released.
2339 * The rules for slave state should be:
2340 * for Active/Backup:
2341 * Active stays up, all backups go down
2342 * for Bonded connections:
2343 * The first up interface should be left on and all others downed.
2345 static int __bond_release_one(struct net_device *bond_dev,
2346 struct net_device *slave_dev,
2347 bool all, bool unregister)
2349 struct bonding *bond = netdev_priv(bond_dev);
2350 struct slave *slave, *oldcurrent;
2351 struct sockaddr_storage ss;
2352 int old_flags = bond_dev->flags;
2353 netdev_features_t old_features = bond_dev->features;
2355 /* slave_dev is not a slave, or bond_dev is not the master of this slave */
2356 if (!(slave_dev->flags & IFF_SLAVE) ||
2357 !netdev_has_upper_dev(slave_dev, bond_dev)) {
2358 slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2364 slave = bond_get_slave_by_dev(bond, slave_dev);
2365 if (!slave) {
2366 /* not a slave of this bond */
2367 slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2368 unblock_netpoll_tx();
2372 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2374 bond_sysfs_slave_del(slave);
2376 /* recompute stats just before removing the slave */
2377 bond_get_stats(bond->dev, &bond->bond_stats);
2379 if (bond->xdp_prog) {
2380 struct netdev_bpf xdp = {
2381 .command = XDP_SETUP_PROG,
2382 .flags = 0,
2383 .prog = NULL,
2384 .extack = NULL,
2385 };
2386 if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2387 slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2390 /* unregister rx_handler early so bond_handle_frame won't be called
2391 * for this slave anymore.
2393 netdev_rx_handler_unregister(slave_dev);
2395 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2396 bond_3ad_unbind_slave(slave);
2398 bond_upper_dev_unlink(bond, slave);
2400 if (bond_mode_can_use_xmit_hash(bond))
2401 bond_update_slave_arr(bond, slave);
2403 slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2404 bond_is_active_slave(slave) ? "active" : "backup");
2406 oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2408 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2410 if (!all && (!bond->params.fail_over_mac ||
2411 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2412 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2413 bond_has_slaves(bond))
2414 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2415 slave->perm_hwaddr);
2418 if (rtnl_dereference(bond->primary_slave) == slave)
2419 RCU_INIT_POINTER(bond->primary_slave, NULL);
2421 if (oldcurrent == slave)
2422 bond_change_active_slave(bond, NULL);
2424 if (bond_is_lb(bond)) {
2425 /* Must be called only after the slave has been
2426 * detached from the list and the curr_active_slave
2427 * has been cleared (if our_slave == old_current),
2428 * but before a new active slave is selected.
2430 bond_alb_deinit_slave(bond, slave);
2433 if (all) {
2434 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2435 } else if (oldcurrent == slave) {
2436 /* Note that we hold RTNL over this sequence, so there
2437 * is no concern that another slave add/remove event will interfere.
2440 bond_select_active_slave(bond);
2443 bond_set_carrier(bond);
2444 if (!bond_has_slaves(bond))
2445 eth_hw_addr_random(bond_dev);
2447 unblock_netpoll_tx();
2451 if (!bond_has_slaves(bond)) {
2452 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2453 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2456 bond_compute_features(bond);
2457 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2458 (old_features & NETIF_F_VLAN_CHALLENGED))
2459 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2461 vlan_vids_del_by_dev(slave_dev, bond_dev);
2463 /* If the mode uses primary, then this case was handled above by
2464 * bond_change_active_slave(..., NULL)
2466 if (!bond_uses_primary(bond)) {
2467 /* unset promiscuity level from slave
2468 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2469 * of the IFF_PROMISC flag in the bond_dev, but we need the
2470 * value of that flag before that change, as that was the value
2471 * when this slave was attached, so we cache it at the start of the
2472 * function and use it here. Same goes for ALLMULTI below
2474 if (old_flags & IFF_PROMISC)
2475 dev_set_promiscuity(slave_dev, -1);
2477 /* unset allmulti level from slave */
2478 if (old_flags & IFF_ALLMULTI)
2479 dev_set_allmulti(slave_dev, -1);
2481 if (old_flags & IFF_UP)
2482 bond_hw_addr_flush(bond_dev, slave_dev);
2485 slave_disable_netpoll(slave);
2487 /* close slave before restoring its mac address */
2488 dev_close(slave_dev);
2490 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2492 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2493 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2494 /* restore original ("permanent") mac address */
2495 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2496 slave->dev->addr_len);
2497 ss.ss_family = slave_dev->type;
2498 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2501 if (unregister)
2502 __dev_set_mtu(slave_dev, slave->original_mtu);
2503 else
2504 dev_set_mtu(slave_dev, slave->original_mtu);
2506 if (!netif_is_bond_master(slave_dev))
2507 slave_dev->priv_flags &= ~IFF_BONDING;
2509 bond_xdp_set_features(bond_dev);
2510 kobject_put(&slave->kobj);
2515 /* A wrapper used because of ndo_del_link */
2516 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2518 return __bond_release_one(bond_dev, slave_dev, false, false);
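/* Illustrative userspace path into bond_release() (names are examples;
 * roughly speaking, the sysfs store ends up here under RTNL):
 *	echo -eth0 > /sys/class/net/bond0/bonding/slaves
 */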
2521 /* First release a slave and then destroy the bond if no more slaves are left.
2522 * Must be under rtnl_lock when this function is called.
2524 static int bond_release_and_destroy(struct net_device *bond_dev,
2525 struct net_device *slave_dev)
2527 struct bonding *bond = netdev_priv(bond_dev);
2530 ret = __bond_release_one(bond_dev, slave_dev, false, true);
2531 if (ret == 0 && !bond_has_slaves(bond) &&
2532 bond_dev->reg_state != NETREG_UNREGISTERING) {
2533 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2534 netdev_info(bond_dev, "Destroying bond\n");
2535 bond_remove_proc_entry(bond);
2536 unregister_netdevice(bond_dev);
2541 static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2543 struct bonding *bond = netdev_priv(bond_dev);
2545 bond_fill_ifbond(bond, info);
2548 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2550 struct bonding *bond = netdev_priv(bond_dev);
2551 struct list_head *iter;
2552 int i = 0, res = -ENODEV;
2553 struct slave *slave;
2555 bond_for_each_slave(bond, slave, iter) {
2556 if (i++ == (int)info->slave_id) {
2557 res = 0;
2558 bond_fill_ifslave(slave, info);
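/* Userspace sketch of this query (illustrative only; error handling
 * omitted, "bond0" and the socket setup are assumptions):
 *	struct ifslave info = { .slave_id = 0 };
 *	struct ifreq ifr = { 0 };
 *	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&info;
 *	ioctl(fd, SIOCBONDSLAVEINFOQUERY, &ifr);   (fd: an AF_INET socket)
 */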
2566 /*-------------------------------- Monitoring -------------------------------*/
2568 /* called with rcu_read_lock() */
2569 static int bond_miimon_inspect(struct bonding *bond)
2571 bool ignore_updelay = false;
2572 int link_state, commit = 0;
2573 struct list_head *iter;
2574 struct slave *slave;
2576 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
2577 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2578 } else {
2579 struct bond_up_slave *usable_slaves;
2581 usable_slaves = rcu_dereference(bond->usable_slaves);
2583 if (usable_slaves && usable_slaves->count == 0)
2584 ignore_updelay = true;
2587 bond_for_each_slave_rcu(bond, slave, iter) {
2588 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2590 link_state = bond_check_dev_link(bond, slave->dev, 0);
2592 switch (slave->link) {
2597 bond_propose_link_state(slave, BOND_LINK_FAIL);
2598 commit++;
2599 slave->delay = bond->params.downdelay;
2601 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2603 BOND_MODE_ACTIVEBACKUP) ?
2604 (bond_is_active_slave(slave) ?
2605 "active " : "backup ") : "",
2606 bond->params.downdelay * bond->params.miimon);
2609 case BOND_LINK_FAIL:
2611 /* recovered before downdelay expired */
2612 bond_propose_link_state(slave, BOND_LINK_UP);
2613 slave->last_link_up = jiffies;
2614 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2615 (bond->params.downdelay - slave->delay) *
2616 bond->params.miimon);
2621 if (slave->delay <= 0) {
2622 bond_propose_link_state(slave, BOND_LINK_DOWN);
2623 commit++;
2630 case BOND_LINK_DOWN:
2634 bond_propose_link_state(slave, BOND_LINK_BACK);
2635 commit++;
2636 slave->delay = bond->params.updelay;
2639 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2640 ignore_updelay ? 0 :
2641 bond->params.updelay *
2642 bond->params.miimon);
2645 case BOND_LINK_BACK:
2647 bond_propose_link_state(slave, BOND_LINK_DOWN);
2648 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2649 (bond->params.updelay - slave->delay) *
2650 bond->params.miimon);
2658 if (slave->delay <= 0) {
2659 bond_propose_link_state(slave, BOND_LINK_UP);
2660 commit++;
2661 ignore_updelay = false;
2673 static void bond_miimon_link_change(struct bonding *bond,
2674 struct slave *slave,
2675 char link)
2677 switch (BOND_MODE(bond)) {
2678 case BOND_MODE_8023AD:
2679 bond_3ad_handle_link_change(slave, link);
2683 bond_alb_handle_link_change(bond, slave, link);
2686 bond_update_slave_arr(bond, NULL);
2691 static void bond_miimon_commit(struct bonding *bond)
2693 struct slave *slave, *primary, *active;
2694 bool do_failover = false;
2695 struct list_head *iter;
2699 bond_for_each_slave(bond, slave, iter) {
2700 switch (slave->link_new_state) {
2701 case BOND_LINK_NOCHANGE:
2702 /* For 802.3ad mode, check current slave speed and
2703 * duplex again in case its port was disabled after
2704 * invalid speed/duplex reporting but recovered before
2705 * link monitoring could make a decision on the actual link status.
2708 if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2709 slave->link == BOND_LINK_UP)
2710 bond_3ad_adapter_speed_duplex_changed(slave);
2714 if (bond_update_speed_duplex(slave) &&
2715 bond_needs_speed_duplex(bond)) {
2716 slave->link = BOND_LINK_DOWN;
2717 if (net_ratelimit())
2718 slave_warn(bond->dev, slave->dev,
2719 "failed to get link speed/duplex\n");
2722 bond_set_slave_link_state(slave, BOND_LINK_UP,
2723 BOND_SLAVE_NOTIFY_NOW);
2724 slave->last_link_up = jiffies;
2726 primary = rtnl_dereference(bond->primary_slave);
2727 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2728 /* prevent it from being the active one */
2729 bond_set_backup_slave(slave);
2730 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2731 /* make it immediately active */
2732 bond_set_active_slave(slave);
2735 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2736 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2737 slave->duplex ? "full" : "half");
2739 bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2741 active = rtnl_dereference(bond->curr_active_slave);
2742 if (!active || slave == primary || slave->prio > active->prio)
2747 case BOND_LINK_DOWN:
2748 if (slave->link_failure_count < UINT_MAX)
2749 slave->link_failure_count++;
2751 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2752 BOND_SLAVE_NOTIFY_NOW);
2754 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2755 BOND_MODE(bond) == BOND_MODE_8023AD)
2756 bond_set_slave_inactive_flags(slave,
2757 BOND_SLAVE_NOTIFY_NOW);
2759 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2761 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2763 if (slave == rcu_access_pointer(bond->curr_active_slave))
2769 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2770 slave->link_new_state);
2771 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2779 bond_select_active_slave(bond);
2780 unblock_netpoll_tx();
2783 bond_set_carrier(bond);
2788 * Really a wrapper that splits the mii monitor into two phases: an
2789 * inspection, then (if inspection indicates something needs to be done)
2790 * an acquisition of appropriate locks followed by a commit phase to
2791 * implement whatever link state changes are indicated.
2793 static void bond_mii_monitor(struct work_struct *work)
2795 struct bonding *bond = container_of(work, struct bonding,
2796 mii_work.work);
2797 bool should_notify_peers = false;
2798 bool commit;
2799 unsigned long delay;
2800 struct slave *slave;
2801 struct list_head *iter;
2803 delay = msecs_to_jiffies(bond->params.miimon);
2805 if (!bond_has_slaves(bond))
2809 should_notify_peers = bond_should_notify_peers(bond);
2810 commit = !!bond_miimon_inspect(bond);
2811 if (bond->send_peer_notif) {
2813 if (rtnl_trylock()) {
2814 bond->send_peer_notif--;
2822 /* Race avoidance with bond_close cancel of workqueue */
2823 if (!rtnl_trylock()) {
2824 delay = 1;
2825 should_notify_peers = false;
2829 bond_for_each_slave(bond, slave, iter) {
2830 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2832 bond_miimon_commit(bond);
2834 rtnl_unlock(); /* might sleep, hold no other locks */
2838 if (bond->params.miimon)
2839 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2841 if (should_notify_peers) {
2842 if (!rtnl_trylock())
2843 return;
2844 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2849 static int bond_upper_dev_walk(struct net_device *upper,
2850 struct netdev_nested_priv *priv)
2852 __be32 ip = *(__be32 *)priv->data;
2854 return ip == bond_confirm_addr(upper, 0, ip);
2857 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2859 struct netdev_nested_priv priv = {
2860 .data = (void *)&ip,
2864 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2868 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2875 #define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff)
2877 static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
2878 struct sk_buff *skb)
2880 struct net_device *bond_dev = slave->bond->dev;
2881 struct net_device *slave_dev = slave->dev;
2882 struct bond_vlan_tag *outer_tag = tags;
2884 if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE)
2889 /* Go through all the tags backwards and add them to the packet */
2890 while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) {
2891 if (!tags->vlan_id) {
2896 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2897 ntohs(outer_tag->vlan_proto), tags->vlan_id);
2898 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2901 net_err_ratelimited("failed to insert inner VLAN tag\n");
2907 /* Set the outer tag */
2908 if (outer_tag->vlan_id) {
2909 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2910 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2911 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2912 outer_tag->vlan_id);
2918 /* We go to the (large) trouble of VLAN tagging ARP frames because
2919 * switches in VLAN mode (especially if ports are configured as
2920 * "native" to a VLAN) might not pass non-tagged frames.
2922 static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2923 __be32 src_ip, struct bond_vlan_tag *tags)
2925 struct net_device *bond_dev = slave->bond->dev;
2926 struct net_device *slave_dev = slave->dev;
2927 struct sk_buff *skb;
2929 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2930 arp_op, &dest_ip, &src_ip);
2932 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2933 NULL, slave_dev->dev_addr, NULL);
2936 net_err_ratelimited("ARP packet allocation failed\n");
2940 if (bond_handle_vlan(slave, tags, skb)) {
2941 slave_update_last_tx(slave);
2948 /* Validate the device path between the @start_dev and the @end_dev.
2949 * The path is valid if the @end_dev is reachable through device stacking.
2951 * When the path is validated, collect any vlan information in the path.
2954 struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2955 struct net_device *end_dev,
2958 struct bond_vlan_tag *tags;
2959 struct net_device *upper;
2960 struct list_head *iter;
2962 if (start_dev == end_dev) {
2963 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2965 return ERR_PTR(-ENOMEM);
2966 tags[level].vlan_proto = BOND_VLAN_PROTO_NONE;
2970 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2971 tags = bond_verify_device_path(upper, end_dev, level + 1);
2972 if (IS_ERR_OR_NULL(tags)) {
2977 if (is_vlan_dev(upper)) {
2978 tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2979 tags[level].vlan_id = vlan_dev_vlan_id(upper);
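/* Hypothetical stacking example: with vlan20 on top of vlan10 on top of
 * bond0, bond_verify_device_path(bond0, vlan20, 0) yields
 * tags[0] = vlan10's tag and tags[1] = vlan20's tag, followed by a
 * terminating entry with vlan_proto == BOND_VLAN_PROTO_NONE;
 * bond_handle_vlan() then sets tags[0] as the hw-accelerated outer tag
 * and inserts the remaining tags into the skb.
 */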
2988 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2990 struct rtable *rt;
2991 struct bond_vlan_tag *tags;
2992 __be32 *targets = bond->params.arp_targets, addr;
2993 int i;
2995 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2996 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2997 __func__, &targets[i]);
3000 /* Find out through which dev the packet should go */
3001 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
3004 /* there's no route to target - try to send arp
3005 * probe to generate any traffic (arp_validate=0)
3007 if (bond->params.arp_validate)
3008 pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
3011 bond_arp_send(slave, ARPOP_REQUEST, targets[i],
3016 /* bond device itself */
3017 if (rt->dst.dev == bond->dev)
3021 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
3024 if (!IS_ERR_OR_NULL(tags))
3027 /* Not our device - skip */
3028 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
3029 &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
3035 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
3037 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
3042 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
3046 if (!sip || !bond_has_this_ip(bond, tip)) {
3047 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
3048 __func__, &sip, &tip);
3052 i = bond_get_targets_ip(bond->params.arp_targets, sip);
3054 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
3058 slave->last_rx = jiffies;
3059 slave->target_last_arp_rx[i] = jiffies;
3062 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
3063 struct slave *slave)
3065 struct arphdr *arp = (struct arphdr *)skb->data;
3066 struct slave *curr_active_slave, *curr_arp_slave;
3067 unsigned char *arp_ptr;
3068 __be32 sip, tip;
3069 unsigned int alen;
3071 alen = arp_hdr_len(bond->dev);
3073 if (alen > skb_headlen(skb)) {
3074 arp = kmalloc(alen, GFP_ATOMIC);
3077 if (skb_copy_bits(skb, 0, arp, alen) < 0)
3081 if (arp->ar_hln != bond->dev->addr_len ||
3082 skb->pkt_type == PACKET_OTHERHOST ||
3083 skb->pkt_type == PACKET_LOOPBACK ||
3084 arp->ar_hrd != htons(ARPHRD_ETHER) ||
3085 arp->ar_pro != htons(ETH_P_IP) ||
3089 arp_ptr = (unsigned char *)(arp + 1);
3090 arp_ptr += bond->dev->addr_len;
3091 memcpy(&sip, arp_ptr, 4);
3092 arp_ptr += 4 + bond->dev->addr_len;
3093 memcpy(&tip, arp_ptr, 4);
3095 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3096 __func__, slave->dev->name, bond_slave_state(slave),
3097 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3100 curr_active_slave = rcu_dereference(bond->curr_active_slave);
3101 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3103 /* We 'trust' the received ARP enough to validate it if:
3105 * (a) the slave receiving the ARP is active (which includes the
3106 * current ARP slave, if any), or
3108 * (b) the receiving slave isn't active, but there is a currently
3109 * active slave and it received valid arp reply(s) after it became
3110 * the currently active slave, or
3112 * (c) there is an ARP slave that sent an ARP during the prior ARP
3113 * interval, and we receive an ARP reply on any slave. We accept
3114 * these because switch FDB update delays may deliver the ARP
3115 * reply to a slave other than the sender of the ARP request.
3117 * Note: for (b), backup slaves are receiving the broadcast ARP
3118 * request, not a reply. This request passes from the sending
3119 * slave through the L2 switch(es) to the receiving slave. Since
3120 * this is checking the request, sip/tip are swapped for validation.
3123 * This is done to avoid endless looping when we can't reach the
3124 * arp_ip_target and fool ourselves with our own arp requests.
3126 if (bond_is_active_slave(slave))
3127 bond_validate_arp(bond, slave, sip, tip);
3128 else if (curr_active_slave &&
3129 time_after(slave_last_rx(bond, curr_active_slave),
3130 curr_active_slave->last_link_up))
3131 bond_validate_arp(bond, slave, tip, sip);
3132 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3133 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3134 bond_validate_arp(bond, slave, sip, tip);
3137 if (arp != (struct arphdr *)skb->data)
3139 return RX_HANDLER_ANOTHER;
3142 #if IS_ENABLED(CONFIG_IPV6)
3143 static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
3144 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
3146 struct net_device *bond_dev = slave->bond->dev;
3147 struct net_device *slave_dev = slave->dev;
3148 struct in6_addr mcaddr;
3149 struct sk_buff *skb;
3151 slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
3154 skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
3156 net_err_ratelimited("NS packet allocation failed\n");
3160 addrconf_addr_solict_mult(daddr, &mcaddr);
3161 if (bond_handle_vlan(slave, tags, skb)) {
3162 slave_update_last_tx(slave);
3163 ndisc_send_skb(skb, &mcaddr, saddr);
3167 static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
3169 struct in6_addr *targets = bond->params.ns_targets;
3170 struct bond_vlan_tag *tags;
3171 struct dst_entry *dst;
3172 struct in6_addr saddr;
3173 struct flowi6 fl6;
3174 int i;
3176 for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
3177 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3178 __func__, &targets[i]);
3181 /* Find out through which dev the packet should go */
3182 memset(&fl6, 0, sizeof(struct flowi6));
3183 fl6.daddr = targets[i];
3184 fl6.flowi6_oif = bond->dev->ifindex;
3186 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3189 /* there's no route to target - try to send an NS
3190 * probe to generate any traffic (arp_validate=0)
3192 if (bond->params.arp_validate)
3193 pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
3196 bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3200 /* bond device itself */
3201 if (dst->dev == bond->dev)
3205 tags = bond_verify_device_path(bond->dev, dst->dev, 0);
3208 if (!IS_ERR_OR_NULL(tags))
3211 /* Not our device - skip */
3212 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
3213 &targets[i], dst->dev ? dst->dev->name : "NULL");
3219 if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
3220 bond_ns_send(slave, &targets[i], &saddr, tags);
3222 bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3229 static int bond_confirm_addr6(struct net_device *dev,
3230 struct netdev_nested_priv *priv)
3232 struct in6_addr *addr = (struct in6_addr *)priv->data;
3234 return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3237 static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
3239 struct netdev_nested_priv priv = {
3244 if (bond_confirm_addr6(bond->dev, &priv))
3248 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3255 static void bond_validate_na(struct bonding *bond, struct slave *slave,
3256 struct in6_addr *saddr, struct in6_addr *daddr)
3261 * 1. Source address is the unspecified address.
3262 * 2. Dest address is neither the all-nodes multicast address nor
3263 * exists on the bond interface.
3265 if (ipv6_addr_any(saddr) ||
3266 (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
3267 !bond_has_this_ip6(bond, daddr))) {
3268 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3269 __func__, saddr, daddr);
3273 i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
3275 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3279 slave->last_rx = jiffies;
3280 slave->target_last_arp_rx[i] = jiffies;
3283 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
3284 struct slave *slave)
3286 struct slave *curr_active_slave, *curr_arp_slave;
3287 struct in6_addr *saddr, *daddr;
3288 struct {
3289 struct ipv6hdr ip6;
3290 struct icmp6hdr icmp6;
3291 } *combined, _combined;
3293 if (skb->pkt_type == PACKET_OTHERHOST ||
3294 skb->pkt_type == PACKET_LOOPBACK)
3297 combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
3298 if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
3299 (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
3300 combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
3303 saddr = &combined->ip6.saddr;
3304 daddr = &combined->ip6.daddr;
3306 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
3307 __func__, slave->dev->name, bond_slave_state(slave),
3308 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3311 curr_active_slave = rcu_dereference(bond->curr_active_slave);
3312 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3314 /* We 'trust' the received ARP enough to validate it if:
3315 * see bond_arp_rcv().
3317 if (bond_is_active_slave(slave))
3318 bond_validate_na(bond, slave, saddr, daddr);
3319 else if (curr_active_slave &&
3320 time_after(slave_last_rx(bond, curr_active_slave),
3321 curr_active_slave->last_link_up))
3322 bond_validate_na(bond, slave, daddr, saddr);
3323 else if (curr_arp_slave &&
3324 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3325 bond_validate_na(bond, slave, saddr, daddr);
3328 return RX_HANDLER_ANOTHER;
3332 int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
3333 struct slave *slave)
3335 #if IS_ENABLED(CONFIG_IPV6)
3336 bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
3338 bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
3340 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
3341 __func__, skb->dev->name);
3343 /* Use arp validate logic for both ARP and NS */
3344 if (!slave_do_arp_validate(bond, slave)) {
3345 if ((slave_do_arp_validate_only(bond) && is_arp) ||
3346 #if IS_ENABLED(CONFIG_IPV6)
3347 (slave_do_arp_validate_only(bond) && is_ipv6) ||
3349 !slave_do_arp_validate_only(bond))
3350 slave->last_rx = jiffies;
3351 return RX_HANDLER_ANOTHER;
3352 } else if (is_arp) {
3353 return bond_arp_rcv(skb, bond, slave);
3354 #if IS_ENABLED(CONFIG_IPV6)
3355 } else if (is_ipv6) {
3356 return bond_na_rcv(skb, bond, slave);
3359 return RX_HANDLER_ANOTHER;
3363 static void bond_send_validate(struct bonding *bond, struct slave *slave)
3365 bond_arp_send_all(bond, slave);
3366 #if IS_ENABLED(CONFIG_IPV6)
3367 bond_ns_send_all(bond, slave);
3371 /* function to verify if we're in the arp_interval timeslice, returns true if
3372 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3373 * arp_interval/2). The arp_interval/2 is needed for really fast networks.
3375 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3378 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3380 return time_in_range(jiffies,
3381 last_act - delta_in_ticks,
3382 last_act + mod * delta_in_ticks + delta_in_ticks/2);
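/* Worked example (hypothetical values): with arp_interval = 1000 ms and
 * mod = 2, delta_in_ticks = msecs_to_jiffies(1000), so jiffies is
 * accepted anywhere in [last_act - 1000 ms, last_act + 2500 ms].
 */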
3385 /* This function is called regularly to monitor each slave's link,
3386 * ensuring that traffic is being sent and received when arp monitoring
3387 * is used in load-balancing mode. If the adapter has been dormant, an
3388 * arp is transmitted to generate traffic. See bond_activebackup_arp_mon
3389 * for arp monitoring in active-backup mode.
3391 static void bond_loadbalance_arp_mon(struct bonding *bond)
3393 struct slave *slave, *oldcurrent;
3394 struct list_head *iter;
3395 int do_failover = 0, slave_state_changed = 0;
3397 if (!bond_has_slaves(bond))
3402 oldcurrent = rcu_dereference(bond->curr_active_slave);
3403 /* See if any of the previous devices are up now (i.e. they have
3404 * xmt and rcv traffic). The curr_active_slave does not come into
3405 * the picture unless it is null. Also, slave->last_link_up is not
3406 * needed here because we send an arp on each slave and give a slave
3407 * as long as it needs to get the tx/rx within the delta.
3408 * TODO: what about up/down delay in arp mode? It wasn't here before.
3411 bond_for_each_slave_rcu(bond, slave, iter) {
3412 unsigned long last_tx = slave_last_tx(slave);
3414 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3416 if (slave->link != BOND_LINK_UP) {
3417 if (bond_time_in_interval(bond, last_tx, 1) &&
3418 bond_time_in_interval(bond, slave->last_rx, 1)) {
3420 bond_propose_link_state(slave, BOND_LINK_UP);
3421 slave_state_changed = 1;
3423 /* primary_slave has no meaning in round-robin
3424 * mode. the window of a slave being up and
3425 * curr_active_slave being null after enslaving
3429 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3432 slave_info(bond->dev, slave->dev, "interface is now up\n");
3436 /* slave->link == BOND_LINK_UP */
3438 /* not all switches will respond to an arp request
3439 * when the source ip is 0, so don't take the link down
3440 * if we don't know our ip yet
3442 if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3443 !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3445 bond_propose_link_state(slave, BOND_LINK_DOWN);
3446 slave_state_changed = 1;
3448 if (slave->link_failure_count < UINT_MAX)
3449 slave->link_failure_count++;
3451 slave_info(bond->dev, slave->dev, "interface is now down\n");
3453 if (slave == oldcurrent)
3458 /* note: if switch is in round-robin mode, all links
3459 * must tx arp to ensure all links rx an arp - otherwise
3460 * links may oscillate or not come up at all; if switch is
3461 * in something like xor mode, there is nothing we can
3462 * do - all replies will be rx'ed on same link causing slaves
3463 * to be unstable during low/no traffic periods
3465 if (bond_slave_is_up(slave))
3466 bond_send_validate(bond, slave);
3471 if (do_failover || slave_state_changed) {
3472 if (!rtnl_trylock())
3473 goto re_arm;
3475 bond_for_each_slave(bond, slave, iter) {
3476 if (slave->link_new_state != BOND_LINK_NOCHANGE)
3477 slave->link = slave->link_new_state;
3480 if (slave_state_changed) {
3481 bond_slave_state_change(bond);
3482 if (BOND_MODE(bond) == BOND_MODE_XOR)
3483 bond_update_slave_arr(bond, NULL);
3485 if (do_failover) {
3486 block_netpoll_tx();
3487 bond_select_active_slave(bond);
3488 unblock_netpoll_tx();
3494 if (bond->params.arp_interval)
3495 queue_delayed_work(bond->wq, &bond->arp_work,
3496 msecs_to_jiffies(bond->params.arp_interval));
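/* Illustrative configuration exercising this monitor (example values):
 *	modprobe bonding mode=balance-xor arp_interval=500 \
 *		arp_ip_target=192.0.2.1
 * Each slave must then tx and rx within missed_max intervals, or it is
 * marked down above.
 */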
3499 /* Called to inspect slaves for active-backup mode ARP monitor link state
3500 * changes. Sets proposed link state in slaves to specify what action
3501 * should take place for the slave. Returns 0 if no changes are found, >0
3502 * if changes to link states must be committed.
3504 * Called with rcu_read_lock held.
3506 static int bond_ab_arp_inspect(struct bonding *bond)
3508 unsigned long last_tx, last_rx;
3509 struct list_head *iter;
3510 struct slave *slave;
3513 bond_for_each_slave_rcu(bond, slave, iter) {
3514 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3515 last_rx = slave_last_rx(bond, slave);
3517 if (slave->link != BOND_LINK_UP) {
3518 if (bond_time_in_interval(bond, last_rx, 1)) {
3519 bond_propose_link_state(slave, BOND_LINK_UP);
3521 } else if (slave->link == BOND_LINK_BACK) {
3522 bond_propose_link_state(slave, BOND_LINK_FAIL);
3528 /* Give slaves 2*delta after being enslaved or made
3529 * active. This avoids bouncing, as the last receive
3530 * times need a full ARP monitor cycle to be updated.
3532 if (bond_time_in_interval(bond, slave->last_link_up, 2))
3535 /* Backup slave is down if:
3536 * - No current_arp_slave AND
3537 * - more than (missed_max+1)*delta since last receive AND
3538 * - the bond has an IP address
3540 * Note: a non-null current_arp_slave indicates
3541 * the curr_active_slave went down and we are
3542 * searching for a new one; under this condition
3543 * we only take the curr_active_slave down - this
3544 * gives each slave a chance to tx/rx traffic
3545 * before being taken out
3547 if (!bond_is_active_slave(slave) &&
3548 !rcu_access_pointer(bond->current_arp_slave) &&
3549 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3550 bond_propose_link_state(slave, BOND_LINK_DOWN);
3554 /* Active slave is down if:
3555 * - more than missed_max*delta since transmitting OR
3556 * - (more than missed_max*delta since receive AND
3557 * the bond has an IP address)
3559 last_tx = slave_last_tx(slave);
3560 if (bond_is_active_slave(slave) &&
3561 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3562 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3563 bond_propose_link_state(slave, BOND_LINK_DOWN);
3571 /* Called to commit link state changes noted by inspection step of
3572 * active-backup mode ARP monitor.
3574 * Called with RTNL held.
3576 static void bond_ab_arp_commit(struct bonding *bond)
3578 bool do_failover = false;
3579 struct list_head *iter;
3580 unsigned long last_tx;
3581 struct slave *slave;
3583 bond_for_each_slave(bond, slave, iter) {
3584 switch (slave->link_new_state) {
3585 case BOND_LINK_NOCHANGE:
3589 last_tx = slave_last_tx(slave);
3590 if (rtnl_dereference(bond->curr_active_slave) != slave ||
3591 (!rtnl_dereference(bond->curr_active_slave) &&
3592 bond_time_in_interval(bond, last_tx, 1))) {
3593 struct slave *current_arp_slave;
3595 current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3596 bond_set_slave_link_state(slave, BOND_LINK_UP,
3597 BOND_SLAVE_NOTIFY_NOW);
3598 if (current_arp_slave) {
3599 bond_set_slave_inactive_flags(
3601 BOND_SLAVE_NOTIFY_NOW);
3602 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3605 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3607 if (!rtnl_dereference(bond->curr_active_slave) ||
3608 slave == rtnl_dereference(bond->primary_slave) ||
3609 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
3616 case BOND_LINK_DOWN:
3617 if (slave->link_failure_count < UINT_MAX)
3618 slave->link_failure_count++;
3620 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3621 BOND_SLAVE_NOTIFY_NOW);
3622 bond_set_slave_inactive_flags(slave,
3623 BOND_SLAVE_NOTIFY_NOW);
3625 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3627 if (slave == rtnl_dereference(bond->curr_active_slave)) {
3628 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3634 case BOND_LINK_FAIL:
3635 bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3636 BOND_SLAVE_NOTIFY_NOW);
3637 bond_set_slave_inactive_flags(slave,
3638 BOND_SLAVE_NOTIFY_NOW);
3640 /* A slave has just been enslaved and has become
3641 * the current active slave.
3643 if (rtnl_dereference(bond->curr_active_slave))
3644 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3648 slave_err(bond->dev, slave->dev,
3649 "impossible: link_new_state %d on slave\n",
3650 slave->link_new_state);
3657 bond_select_active_slave(bond);
3658 unblock_netpoll_tx();
3661 bond_set_carrier(bond);
3664 /* Send ARP probes for active-backup mode ARP monitor.
3666 * Called with rcu_read_lock held.
3668 static bool bond_ab_arp_probe(struct bonding *bond)
3670 struct slave *slave, *before = NULL, *new_slave = NULL,
3671 *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3672 *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3673 struct list_head *iter;
3674 bool found = false;
3675 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3677 if (curr_arp_slave && curr_active_slave)
3678 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3679 curr_arp_slave->dev->name,
3680 curr_active_slave->dev->name);
3682 if (curr_active_slave) {
3683 bond_send_validate(bond, curr_active_slave);
3684 return should_notify_rtnl;
3687 /* if we don't have a curr_active_slave, search for the next available
3688 * backup slave from the current_arp_slave and make it the candidate
3689 * for becoming the curr_active_slave
3692 if (!curr_arp_slave) {
3693 curr_arp_slave = bond_first_slave_rcu(bond);
3694 if (!curr_arp_slave)
3695 return should_notify_rtnl;
3698 bond_for_each_slave_rcu(bond, slave, iter) {
3699 if (!found && !before && bond_slave_is_up(slave))
3702 if (found && !new_slave && bond_slave_is_up(slave))
3704 /* if the link state is up at this point, we
3705 * mark it down - this can happen if we have
3706 * simultaneous link failures and
3707 * reselect_active_interface doesn't make this
3708 * one the current slave, so it is still marked
3709 * up when it is actually down
3711 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3712 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3713 BOND_SLAVE_NOTIFY_LATER);
3714 if (slave->link_failure_count < UINT_MAX)
3715 slave->link_failure_count++;
3717 bond_set_slave_inactive_flags(slave,
3718 BOND_SLAVE_NOTIFY_LATER);
3720 slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3722 if (slave == curr_arp_slave)
3726 if (!new_slave && before)
3732 bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3733 BOND_SLAVE_NOTIFY_LATER);
3734 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3735 bond_send_validate(bond, new_slave);
3736 new_slave->last_link_up = jiffies;
3737 rcu_assign_pointer(bond->current_arp_slave, new_slave);
3740 bond_for_each_slave_rcu(bond, slave, iter) {
3741 if (slave->should_notify || slave->should_notify_link) {
3742 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3746 return should_notify_rtnl;
3749 static void bond_activebackup_arp_mon(struct bonding *bond)
3751 bool should_notify_peers = false;
3752 bool should_notify_rtnl = false;
3753 int delta_in_ticks;
3755 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3757 if (!bond_has_slaves(bond))
3762 should_notify_peers = bond_should_notify_peers(bond);
3764 if (bond_ab_arp_inspect(bond)) {
3767 /* Race avoidance with bond_close flush of workqueue */
3768 if (!rtnl_trylock()) {
3769 delta_in_ticks = 1;
3770 should_notify_peers = false;
3774 bond_ab_arp_commit(bond);
3780 should_notify_rtnl = bond_ab_arp_probe(bond);
3784 if (bond->params.arp_interval)
3785 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3787 if (should_notify_peers || should_notify_rtnl) {
3788 if (!rtnl_trylock())
3789 return;
3791 if (should_notify_peers) {
3792 bond->send_peer_notif--;
3793 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3796 if (should_notify_rtnl) {
3797 bond_slave_state_notify(bond);
3798 bond_slave_link_notify(bond);
3805 static void bond_arp_monitor(struct work_struct *work)
3807 struct bonding *bond = container_of(work, struct bonding,
3808 arp_work.work);
3810 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3811 bond_activebackup_arp_mon(bond);
3813 bond_loadbalance_arp_mon(bond);
3816 /*-------------------------- netdev event handling --------------------------*/
3818 /* Change device name */
3819 static int bond_event_changename(struct bonding *bond)
3821 bond_remove_proc_entry(bond);
3822 bond_create_proc_entry(bond);
3824 bond_debug_reregister(bond);
3829 static int bond_master_netdev_event(unsigned long event,
3830 struct net_device *bond_dev)
3832 struct bonding *event_bond = netdev_priv(bond_dev);
3834 netdev_dbg(bond_dev, "%s called\n", __func__);
3837 case NETDEV_CHANGENAME:
3838 return bond_event_changename(event_bond);
3839 case NETDEV_UNREGISTER:
3840 bond_remove_proc_entry(event_bond);
3841 #ifdef CONFIG_XFRM_OFFLOAD
3842 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3843 #endif /* CONFIG_XFRM_OFFLOAD */
3845 case NETDEV_REGISTER:
3846 bond_create_proc_entry(event_bond);
3855 static int bond_slave_netdev_event(unsigned long event,
3856 struct net_device *slave_dev)
3858 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3859 struct bonding *bond;
3860 struct net_device *bond_dev;
3862 /* A netdev event can be generated while enslaving a device
3863 * before netdev_rx_handler_register is called, in which case
3864 * slave will be NULL
3867 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3871 bond_dev = slave->bond->dev;
3872 bond = slave->bond;
3873 primary = rtnl_dereference(bond->primary_slave);
3875 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3878 case NETDEV_UNREGISTER:
3879 if (bond_dev->type != ARPHRD_ETHER)
3880 bond_release_and_destroy(bond_dev, slave_dev);
3882 __bond_release_one(bond_dev, slave_dev, false, true);
3886 /* For 802.3ad mode only:
3887 * Getting invalid Speed/Duplex values here will put the slave
3888 * in a weird state. Mark it as link-fail if the link was
3889 * previously up or link-down if it hasn't yet come up, and
3890 * let link-monitoring (miimon) set it right when correct
3891 * speeds/duplex are available.
3893 if (bond_update_speed_duplex(slave) &&
3894 BOND_MODE(bond) == BOND_MODE_8023AD) {
3895 if (slave->last_link_up)
3896 slave->link = BOND_LINK_FAIL;
3898 slave->link = BOND_LINK_DOWN;
3901 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3902 bond_3ad_adapter_speed_duplex_changed(slave);
3905 /* Refresh slave-array if applicable!
3906 * If the setup does not use miimon or arpmon (mode-specific!),
3907 * then these events will not cause the slave-array to be
3908 * refreshed. This will cause xmit to use a slave that is not
3909 * usable. Avoid such a situation by refreshing the array at these
3910 * events. If these (miimon/arpmon) parameters are configured
3911 * then the array gets refreshed twice and that should be fine!
3913 if (bond_mode_can_use_xmit_hash(bond))
3914 bond_update_slave_arr(bond, NULL);
3916 case NETDEV_CHANGEMTU:
3917 /* TODO: Should slaves be allowed to
3918 * independently alter their MTU? For
3919 * an active-backup bond, slaves need
3920 * not be the same type of device, so
3921 * MTUs may vary. For other modes,
3922 * slaves arguably should have the
3923 * same MTUs. To do this, we'd need to
3924 * take over the slave's change_mtu
3925 * function for the duration of their attachment.
3929 case NETDEV_CHANGENAME:
3930 /* we don't care if we don't have primary set */
3931 if (!bond_uses_primary(bond) ||
3932 !bond->params.primary[0])
3935 if (slave == primary) {
3936 /* slave's name changed - it's no longer primary */
3937 RCU_INIT_POINTER(bond->primary_slave, NULL);
3938 } else if (!strcmp(slave_dev->name, bond->params.primary)) {
3939 /* we have a new primary slave */
3940 rcu_assign_pointer(bond->primary_slave, slave);
3941 } else { /* we didn't change primary - exit */
3945 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3946 primary ? slave_dev->name : "none");
3949 bond_select_active_slave(bond);
3950 unblock_netpoll_tx();
3952 case NETDEV_FEAT_CHANGE:
3953 if (!bond->notifier_ctx) {
3954 bond->notifier_ctx = true;
3955 bond_compute_features(bond);
3956 bond->notifier_ctx = false;
3959 case NETDEV_RESEND_IGMP:
3960 /* Propagate to master device */
3961 call_netdevice_notifiers(event, slave->bond->dev);
3963 case NETDEV_XDP_FEAT_CHANGE:
3964 bond_xdp_set_features(bond_dev);
3973 /* bond_netdev_event: handle netdev notifier chain events.
3975 * This function receives events for the netdev chain. The caller (an
3976 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3977 * locks for us to safely manipulate the slave devices (RTNL lock, dev_base_lock for read).
3980 static int bond_netdev_event(struct notifier_block *this,
3981 unsigned long event, void *ptr)
3983 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3985 netdev_dbg(event_dev, "%s received %s\n",
3986 __func__, netdev_cmd_to_name(event));
3988 if (!(event_dev->priv_flags & IFF_BONDING))
3991 if (event_dev->flags & IFF_MASTER) {
3994 ret = bond_master_netdev_event(event, event_dev);
3995 if (ret != NOTIFY_DONE)
3999 if (event_dev->flags & IFF_SLAVE)
4000 return bond_slave_netdev_event(event, event_dev);
4005 static struct notifier_block bond_netdev_notifier = {
4006 .notifier_call = bond_netdev_event,
4009 /*---------------------------- Hashing Policies -----------------------------*/
4011 /* Helper to access data in a packet, with or without a backing skb.
4012 * If skb is given, the data is linearized if necessary via pskb_may_pull.
4014 static inline const void *bond_pull_data(struct sk_buff *skb,
4015 const void *data, int hlen, int n)
4017 if (likely(n <= hlen))
4018 return data;
4019 else if (skb && likely(pskb_may_pull(skb, n)))
4020 return skb->data;
4022 return NULL;
4025 /* L2 hash helper */
4026 static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4027 struct ethhdr *ep;
4030 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4031 if (!data)
4032 return 0;
4034 ep = (struct ethhdr *)(data + mhoff);
4035 return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
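/* Worked example (hypothetical frame): h_dest ending in 0x05, h_source
 * ending in 0x0a and h_proto == ETH_P_IP (0x0800) hash to
 * 0x05 ^ 0x0a ^ 0x0800 = 0x080f.
 */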
4038 static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
4039 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
4041 const struct ipv6hdr *iph6;
4042 const struct iphdr *iph;
4044 if (l2_proto == htons(ETH_P_IP)) {
4045 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
4046 if (!data)
4047 return false;
4049 iph = (const struct iphdr *)(data + *nhoff);
4050 iph_to_flow_copy_v4addrs(fk, iph);
4051 *nhoff += iph->ihl << 2;
4052 if (!ip_is_fragment(iph))
4053 *ip_proto = iph->protocol;
4054 } else if (l2_proto == htons(ETH_P_IPV6)) {
4055 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
4056 if (!data)
4057 return false;
4059 iph6 = (const struct ipv6hdr *)(data + *nhoff);
4060 iph_to_flow_copy_v6addrs(fk, iph6);
4061 *nhoff += sizeof(*iph6);
4062 *ip_proto = iph6->nexthdr;
4067 if (l34 && *ip_proto >= 0)
4068 fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
4073 static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4075 u32 srcmac_vendor = 0, srcmac_dev = 0;
4076 struct ethhdr *mac_hdr;
4077 u16 vlan = 0;
4078 int i;
4080 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4081 if (!data)
4082 return 0;
4083 mac_hdr = (struct ethhdr *)(data + mhoff);
4085 for (i = 0; i < 3; i++)
4086 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
4088 for (i = 3; i < ETH_ALEN; i++)
4089 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
4091 if (skb && skb_vlan_tag_present(skb))
4092 vlan = skb_vlan_tag_get(skb);
4094 return vlan ^ srcmac_vendor ^ srcmac_dev;
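/* Worked example (hypothetical frame): source MAC 00:11:22:33:44:55 with
 * VLAN tag 100 gives srcmac_vendor = 0x001122 and srcmac_dev = 0x334455,
 * so the result is 100 ^ 0x001122 ^ 0x334455.
 */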
4097 /* Extract the appropriate headers based on bond's xmit policy */
4098 static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
4099 __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
4101 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
4102 int ip_proto = -1;
4104 switch (bond->params.xmit_policy) {
4105 case BOND_XMIT_POLICY_ENCAP23:
4106 case BOND_XMIT_POLICY_ENCAP34:
4107 memset(fk, 0, sizeof(*fk));
4108 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
4109 fk, data, l2_proto, nhoff, hlen, 0);
4114 fk->ports.ports = 0;
4115 memset(&fk->icmp, 0, sizeof(fk->icmp));
4116 if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4119 /* ICMP error packets contain at least 8 bytes of the header
4120 * of the packet which generated the error. Use this information
4121 * to correlate ICMP error packets within the same flow which
4122 * generated the error.
4124 if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
4125 skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
4126 if (ip_proto == IPPROTO_ICMP) {
4127 if (!icmp_is_err(fk->icmp.type))
4130 nhoff += sizeof(struct icmphdr);
4131 } else if (ip_proto == IPPROTO_ICMPV6) {
4132 if (!icmpv6_is_err(fk->icmp.type))
4135 nhoff += sizeof(struct icmp6hdr);
4137 return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
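/* Sketch of the intent: an ICMP "port unreachable" elicited by a UDP
 * flow embeds the start of the offending UDP header, so dissecting past
 * the ICMP header above lets the error hash onto the same slave as the
 * UDP flow that triggered it.
 */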
4143 static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
4145 hash ^= (__force u32)flow_get_u32_dst(flow) ^
4146 (__force u32)flow_get_u32_src(flow);
4147 hash ^= (hash >> 16);
4148 hash ^= (hash >> 8);
4150 /* discard lowest hash bit to deal with the common even ports pattern */
4151 if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
4152 xmit_policy == BOND_XMIT_POLICY_ENCAP34)
4158 /* Generate hash based on xmit policy. If @skb is given, it is used to linearize
4159 * the data as required, but this function can be used without it if the data is
4160 * known to be linear (e.g. with xdp_buff).
4162 static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
4163 __be16 l2_proto, int mhoff, int nhoff, int hlen)
4165 struct flow_keys flow;
4166 u32 hash;
4168 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
4169 return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
4171 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
4172 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4173 return bond_eth_hash(skb, data, mhoff, hlen);
4175 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
4176 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
4177 hash = bond_eth_hash(skb, data, mhoff, hlen);
4180 memcpy(&hash, &flow.icmp, sizeof(hash));
4182 memcpy(&hash, &flow.ports.ports, sizeof(hash));
4185 return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4189 * bond_xmit_hash - generate a hash value based on the xmit policy
4190 * @bond: bonding device
4191 * @skb: buffer to use for headers
4193 * This function will extract the necessary headers from the skb buffer and use
4194 * them to generate a hash based on the xmit_policy set in the bonding device
4196 u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
4198 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
4199 skb->l4_hash)
4200 return skb->hash;
4202 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
4203 skb_mac_offset(skb), skb_network_offset(skb),
4208 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
4209 * @bond: bonding device
4210 * @xdp: buffer to use for headers
4212 * The XDP variant of bond_xmit_hash.
4214 static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
4216 struct ethhdr *eth;
4218 if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
4219 return 0;
4221 eth = (struct ethhdr *)xdp->data;
4223 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
4224 sizeof(struct ethhdr), xdp->data_end - xdp->data);
4227 /*-------------------------- Device entry points ----------------------------*/
4229 void bond_work_init_all(struct bonding *bond)
4231 INIT_DELAYED_WORK(&bond->mcast_work,
4232 bond_resend_igmp_join_requests_delayed);
4233 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
4234 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
4235 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
4236 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
4237 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
4240 static void bond_work_cancel_all(struct bonding *bond)
4242 cancel_delayed_work_sync(&bond->mii_work);
4243 cancel_delayed_work_sync(&bond->arp_work);
4244 cancel_delayed_work_sync(&bond->alb_work);
4245 cancel_delayed_work_sync(&bond->ad_work);
4246 cancel_delayed_work_sync(&bond->mcast_work);
4247 cancel_delayed_work_sync(&bond->slave_arr_work);
4250 static int bond_open(struct net_device *bond_dev)
4252 struct bonding *bond = netdev_priv(bond_dev);
4253 struct list_head *iter;
4254 struct slave *slave;
4256 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
4257 bond->rr_tx_counter = alloc_percpu(u32);
4258 if (!bond->rr_tx_counter)
4259 return -ENOMEM;
4262 /* reset slave->backup and slave->inactive */
4263 if (bond_has_slaves(bond)) {
4264 bond_for_each_slave(bond, slave, iter) {
4265 if (bond_uses_primary(bond) &&
4266 slave != rcu_access_pointer(bond->curr_active_slave)) {
4267 bond_set_slave_inactive_flags(slave,
4268 BOND_SLAVE_NOTIFY_NOW);
4269 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
4270 bond_set_slave_active_flags(slave,
4271 BOND_SLAVE_NOTIFY_NOW);
4276 if (bond_is_lb(bond)) {
4277 /* bond_alb_initialize must be called before the timer is started */
4280 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
4281 return -ENOMEM;
4282 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
4283 queue_delayed_work(bond->wq, &bond->alb_work, 0);
4286 if (bond->params.miimon) /* link check interval, in milliseconds. */
4287 queue_delayed_work(bond->wq, &bond->mii_work, 0);
4289 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
4290 queue_delayed_work(bond->wq, &bond->arp_work, 0);
4291 bond->recv_probe = bond_rcv_validate;
4294 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4295 queue_delayed_work(bond->wq, &bond->ad_work, 0);
4296 /* register to receive LACPDUs */
4297 bond->recv_probe = bond_3ad_lacpdu_recv;
4298 bond_3ad_initiate_agg_selection(bond, 1);
4300 bond_for_each_slave(bond, slave, iter)
4301 dev_mc_add(slave->dev, lacpdu_mcast_addr);
4304 if (bond_mode_can_use_xmit_hash(bond))
4305 bond_update_slave_arr(bond, NULL);
4310 static int bond_close(struct net_device *bond_dev)
4312 struct bonding *bond = netdev_priv(bond_dev);
4313 struct slave *slave;
4315 bond_work_cancel_all(bond);
4316 bond->send_peer_notif = 0;
4317 if (bond_is_lb(bond))
4318 bond_alb_deinitialize(bond);
4319 bond->recv_probe = NULL;
4321 if (bond_uses_primary(bond)) {
4323 slave = rcu_dereference(bond->curr_active_slave);
4325 bond_hw_addr_flush(bond_dev, slave->dev);
4328 struct list_head *iter;
4330 bond_for_each_slave(bond, slave, iter)
4331 bond_hw_addr_flush(bond_dev, slave->dev);
4337 /* fold stats, assuming all rtnl_link_stats64 fields are u64, but
4338 * noting that some drivers can provide 32bit values only.
4340 static void bond_fold_stats(struct rtnl_link_stats64 *_res,
4341 const struct rtnl_link_stats64 *_new,
4342 const struct rtnl_link_stats64 *_old)
4344 const u64 *new = (const u64 *)_new;
4345 const u64 *old = (const u64 *)_old;
4346 u64 *res = (u64 *)_res;
4349 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4352 s64 delta = nv - ov;
4354 /* detects if this particular field is 32bit only */
4355 if (((nv | ov) >> 32) == 0)
4356 delta = (s64)(s32)((u32)nv - (u32)ov);
4358 /* filter anomalies; some drivers reset their stats
4359 * at down/up events.
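/* Editor's worked example: suppose a 32bit-only counter wrapped, e.g.
 * ov = 0xfffffff0 and nv = 0x00000010. The plain u64 delta would be a
 * huge negative value, but ((nv | ov) >> 32) == 0 flags the field as
 * 32bit, and (s64)(s32)((u32)0x10 - (u32)0xfffffff0) = 0x20, i.e. the
 * 32 units actually counted across the wrap.
 */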
4366 #ifdef CONFIG_LOCKDEP
4367 static int bond_get_lowest_level_rcu(struct net_device *dev)
4369 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4370 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4371 int cur = 0, max = 0;
4374 iter = &dev->adj_list.lower;
4379 ldev = netdev_next_lower_dev_rcu(now, &iter);
4384 niter = &ldev->adj_list.lower;
4385 dev_stack[cur] = now;
4386 iter_stack[cur++] = iter;
4395 next = dev_stack[--cur];
4396 niter = iter_stack[cur];
4407 static void bond_get_stats(struct net_device *bond_dev,
4408 struct rtnl_link_stats64 *stats)
4410 struct bonding *bond = netdev_priv(bond_dev);
4411 struct rtnl_link_stats64 temp;
4412 struct list_head *iter;
4413 struct slave *slave;
4418 #ifdef CONFIG_LOCKDEP
4419 nest_level = bond_get_lowest_level_rcu(bond_dev);
4422 spin_lock_nested(&bond->stats_lock, nest_level);
4423 memcpy(stats, &bond->bond_stats, sizeof(*stats));
4425 bond_for_each_slave_rcu(bond, slave, iter) {
4426 const struct rtnl_link_stats64 *new =
4427 dev_get_stats(slave->dev, &temp);
4429 bond_fold_stats(stats, new, &slave->slave_stats);
4431 /* save off the slave stats for the next run */
4432 memcpy(&slave->slave_stats, new, sizeof(*new));
4435 memcpy(&bond->bond_stats, stats, sizeof(*stats));
4436 spin_unlock(&bond->stats_lock);
4440 static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4442 struct bonding *bond = netdev_priv(bond_dev);
4443 struct mii_ioctl_data *mii = NULL;
4444 const struct net_device_ops *ops;
4445 struct net_device *real_dev;
4446 struct hwtstamp_config cfg;
4450 netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4461 /* We do this again just in case we were called by SIOCGMIIREG
4462 * instead of SIOCGMIIPHY.
4468 if (mii->reg_num == 1) {
4470 if (netif_carrier_ok(bond->dev))
4471 mii->val_out = BMSR_LSTATUS;
4476 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4479 if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
4484 real_dev = bond_option_active_slave_get_rcu(bond);
4488 strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
4489 ifrr.ifr_ifru = ifr->ifr_ifru;
4491 ops = real_dev->netdev_ops;
4492 if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
4493 res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
4497 ifr->ifr_ifru = ifrr.ifr_ifru;
4498 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4501 /* Set the BOND_PHC_INDEX flag to notify user space */
4502 cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
4504 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
4515 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4517 struct bonding *bond = netdev_priv(bond_dev);
4518 struct net_device *slave_dev = NULL;
4519 struct ifbond k_binfo;
4520 struct ifbond __user *u_binfo = NULL;
4521 struct ifslave k_sinfo;
4522 struct ifslave __user *u_sinfo = NULL;
4523 struct bond_opt_value newval;
4527 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4530 case SIOCBONDINFOQUERY:
4531 u_binfo = (struct ifbond __user *)ifr->ifr_data;
4533 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4536 bond_info_query(bond_dev, &k_binfo);
4537 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4541 case SIOCBONDSLAVEINFOQUERY:
4542 u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4544 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4547 res = bond_slave_info_query(bond_dev, &k_sinfo);
4549 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4557 net = dev_net(bond_dev);
4559 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4562 slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4564 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4570 case SIOCBONDENSLAVE:
4571 res = bond_enslave(bond_dev, slave_dev, NULL);
4573 case SIOCBONDRELEASE:
4574 res = bond_release(bond_dev, slave_dev);
4576 case SIOCBONDSETHWADDR:
4577 res = bond_set_dev_addr(bond_dev, slave_dev);
4579 case SIOCBONDCHANGEACTIVE:
4580 bond_opt_initstr(&newval, slave_dev->name);
4581 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4591 static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4592 void __user *data, int cmd)
4594 struct ifreq ifrdata = { .ifr_data = data };
4597 case BOND_INFO_QUERY_OLD:
4598 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4599 case BOND_SLAVE_INFO_QUERY_OLD:
4600 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4601 case BOND_ENSLAVE_OLD:
4602 return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4603 case BOND_RELEASE_OLD:
4604 return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4605 case BOND_SETHWADDR_OLD:
4606 return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4607 case BOND_CHANGE_ACTIVE_OLD:
4608 return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4614 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4616 struct bonding *bond = netdev_priv(bond_dev);
4618 if (change & IFF_PROMISC)
4619 bond_set_promiscuity(bond,
4620 bond_dev->flags & IFF_PROMISC ? 1 : -1);
4622 if (change & IFF_ALLMULTI)
4623 bond_set_allmulti(bond,
4624 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4627 static void bond_set_rx_mode(struct net_device *bond_dev)
4629 struct bonding *bond = netdev_priv(bond_dev);
4630 struct list_head *iter;
4631 struct slave *slave;
4634 if (bond_uses_primary(bond)) {
4635 slave = rcu_dereference(bond->curr_active_slave);
4637 dev_uc_sync(slave->dev, bond_dev);
4638 dev_mc_sync(slave->dev, bond_dev);
4641 bond_for_each_slave_rcu(bond, slave, iter) {
4642 dev_uc_sync_multiple(slave->dev, bond_dev);
4643 dev_mc_sync_multiple(slave->dev, bond_dev);
4649 static int bond_neigh_init(struct neighbour *n)
4651 struct bonding *bond = netdev_priv(n->dev);
4652 const struct net_device_ops *slave_ops;
4653 struct neigh_parms parms;
4654 struct slave *slave;
4658 slave = bond_first_slave_rcu(bond);
4661 slave_ops = slave->dev->netdev_ops;
4662 if (!slave_ops->ndo_neigh_setup)
4665 /* TODO: find another way [1] to implement this.
4666 * Passing a zeroed structure is fragile,
4667 * but at least we do not pass garbage.
4669 * [1] One way would be that ndo_neigh_setup() never touches
4670 * struct neigh_parms, but propagate the new neigh_setup()
4671 * back to ___neigh_create() / neigh_parms_alloc()
4673 memset(&parms, 0, sizeof(parms));
4674 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4679 if (parms.neigh_setup)
4680 ret = parms.neigh_setup(n);
4686 /* The bonding ndo_neigh_setup is called at init time before any
4687 * slave exists. So we must declare a proxy setup function which will
4688 * be used at run time to resolve the actual slave neigh param setup.
4690 * It's also called by master devices (such as vlans) to set up their
4691 * underlying devices. In that case - do nothing, we're already set up from
4694 static int bond_neigh_setup(struct net_device *dev,
4695 struct neigh_parms *parms)
4697 /* modify only our neigh_parms */
4698 if (parms->dev == dev)
4699 parms->neigh_setup = bond_neigh_init;
4704 /* Change the MTU of all of a master's slaves to match the master */
4705 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4707 struct bonding *bond = netdev_priv(bond_dev);
4708 struct slave *slave, *rollback_slave;
4709 struct list_head *iter;
4712 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4714 bond_for_each_slave(bond, slave, iter) {
4715 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4716 slave, slave->dev->netdev_ops->ndo_change_mtu);
4718 res = dev_set_mtu(slave->dev, new_mtu);
4721 /* If we failed to set the slave's mtu to the new value
4722 * we must abort the operation even in ACTIVE_BACKUP
4723 * mode, because if we allow the backup slaves to have
4724 * different mtu values than the active slave we'll
4725 * need to change their mtu when doing a failover. That
4726 * means changing their mtu from timer context, which
4727 * is probably not a good idea.
4729 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4735 bond_dev->mtu = new_mtu;
4740 /* unwind from head to the slave that failed */
4741 bond_for_each_slave(bond, rollback_slave, iter) {
4744 if (rollback_slave == slave)
4747 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4749 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4756 /* Change HW address
4758 * Note that many devices must be down to change the HW address, and
4759 * downing the master releases all slaves. We can make bonds full of
4760 * bonding devices to test this, however.
4762 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4764 struct bonding *bond = netdev_priv(bond_dev);
4765 struct slave *slave, *rollback_slave;
4766 struct sockaddr_storage *ss = addr, tmp_ss;
4767 struct list_head *iter;
4770 if (BOND_MODE(bond) == BOND_MODE_ALB)
4771 return bond_alb_set_mac_address(bond_dev, addr);
4774 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4776 /* If fail_over_mac is enabled, do nothing and return success.
4777 * Returning an error causes ifenslave to fail.
4779 if (bond->params.fail_over_mac &&
4780 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4783 if (!is_valid_ether_addr(ss->__data))
4784 return -EADDRNOTAVAIL;
4786 bond_for_each_slave(bond, slave, iter) {
4787 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4789 res = dev_set_mac_address(slave->dev, addr, NULL);
4791 /* TODO: consider downing the slave
4793 * User should expect communications
4794 * breakage anyway until ARP finish
4797 slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4804 dev_addr_set(bond_dev, ss->__data);
4808 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4809 tmp_ss.ss_family = bond_dev->type;
4811 /* unwind from head to the slave that failed */
4812 bond_for_each_slave(bond, rollback_slave, iter) {
4815 if (rollback_slave == slave)
4818 tmp_res = dev_set_mac_address(rollback_slave->dev,
4819 (struct sockaddr *)&tmp_ss, NULL);
4821 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
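/* Editor's illustrative sketch: bond_change_mtu() and
 * bond_set_mac_address() share a set-then-unwind idiom. With a
 * hypothetical per-slave setter it looks like:
 *
 *	bond_for_each_slave(bond, slave, iter) {
 *		res = apply_setting(slave->dev, new);	// hypothetical
 *		if (res)
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	bond_for_each_slave(bond, rollback_slave, iter) {
 *		if (rollback_slave == slave)
 *			break;	// only undo slaves already changed
 *		apply_setting(rollback_slave->dev, old);
 *	}
 */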
4830 * bond_get_slave_by_id - get xmit slave with slave_id
4831 * @bond: bonding device that is transmitting
4832 * @slave_id: slave id up to slave_cnt-1 through which to transmit
4834 * This function tries to get the slave with the given slave_id, but in
4835 * case it fails, it falls back to the first available slave for transmission.
4837 static struct slave *bond_get_slave_by_id(struct bonding *bond,
4840 struct list_head *iter;
4841 struct slave *slave;
4844 /* Here we start from the slave with slave_id */
4845 bond_for_each_slave_rcu(bond, slave, iter) {
4847 if (bond_slave_can_tx(slave))
4852 /* Here we start from the first slave up to slave_id */
4854 bond_for_each_slave_rcu(bond, slave, iter) {
4857 if (bond_slave_can_tx(slave))
4860 /* no slave that can tx has been found */
4865 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4866 * @bond: bonding device to use
4868 * Based on the value of the bonding device's packets_per_slave parameter
4869 * this function generates a slave id, which is usually used as the next
4870 * slave to transmit through.
4872 static u32 bond_rr_gen_slave_id(struct bonding *bond)
4875 struct reciprocal_value reciprocal_packets_per_slave;
4876 int packets_per_slave = bond->params.packets_per_slave;
4878 switch (packets_per_slave) {
4880 slave_id = get_random_u32();
4883 slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4886 reciprocal_packets_per_slave =
4887 bond->params.reciprocal_packets_per_slave;
4888 slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4889 slave_id = reciprocal_divide(slave_id,
4890 reciprocal_packets_per_slave);
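/* Editor's worked example: reciprocal_divide() is a multiply-based
 * substitute for slave_id / packets_per_slave. With
 * packets_per_slave = 3, successive counter values 0,1,2,3,4,5 divide
 * down to 0,0,0,1,1,1, so each slave receives three consecutive
 * packets before the callers' "% slave_cnt" rotates to the next one.
 */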
4897 static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4898 struct sk_buff *skb)
4900 struct slave *slave;
4904 /* Start with the curr_active_slave that joined the bond as the
4905 * default for sending IGMP traffic. For failover purposes one
4906 * needs to maintain some consistency for the interface that will
4907 * send the join/membership reports. The curr_active_slave found
4908 * will send all of this type of traffic.
4910 if (skb->protocol == htons(ETH_P_IP)) {
4911 int noff = skb_network_offset(skb);
4914 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4918 if (iph->protocol == IPPROTO_IGMP) {
4919 slave = rcu_dereference(bond->curr_active_slave);
4922 return bond_get_slave_by_id(bond, 0);
4927 slave_cnt = READ_ONCE(bond->slave_cnt);
4928 if (likely(slave_cnt)) {
4929 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4930 return bond_get_slave_by_id(bond, slave_id);
4935 static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4936 struct xdp_buff *xdp)
4938 struct slave *slave;
4941 const struct ethhdr *eth;
4942 void *data = xdp->data;
4944 if (data + sizeof(struct ethhdr) > xdp->data_end)
4947 eth = (struct ethhdr *)data;
4948 data += sizeof(struct ethhdr);
4950 /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4951 if (eth->h_proto == htons(ETH_P_IP)) {
4952 const struct iphdr *iph;
4954 if (data + sizeof(struct iphdr) > xdp->data_end)
4957 iph = (struct iphdr *)data;
4959 if (iph->protocol == IPPROTO_IGMP) {
4960 slave = rcu_dereference(bond->curr_active_slave);
4963 return bond_get_slave_by_id(bond, 0);
4968 slave_cnt = READ_ONCE(bond->slave_cnt);
4969 if (likely(slave_cnt)) {
4970 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4971 return bond_get_slave_by_id(bond, slave_id);
4976 static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4977 struct net_device *bond_dev)
4979 struct bonding *bond = netdev_priv(bond_dev);
4980 struct slave *slave;
4982 slave = bond_xmit_roundrobin_slave_get(bond, skb);
4984 return bond_dev_queue_xmit(bond, skb, slave->dev);
4986 return bond_tx_drop(bond_dev, skb);
4989 static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4991 return rcu_dereference(bond->curr_active_slave);
4994 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
4995 * the bond has a usable interface.
4997 static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4998 struct net_device *bond_dev)
5000 struct bonding *bond = netdev_priv(bond_dev);
5001 struct slave *slave;
5003 slave = bond_xmit_activebackup_slave_get(bond);
5005 return bond_dev_queue_xmit(bond, skb, slave->dev);
5007 return bond_tx_drop(bond_dev, skb);
5010 /* Use this to update slave_array when (a) it's not appropriate to update
5011 * slave_array right away (note that bond_update_slave_arr() may sleep)
5012 * and / or (b) RTNL is not held.
5014 void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
5016 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
5019 /* Slave array work handler. Holds only RTNL */
5020 static void bond_slave_arr_handler(struct work_struct *work)
5022 struct bonding *bond = container_of(work, struct bonding,
5023 slave_arr_work.work);
5026 if (!rtnl_trylock())
5029 ret = bond_update_slave_arr(bond, NULL);
5032 pr_warn_ratelimited("Failed to update slave array from WT\n");
5038 bond_slave_arr_work_rearm(bond, 1);
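/* Editor's illustrative sketch of the trylock-or-rearm idiom above: a
 * work handler that needs RTNL but must not sleep on it simply
 * reschedules itself:
 *
 *	if (!rtnl_trylock()) {
 *		queue_delayed_work(bond->wq, &bond->slave_arr_work, 1);
 *		return;
 *	}
 *	... RTNL-protected update ...
 *	rtnl_unlock();
 */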
5041 static void bond_skip_slave(struct bond_up_slave *slaves,
5042 struct slave *skipslave)
5046 /* Rare situation where caller has asked to skip a specific
5047 * slave but allocation failed (most likely!). BTW this is
5048 * only possible when the call is initiated from
5049 * __bond_release_one(). In this situation, overwrite the
5050 * skipslave entry in the array with the last entry from the
5051 * array to avoid a situation where the xmit path may choose
5052 * this to-be-skipped slave to send a packet out.
5054 for (idx = 0; slaves && idx < slaves->count; idx++) {
5055 if (skipslave == slaves->arr[idx]) {
5057 slaves->arr[slaves->count - 1];
5064 static void bond_set_slave_arr(struct bonding *bond,
5065 struct bond_up_slave *usable_slaves,
5066 struct bond_up_slave *all_slaves)
5068 struct bond_up_slave *usable, *all;
5070 usable = rtnl_dereference(bond->usable_slaves);
5071 rcu_assign_pointer(bond->usable_slaves, usable_slaves);
5072 kfree_rcu(usable, rcu);
5074 all = rtnl_dereference(bond->all_slaves);
5075 rcu_assign_pointer(bond->all_slaves, all_slaves);
5076 kfree_rcu(all, rcu);
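/* Editor's note: this is the standard RCU publish pattern. Xmit-path
 * readers use rcu_dereference(bond->usable_slaves) under
 * rcu_read_lock() and may briefly keep using the old array; the
 * writer swaps the pointer and defers the free:
 *
 *	old = rtnl_dereference(bond->usable_slaves);
 *	rcu_assign_pointer(bond->usable_slaves, new);
 *	kfree_rcu(old, rcu);
 */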
5079 static void bond_reset_slave_arr(struct bonding *bond)
5081 struct bond_up_slave *usable, *all;
5083 usable = rtnl_dereference(bond->usable_slaves);
5085 RCU_INIT_POINTER(bond->usable_slaves, NULL);
5086 kfree_rcu(usable, rcu);
5089 all = rtnl_dereference(bond->all_slaves);
5091 RCU_INIT_POINTER(bond->all_slaves, NULL);
5092 kfree_rcu(all, rcu);
5096 /* Build the usable slaves array in control path for modes that use xmit-hash
5097 * to determine the slave interface -
5098 * (a) BOND_MODE_8023AD
5100 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
5102 * The caller is expected to hold RTNL only and NO other lock!
5104 int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
5106 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
5107 struct slave *slave;
5108 struct list_head *iter;
5114 usable_slaves = kzalloc(struct_size(usable_slaves, arr,
5115 bond->slave_cnt), GFP_KERNEL);
5116 all_slaves = kzalloc(struct_size(all_slaves, arr,
5117 bond->slave_cnt), GFP_KERNEL);
5118 if (!usable_slaves || !all_slaves) {
5122 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5123 struct ad_info ad_info;
5125 spin_lock_bh(&bond->mode_lock);
5126 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
5127 spin_unlock_bh(&bond->mode_lock);
5128 pr_debug("bond_3ad_get_active_agg_info failed\n");
5129 /* No active aggregator means it's not safe to use
5130 * the previous array.
5132 bond_reset_slave_arr(bond);
5135 spin_unlock_bh(&bond->mode_lock);
5136 agg_id = ad_info.aggregator_id;
5138 bond_for_each_slave(bond, slave, iter) {
5139 if (skipslave == slave)
5142 all_slaves->arr[all_slaves->count++] = slave;
5143 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5144 struct aggregator *agg;
5146 agg = SLAVE_AD_INFO(slave)->port.aggregator;
5147 if (!agg || agg->aggregator_identifier != agg_id)
5150 if (!bond_slave_can_tx(slave))
5153 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5154 usable_slaves->count);
5156 usable_slaves->arr[usable_slaves->count++] = slave;
5159 bond_set_slave_arr(bond, usable_slaves, all_slaves);
5162 if (ret != 0 && skipslave) {
5163 bond_skip_slave(rtnl_dereference(bond->all_slaves),
5165 bond_skip_slave(rtnl_dereference(bond->usable_slaves),
5168 kfree_rcu(all_slaves, rcu);
5169 kfree_rcu(usable_slaves, rcu);
5174 static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
5175 struct sk_buff *skb,
5176 struct bond_up_slave *slaves)
5178 struct slave *slave;
5182 hash = bond_xmit_hash(bond, skb);
5183 count = slaves ? READ_ONCE(slaves->count) : 0;
5184 if (unlikely(!count))
5187 slave = slaves->arr[hash % count];
5191 static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
5192 struct xdp_buff *xdp)
5194 struct bond_up_slave *slaves;
5198 hash = bond_xmit_hash_xdp(bond, xdp);
5199 slaves = rcu_dereference(bond->usable_slaves);
5200 count = slaves ? READ_ONCE(slaves->count) : 0;
5201 if (unlikely(!count))
5204 return slaves->arr[hash % count];
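/* Editor's worked example: with three usable slaves, a flow whose
 * packets hash to 7 always maps to slaves->arr[7 % 3] == arr[1], so a
 * given flow stays in order on one slave while distinct flows spread
 * across the array.
 */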
5207 /* Use this Xmit function for 3AD as well as XOR modes. The current
5208 * usable slave array is formed in the control path. The xmit function
5209 * just calculates hash and sends the packet out.
5211 static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
5212 struct net_device *dev)
5214 struct bonding *bond = netdev_priv(dev);
5215 struct bond_up_slave *slaves;
5216 struct slave *slave;
5218 slaves = rcu_dereference(bond->usable_slaves);
5219 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5221 return bond_dev_queue_xmit(bond, skb, slave->dev);
5223 return bond_tx_drop(dev, skb);
5226 /* in broadcast mode, we send everything to all usable interfaces. */
5227 static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
5228 struct net_device *bond_dev)
5230 struct bonding *bond = netdev_priv(bond_dev);
5231 struct slave *slave = NULL;
5232 struct list_head *iter;
5233 bool xmit_suc = false;
5234 bool skb_used = false;
5236 bond_for_each_slave_rcu(bond, slave, iter) {
5237 struct sk_buff *skb2;
5239 if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
5242 if (bond_is_last_slave(bond, slave)) {
5246 skb2 = skb_clone(skb, GFP_ATOMIC);
5248 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5249 bond_dev->name, __func__);
5254 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5259 dev_kfree_skb_any(skb);
5262 return NETDEV_TX_OK;
5264 dev_core_stats_tx_dropped_inc(bond_dev);
5265 return NET_XMIT_DROP;
5268 /*------------------------- Device initialization ---------------------------*/
5270 /* Look up the slave that corresponds to a qid */
5271 static inline int bond_slave_override(struct bonding *bond,
5272 struct sk_buff *skb)
5274 struct slave *slave = NULL;
5275 struct list_head *iter;
5277 if (!skb_rx_queue_recorded(skb))
5280 /* Find out if any slaves have the same mapping as this skb. */
5281 bond_for_each_slave_rcu(bond, slave, iter) {
5282 if (slave->queue_id == skb_get_queue_mapping(skb)) {
5283 if (bond_slave_is_up(slave) &&
5284 slave->link == BOND_LINK_UP) {
5285 bond_dev_queue_xmit(bond, skb, slave->dev);
5288 /* If the slave isn't UP, use default transmit policy. */
5297 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
5298 struct net_device *sb_dev)
5300 /* This helper function exists to help dev_pick_tx get the correct
5301 * destination queue. Using a helper function skips a call to
5302 * skb_tx_hash and will put the skbs in the queue we expect on their
5303 * way down to the bonding driver.
5305 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
5307 /* Save the original txq to restore before passing to the driver */
5308 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
5310 if (unlikely(txq >= dev->real_num_tx_queues)) {
5312 txq -= dev->real_num_tx_queues;
5313 } while (txq >= dev->real_num_tx_queues);
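/* Editor's worked example: a recorded rx queue of 5 on a bond with
 * real_num_tx_queues = 4 is folded 5 -> 1 by the loop above, keeping
 * the skb on a valid bond tx queue while preserving as much of the
 * original mapping as possible.
 */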
5318 static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
5319 struct sk_buff *skb,
5322 struct bonding *bond = netdev_priv(master_dev);
5323 struct bond_up_slave *slaves;
5324 struct slave *slave = NULL;
5326 switch (BOND_MODE(bond)) {
5327 case BOND_MODE_ROUNDROBIN:
5328 slave = bond_xmit_roundrobin_slave_get(bond, skb);
5330 case BOND_MODE_ACTIVEBACKUP:
5331 slave = bond_xmit_activebackup_slave_get(bond);
5333 case BOND_MODE_8023AD:
5336 slaves = rcu_dereference(bond->all_slaves);
5338 slaves = rcu_dereference(bond->usable_slaves);
5339 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5341 case BOND_MODE_BROADCAST:
5344 slave = bond_xmit_alb_slave_get(bond, skb);
5347 slave = bond_xmit_tlb_slave_get(bond, skb);
5350 /* Should never happen, mode already checked */
5351 WARN_ONCE(true, "Unknown bonding mode");
5360 static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5362 switch (sk->sk_family) {
5363 #if IS_ENABLED(CONFIG_IPV6)
5365 if (ipv6_only_sock(sk) ||
5366 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5367 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5368 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5369 flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5374 default: /* AF_INET */
5375 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5376 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5377 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5381 flow->ports.src = inet_sk(sk)->inet_sport;
5382 flow->ports.dst = inet_sk(sk)->inet_dport;
5386 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5387 * @sk: socket to use for headers
5389 * This function will extract the necessary fields from the socket and use
5390 * them to generate a hash based on the LAYER34 xmit_policy.
5391 * Assumes that sk is a TCP or UDP socket.
5393 static u32 bond_sk_hash_l34(struct sock *sk)
5395 struct flow_keys flow;
5398 bond_sk_to_flow(sk, &flow);
5401 memcpy(&hash, &flow.ports.ports, sizeof(hash));
5403 return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5406 static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5409 struct bond_up_slave *slaves;
5410 struct slave *slave;
5414 slaves = rcu_dereference(bond->usable_slaves);
5415 count = slaves ? READ_ONCE(slaves->count) : 0;
5416 if (unlikely(!count))
5419 hash = bond_sk_hash_l34(sk);
5420 slave = slaves->arr[hash % count];
5425 static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5428 struct bonding *bond = netdev_priv(dev);
5429 struct net_device *lower = NULL;
5432 if (bond_sk_check(bond))
5433 lower = __bond_sk_get_lower_dev(bond, sk);
5439 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5440 static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5441 struct net_device *dev)
5443 struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
5445 /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
5446 * was true, if tls_device_down is running in parallel, but it's OK,
5447 * because bond_get_slave_by_dev has a NULL check.
5449 if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
5450 return bond_dev_queue_xmit(bond, skb, tls_netdev);
5451 return bond_tx_drop(dev, skb);
5455 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5457 struct bonding *bond = netdev_priv(dev);
5459 if (bond_should_override_tx_queue(bond) &&
5460 !bond_slave_override(bond, skb))
5461 return NETDEV_TX_OK;
5463 #if IS_ENABLED(CONFIG_TLS_DEVICE)
5464 if (tls_is_skb_tx_device_offloaded(skb))
5465 return bond_tls_device_xmit(bond, skb, dev);
5468 switch (BOND_MODE(bond)) {
5469 case BOND_MODE_ROUNDROBIN:
5470 return bond_xmit_roundrobin(skb, dev);
5471 case BOND_MODE_ACTIVEBACKUP:
5472 return bond_xmit_activebackup(skb, dev);
5473 case BOND_MODE_8023AD:
5475 return bond_3ad_xor_xmit(skb, dev);
5476 case BOND_MODE_BROADCAST:
5477 return bond_xmit_broadcast(skb, dev);
5479 return bond_alb_xmit(skb, dev);
5481 return bond_tlb_xmit(skb, dev);
5483 /* Should never happen, mode already checked */
5484 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5486 return bond_tx_drop(dev, skb);
5490 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5492 struct bonding *bond = netdev_priv(dev);
5493 netdev_tx_t ret = NETDEV_TX_OK;
5495 /* If we risk deadlock from transmitting this in the
5496 * netpoll path, tell netpoll to queue the frame for later tx
5498 if (unlikely(is_netpoll_tx_blocked(dev)))
5499 return NETDEV_TX_BUSY;
5502 if (bond_has_slaves(bond))
5503 ret = __bond_start_xmit(skb, dev);
5505 ret = bond_tx_drop(dev, skb);
5511 static struct net_device *
5512 bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5514 struct bonding *bond = netdev_priv(bond_dev);
5515 struct slave *slave;
5517 /* Caller needs to hold rcu_read_lock() */
5519 switch (BOND_MODE(bond)) {
5520 case BOND_MODE_ROUNDROBIN:
5521 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5524 case BOND_MODE_ACTIVEBACKUP:
5525 slave = bond_xmit_activebackup_slave_get(bond);
5528 case BOND_MODE_8023AD:
5530 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5534 /* Should never happen. Mode guarded by bond_xdp_check() */
5535 netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5546 static int bond_xdp_xmit(struct net_device *bond_dev,
5547 int n, struct xdp_frame **frames, u32 flags)
5549 int nxmit, err = -ENXIO;
5553 for (nxmit = 0; nxmit < n; nxmit++) {
5554 struct xdp_frame *frame = frames[nxmit];
5555 struct xdp_frame *frames1[] = {frame};
5556 struct net_device *slave_dev;
5557 struct xdp_buff xdp;
5559 xdp_convert_frame_to_buff(frame, &xdp);
5561 slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5567 err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5574 /* If an error happened on the first frame then we can pass the error up,
5575 * otherwise report the number of frames that were transmitted.
5578 return (nxmit == 0 ? err : nxmit);
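/* Editor's worked example: for n = 4 frames where the slave's
 * ndo_xdp_xmit() fails on the third, nxmit is 2 and 2 is returned;
 * the caller then owns (and frees) the two unsent frames. Only a
 * failure on the very first frame (nxmit == 0) propagates the errno.
 */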
5583 static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
5584 struct netlink_ext_ack *extack)
5586 struct bonding *bond = netdev_priv(dev);
5587 struct list_head *iter;
5588 struct slave *slave, *rollback_slave;
5589 struct bpf_prog *old_prog;
5590 struct netdev_bpf xdp = {
5591 .command = XDP_SETUP_PROG,
5600 if (!bond_xdp_check(bond))
5603 old_prog = bond->xdp_prog;
5604 bond->xdp_prog = prog;
5606 bond_for_each_slave(bond, slave, iter) {
5607 struct net_device *slave_dev = slave->dev;
5609 if (!slave_dev->netdev_ops->ndo_bpf ||
5610 !slave_dev->netdev_ops->ndo_xdp_xmit) {
5611 SLAVE_NL_ERR(dev, slave_dev, extack,
5612 "Slave device does not support XDP");
5617 if (dev_xdp_prog_count(slave_dev) > 0) {
5618 SLAVE_NL_ERR(dev, slave_dev, extack,
5619 "Slave has XDP program loaded, please unload before enslaving");
5624 err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5626 /* ndo_bpf() sets extack error message */
5627 slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
5635 static_branch_inc(&bpf_master_redirect_enabled_key);
5636 } else if (old_prog) {
5637 bpf_prog_put(old_prog);
5638 static_branch_dec(&bpf_master_redirect_enabled_key);
5644 /* unwind the program changes */
5645 bond->xdp_prog = old_prog;
5646 xdp.prog = old_prog;
5647 xdp.extack = NULL; /* do not overwrite original error */
5649 bond_for_each_slave(bond, rollback_slave, iter) {
5650 struct net_device *slave_dev = rollback_slave->dev;
5653 if (slave == rollback_slave)
5656 err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
5658 slave_err(dev, slave_dev,
5659 "Error %d when unwinding XDP program change\n", err_unwind);
5661 bpf_prog_inc(xdp.prog);
5666 static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5668 switch (xdp->command) {
5669 case XDP_SETUP_PROG:
5670 return bond_xdp_set(dev, xdp->prog, xdp->extack);
5676 static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
5678 if (speed == 0 || speed == SPEED_UNKNOWN)
5679 speed = slave->speed;
5681 speed = min(speed, slave->speed);
5686 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
5687 struct ethtool_link_ksettings *cmd)
5689 struct bonding *bond = netdev_priv(bond_dev);
5690 struct list_head *iter;
5691 struct slave *slave;
5694 cmd->base.duplex = DUPLEX_UNKNOWN;
5695 cmd->base.port = PORT_OTHER;
5697 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
5698 * do not need to check mode. Though link speed might not represent
5699 * the true receive or transmit bandwidth (not all modes are symmetric),
5700 * this is an accurate maximum.
5702 bond_for_each_slave(bond, slave, iter) {
5703 if (bond_slave_can_tx(slave)) {
5704 if (slave->speed != SPEED_UNKNOWN) {
5705 if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
5706 speed = bond_mode_bcast_speed(slave,
5709 speed += slave->speed;
5711 if (cmd->base.duplex == DUPLEX_UNKNOWN &&
5712 slave->duplex != DUPLEX_UNKNOWN)
5713 cmd->base.duplex = slave->duplex;
5716 cmd->base.speed = speed ? : SPEED_UNKNOWN;
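/* Editor's worked example: two tx-capable slaves at 1000Mb/s report
 * 2000Mb/s in the summing modes above, while in broadcast mode
 * bond_mode_bcast_speed() keeps the minimum, so the same pair reports
 * 1000Mb/s.
 */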
5721 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
5722 struct ethtool_drvinfo *drvinfo)
5724 strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
5725 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
5729 static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
5730 struct ethtool_ts_info *info)
5732 struct bonding *bond = netdev_priv(bond_dev);
5733 struct ethtool_ts_info ts_info;
5734 const struct ethtool_ops *ops;
5735 struct net_device *real_dev;
5736 bool sw_tx_support = false;
5737 struct phy_device *phydev;
5738 struct list_head *iter;
5739 struct slave *slave;
5743 real_dev = bond_option_active_slave_get_rcu(bond);
5748 ops = real_dev->ethtool_ops;
5749 phydev = real_dev->phydev;
5751 if (phy_has_tsinfo(phydev)) {
5752 ret = phy_ts_info(phydev, info);
5754 } else if (ops->get_ts_info) {
5755 ret = ops->get_ts_info(real_dev, info);
5759 /* Check if all slaves support software tx timestamping */
5761 bond_for_each_slave_rcu(bond, slave, iter) {
5763 ops = slave->dev->ethtool_ops;
5764 phydev = slave->dev->phydev;
5766 if (phy_has_tsinfo(phydev))
5767 ret = phy_ts_info(phydev, &ts_info);
5768 else if (ops->get_ts_info)
5769 ret = ops->get_ts_info(slave->dev, &ts_info);
5771 if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
5772 sw_tx_support = true;
5776 sw_tx_support = false;
5783 info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
5784 SOF_TIMESTAMPING_SOFTWARE;
5786 info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;
5788 info->phc_index = -1;
5795 static const struct ethtool_ops bond_ethtool_ops = {
5796 .get_drvinfo = bond_ethtool_get_drvinfo,
5797 .get_link = ethtool_op_get_link,
5798 .get_link_ksettings = bond_ethtool_get_link_ksettings,
5799 .get_ts_info = bond_ethtool_get_ts_info,
5802 static const struct net_device_ops bond_netdev_ops = {
5803 .ndo_init = bond_init,
5804 .ndo_uninit = bond_uninit,
5805 .ndo_open = bond_open,
5806 .ndo_stop = bond_close,
5807 .ndo_start_xmit = bond_start_xmit,
5808 .ndo_select_queue = bond_select_queue,
5809 .ndo_get_stats64 = bond_get_stats,
5810 .ndo_eth_ioctl = bond_eth_ioctl,
5811 .ndo_siocbond = bond_do_ioctl,
5812 .ndo_siocdevprivate = bond_siocdevprivate,
5813 .ndo_change_rx_flags = bond_change_rx_flags,
5814 .ndo_set_rx_mode = bond_set_rx_mode,
5815 .ndo_change_mtu = bond_change_mtu,
5816 .ndo_set_mac_address = bond_set_mac_address,
5817 .ndo_neigh_setup = bond_neigh_setup,
5818 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
5819 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
5820 #ifdef CONFIG_NET_POLL_CONTROLLER
5821 .ndo_netpoll_setup = bond_netpoll_setup,
5822 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
5823 .ndo_poll_controller = bond_poll_controller,
5825 .ndo_add_slave = bond_enslave,
5826 .ndo_del_slave = bond_release,
5827 .ndo_fix_features = bond_fix_features,
5828 .ndo_features_check = passthru_features_check,
5829 .ndo_get_xmit_slave = bond_xmit_get_slave,
5830 .ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
5831 .ndo_bpf = bond_xdp,
5832 .ndo_xdp_xmit = bond_xdp_xmit,
5833 .ndo_xdp_get_xmit_slave = bond_xdp_get_xmit_slave,
5836 static const struct device_type bond_type = {
5840 static void bond_destructor(struct net_device *bond_dev)
5842 struct bonding *bond = netdev_priv(bond_dev);
5845 destroy_workqueue(bond->wq);
5847 if (bond->rr_tx_counter)
5848 free_percpu(bond->rr_tx_counter);
5851 void bond_setup(struct net_device *bond_dev)
5853 struct bonding *bond = netdev_priv(bond_dev);
5855 spin_lock_init(&bond->mode_lock);
5856 bond->params = bonding_defaults;
5858 /* Initialize pointers */
5859 bond->dev = bond_dev;
5861 /* Initialize the device entry points */
5862 ether_setup(bond_dev);
5863 bond_dev->max_mtu = ETH_MAX_MTU;
5864 bond_dev->netdev_ops = &bond_netdev_ops;
5865 bond_dev->ethtool_ops = &bond_ethtool_ops;
5867 bond_dev->needs_free_netdev = true;
5868 bond_dev->priv_destructor = bond_destructor;
5870 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
5872 /* Initialize the device options */
5873 bond_dev->flags |= IFF_MASTER;
5874 bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
5875 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
5877 #ifdef CONFIG_XFRM_OFFLOAD
5878 /* set up xfrm device ops (only supported in active-backup right now) */
5879 bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
5880 INIT_LIST_HEAD(&bond->ipsec_list);
5881 spin_lock_init(&bond->ipsec_lock);
5882 #endif /* CONFIG_XFRM_OFFLOAD */
5884 /* don't acquire bond device's netif_tx_lock when transmitting */
5885 bond_dev->features |= NETIF_F_LLTX;
5887 /* By default, we declare the bond to be fully
5888 * VLAN hardware accelerated capable. Special
5889 * care is taken in the various xmit functions
5890 * when there are slaves that are not hw accel
5894 /* Don't allow bond devices to change network namespaces. */
5895 bond_dev->features |= NETIF_F_NETNS_LOCAL;
5897 bond_dev->hw_features = BOND_VLAN_FEATURES |
5898 NETIF_F_HW_VLAN_CTAG_RX |
5899 NETIF_F_HW_VLAN_CTAG_FILTER;
5901 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
5902 bond_dev->features |= bond_dev->hw_features;
5903 bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
5904 #ifdef CONFIG_XFRM_OFFLOAD
5905 bond_dev->hw_features |= BOND_XFRM_FEATURES;
5906 /* Only enable XFRM features if this is an active-backup config */
5907 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
5908 bond_dev->features |= BOND_XFRM_FEATURES;
5909 #endif /* CONFIG_XFRM_OFFLOAD */
5911 if (bond_xdp_check(bond))
5912 bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
5915 /* Destroy a bonding device.
5916 * Must be under rtnl_lock when this function is called.
5918 static void bond_uninit(struct net_device *bond_dev)
5920 struct bonding *bond = netdev_priv(bond_dev);
5921 struct bond_up_slave *usable, *all;
5922 struct list_head *iter;
5923 struct slave *slave;
5925 bond_netpoll_cleanup(bond_dev);
5927 /* Release the bonded slaves */
5928 bond_for_each_slave(bond, slave, iter)
5929 __bond_release_one(bond_dev, slave->dev, true, true);
5930 netdev_info(bond_dev, "Released all slaves\n");
5932 usable = rtnl_dereference(bond->usable_slaves);
5934 RCU_INIT_POINTER(bond->usable_slaves, NULL);
5935 kfree_rcu(usable, rcu);
5938 all = rtnl_dereference(bond->all_slaves);
5940 RCU_INIT_POINTER(bond->all_slaves, NULL);
5941 kfree_rcu(all, rcu);
5944 list_del(&bond->bond_list);
5946 bond_debug_unregister(bond);
5949 /*------------------------- Module initialization ---------------------------*/
5951 static int bond_check_params(struct bond_params *params)
5953 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
5954 struct bond_opt_value newval;
5955 const struct bond_opt_value *valptr;
5956 int arp_all_targets_value = 0;
5957 u16 ad_actor_sys_prio = 0;
5958 u16 ad_user_port_key = 0;
5959 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
5961 int bond_mode = BOND_MODE_ROUNDROBIN;
5962 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
5966 /* Convert string parameters. */
5968 bond_opt_initstr(&newval, mode);
5969 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
5971 pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
5974 bond_mode = valptr->value;
5977 if (xmit_hash_policy) {
5978 if (bond_mode == BOND_MODE_ROUNDROBIN ||
5979 bond_mode == BOND_MODE_ACTIVEBACKUP ||
5980 bond_mode == BOND_MODE_BROADCAST) {
5981 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
5982 bond_mode_name(bond_mode));
5984 bond_opt_initstr(&newval, xmit_hash_policy);
5985 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
5988 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
5992 xmit_hashtype = valptr->value;
5997 if (bond_mode != BOND_MODE_8023AD) {
5998 pr_info("lacp_rate param is irrelevant in mode %s\n",
5999 bond_mode_name(bond_mode));
6001 bond_opt_initstr(&newval, lacp_rate);
6002 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
6005 pr_err("Error: Invalid lacp rate \"%s\"\n",
6009 lacp_fast = valptr->value;
6014 bond_opt_initstr(&newval, ad_select);
6015 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
6018 pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
6021 params->ad_select = valptr->value;
6022 if (bond_mode != BOND_MODE_8023AD)
6023 pr_warn("ad_select param only affects 802.3ad mode\n");
6025 params->ad_select = BOND_AD_STABLE;
6028 if (max_bonds < 0) {
6029 pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
6030 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
6031 max_bonds = BOND_DEFAULT_MAX_BONDS;
6035 pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6041 pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6046 if (downdelay < 0) {
6047 pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6048 downdelay, INT_MAX);
6052 if ((use_carrier != 0) && (use_carrier != 1)) {
6053 pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
6058 if (num_peer_notif < 0 || num_peer_notif > 255) {
6059 pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
6064 /* reset values for 802.3ad/TLB/ALB */
6065 if (!bond_mode_uses_arp(bond_mode)) {
6067 pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
6068 pr_warn("Forcing miimon to 100msec\n");
6069 miimon = BOND_DEFAULT_MIIMON;
6073 if (tx_queues < 1 || tx_queues > 255) {
6074 pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
6075 tx_queues, BOND_DEFAULT_TX_QUEUES);
6076 tx_queues = BOND_DEFAULT_TX_QUEUES;
6079 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
6080 pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
6082 all_slaves_active = 0;
6085 if (resend_igmp < 0 || resend_igmp > 255) {
6086 pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
6087 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
6088 resend_igmp = BOND_DEFAULT_RESEND_IGMP;
6091 bond_opt_initval(&newval, packets_per_slave);
6092 if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
6093 pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
6094 packets_per_slave, USHRT_MAX);
6095 packets_per_slave = 1;
6098 if (bond_mode == BOND_MODE_ALB) {
6099 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
6104 if (updelay || downdelay) {
6105 /* just warn the user that the up/down delay will have
6106 * no effect since miimon is zero...
6108 pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
6109 updelay, downdelay);
6112 /* don't allow arp monitoring */
6114 pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
6115 miimon, arp_interval);
6119 if ((updelay % miimon) != 0) {
6120 pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
6121 updelay, miimon, (updelay / miimon) * miimon);
6126 if ((downdelay % miimon) != 0) {
6127 pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
6129 (downdelay / miimon) * miimon);
6132 downdelay /= miimon;
6135 if (arp_interval < 0) {
6136 pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
6137 arp_interval, INT_MAX);
6141 for (arp_ip_count = 0, i = 0;
6142 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
6145 /* not a complete check, but good enough to catch mistakes */
6146 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
6147 !bond_is_ip_target_ok(ip)) {
6148 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
6152 if (bond_get_targets_ip(arp_target, ip) == -1)
6153 arp_target[arp_ip_count++] = ip;
6155 pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
6160 if (arp_interval && !arp_ip_count) {
6161 /* don't allow arping if no arp_ip_target given... */
6162 pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
6168 if (!arp_interval) {
6169 pr_err("arp_validate requires arp_interval\n");
6173 bond_opt_initstr(&newval, arp_validate);
6174 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
6177 pr_err("Error: invalid arp_validate \"%s\"\n",
6181 arp_validate_value = valptr->value;
6183 arp_validate_value = 0;
6186 if (arp_all_targets) {
6187 bond_opt_initstr(&newval, arp_all_targets);
6188 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
6191 pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
6193 arp_all_targets_value = 0;
6195 arp_all_targets_value = valptr->value;
6200 pr_info("MII link monitoring set to %d ms\n", miimon);
6201 } else if (arp_interval) {
6202 valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
6203 arp_validate_value);
6204 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
6205 arp_interval, valptr->string, arp_ip_count);
6207 for (i = 0; i < arp_ip_count; i++)
6208 pr_cont(" %s", arp_ip_target[i]);
6212 } else if (max_bonds) {
6213 /* miimon and arp_interval not set, we need one so things
6214 * work as expected, see bonding.txt for details
6216 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
6219 if (primary && !bond_mode_uses_primary(bond_mode)) {
6220 /* currently, using a primary only makes sense
6221 * in active backup, TLB or ALB modes
6223 pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
6224 primary, bond_mode_name(bond_mode));
6228 if (primary && primary_reselect) {
6229 bond_opt_initstr(&newval, primary_reselect);
6230 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
6233 pr_err("Error: Invalid primary_reselect \"%s\"\n",
6237 primary_reselect_value = valptr->value;
6239 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
6242 if (fail_over_mac) {
6243 bond_opt_initstr(&newval, fail_over_mac);
6244 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
6247 pr_err("Error: invalid fail_over_mac \"%s\"\n",
6251 fail_over_mac_value = valptr->value;
6252 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
6253 pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
6255 fail_over_mac_value = BOND_FOM_NONE;
6258 bond_opt_initstr(&newval, "default");
6259 valptr = bond_opt_parse(
6260 bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
6263 pr_err("Error: No ad_actor_sys_prio default value");
6266 ad_actor_sys_prio = valptr->value;
6268 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
6271 pr_err("Error: No ad_user_port_key default value");
6274 ad_user_port_key = valptr->value;
6276 bond_opt_initstr(&newval, "default");
6277 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
6279 pr_err("Error: No tlb_dynamic_lb default value");
6282 tlb_dynamic_lb = valptr->value;
6284 if (lp_interval == 0) {
6285 pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
6286 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
6287 lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
6290 /* fill params struct with the proper values */
6291 params->mode = bond_mode;
6292 params->xmit_policy = xmit_hashtype;
6293 params->miimon = miimon;
6294 params->num_peer_notif = num_peer_notif;
6295 params->arp_interval = arp_interval;
6296 params->arp_validate = arp_validate_value;
6297 params->arp_all_targets = arp_all_targets_value;
6298 params->missed_max = 2;
6299 params->updelay = updelay;
6300 params->downdelay = downdelay;
6301 params->peer_notif_delay = 0;
6302 params->use_carrier = use_carrier;
6303 params->lacp_active = 1;
6304 params->lacp_fast = lacp_fast;
6305 params->primary[0] = 0;
6306 params->primary_reselect = primary_reselect_value;
6307 params->fail_over_mac = fail_over_mac_value;
6308 params->tx_queues = tx_queues;
6309 params->all_slaves_active = all_slaves_active;
6310 params->resend_igmp = resend_igmp;
6311 params->min_links = min_links;
6312 params->lp_interval = lp_interval;
6313 params->packets_per_slave = packets_per_slave;
6314 params->tlb_dynamic_lb = tlb_dynamic_lb;
6315 params->ad_actor_sys_prio = ad_actor_sys_prio;
6316 eth_zero_addr(params->ad_actor_system);
6317 params->ad_user_port_key = ad_user_port_key;
6318 if (packets_per_slave > 0) {
6319 params->reciprocal_packets_per_slave =
6320 reciprocal_value(packets_per_slave);
6322 /* reciprocal_packets_per_slave is unused if
6323 * packets_per_slave is 0 or 1, just initialize it
6325 params->reciprocal_packets_per_slave =
6326 (struct reciprocal_value) { 0 };
6330 strscpy_pad(params->primary, primary, sizeof(params->primary));
6332 memcpy(params->arp_targets, arp_target, sizeof(arp_target));
6333 #if IS_ENABLED(CONFIG_IPV6)
6334 memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
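/* Editor's illustrative usage: these checks validate a module load
 * such as
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *
 * where most out-of-range numeric values are reset with one of the
 * warnings above, while an invalid mode string fails the load.
 */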
6340 /* Called from registration process */
6341 static int bond_init(struct net_device *bond_dev)
6343 struct bonding *bond = netdev_priv(bond_dev);
6344 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
6346 netdev_dbg(bond_dev, "Begin bond_init\n");
6348 bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
6352 bond->notifier_ctx = false;
6354 spin_lock_init(&bond->stats_lock);
6355 netdev_lockdep_set_classes(bond_dev);
6357 list_add_tail(&bond->bond_list, &bn->dev_list);
6359 bond_prepare_sysfs_group(bond);
6361 bond_debug_register(bond);
6363 /* Ensure valid dev_addr */
6364 if (is_zero_ether_addr(bond_dev->dev_addr) &&
6365 bond_dev->addr_assign_type == NET_ADDR_PERM)
6366 eth_hw_addr_random(bond_dev);
6371 unsigned int bond_get_num_tx_queues(void)
6376 /* Create a new bond based on the specified name and bonding parameters.
6377 * If name is NULL, obtain a suitable "bond%d" name for us.
6378 * Caller must NOT hold rtnl_lock; we need to release it here before we
6379 * set up our sysfs entries.
6381 int bond_create(struct net *net, const char *name)
6383 struct net_device *bond_dev;
6384 struct bonding *bond;
6389 bond_dev = alloc_netdev_mq(sizeof(struct bonding),
6390 name ? name : "bond%d", NET_NAME_UNKNOWN,
6391 bond_setup, tx_queues);
6395 bond = netdev_priv(bond_dev);
6396 dev_net_set(bond_dev, net);
6397 bond_dev->rtnl_link_ops = &bond_link_ops;
6399 res = register_netdevice(bond_dev);
6401 free_netdev(bond_dev);
6405 netif_carrier_off(bond_dev);
6407 bond_work_init_all(bond);
6414 static int __net_init bond_net_init(struct net *net)
6416 struct bond_net *bn = net_generic(net, bond_net_id);
6419 INIT_LIST_HEAD(&bn->dev_list);
6421 bond_create_proc_dir(bn);
6422 bond_create_sysfs(bn);
6427 static void __net_exit bond_net_exit_batch(struct list_head *net_list)
6429 struct bond_net *bn;
6433 list_for_each_entry(net, net_list, exit_list) {
6434 bn = net_generic(net, bond_net_id);
6435 bond_destroy_sysfs(bn);
6438 /* Kill off any bonds created after unregistering bond rtnl ops */
6440 list_for_each_entry(net, net_list, exit_list) {
6441 struct bonding *bond, *tmp_bond;
6443 bn = net_generic(net, bond_net_id);
6444 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
6445 unregister_netdevice_queue(bond->dev, &list);
6447 unregister_netdevice_many(&list);
6450 list_for_each_entry(net, net_list, exit_list) {
6451 bn = net_generic(net, bond_net_id);
6452 bond_destroy_proc_dir(bn);
6456 static struct pernet_operations bond_net_ops = {
6457 .init = bond_net_init,
6458 .exit_batch = bond_net_exit_batch,
6460 .size = sizeof(struct bond_net),
6463 static int __init bonding_init(void)
6468 res = bond_check_params(&bonding_defaults);
6472 res = register_pernet_subsys(&bond_net_ops);
6476 res = bond_netlink_init();
6480 bond_create_debugfs();
6482 for (i = 0; i < max_bonds; i++) {
6483 res = bond_create(&init_net, NULL);
6488 skb_flow_dissector_init(&flow_keys_bonding,
6489 flow_keys_bonding_keys,
6490 ARRAY_SIZE(flow_keys_bonding_keys));
6492 register_netdevice_notifier(&bond_netdev_notifier);
6496 bond_destroy_debugfs();
6497 bond_netlink_fini();
6499 unregister_pernet_subsys(&bond_net_ops);
6504 static void __exit bonding_exit(void)
6506 unregister_netdevice_notifier(&bond_netdev_notifier);
6508 bond_destroy_debugfs();
6510 bond_netlink_fini();
6511 unregister_pernet_subsys(&bond_net_ops);
6513 #ifdef CONFIG_NET_POLL_CONTROLLER
6514 /* Make sure we don't have an imbalance on our netpoll blocking */
6515 WARN_ON(atomic_read(&netpoll_block_tx));
6519 module_init(bonding_init);
6520 module_exit(bonding_exit);
6521 MODULE_LICENSE("GPL");
6522 MODULE_DESCRIPTION(DRV_DESCRIPTION);
6523 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");