/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"
#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}
static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
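
/*
 * Note: the two exported team_modeop_* helpers above are intended for team
 * mode modules that want every port to carry the team device's MAC address.
 * A mode would typically wire them into its struct team_mode_ops, roughly
 * like this (illustrative sketch only, not taken from a specific mode):
 *
 *	static const struct team_mode_ops bc_mode_ops = {
 *		.port_enter		= team_modeop_port_enter,
 *		.port_change_dev_addr	= team_modeop_port_change_dev_addr,
 *		...
 *	};
 */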
static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}
/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};
static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}
static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}
static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}
void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
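
/*
 * Usage sketch: a mode module would typically register its extra options
 * from its init op and rely on team_options_unregister() on exit. The option
 * name and getter/setter below are hypothetical, shown only to illustrate
 * the registration pattern:
 *
 *	static const struct team_option mode_options[] = {
 *		{
 *			.name	= "example_opt",   [* hypothetical name *]
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= example_opt_get,
 *			.setter	= example_opt_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, mode_options,
 *				    ARRAY_SIZE(mode_options));
 */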
/****************
 * Mode handling
 ****************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}
static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}
int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
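
/*
 * team_mode_get() relies on the "team-mode-<kind>" module alias: when the
 * requested mode isn't registered yet, the lock is dropped so request_module()
 * can load it, then the lookup is retried. Mode modules declare the alias,
 * e.g. MODULE_ALIAS_TEAM_MODE("roundrobin") (macro from linux/if_team.h).
 */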
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}
/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}
static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
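
/*
 * Mode changes are driven through the "mode" team option defined below
 * (set over the generic netlink interface, typically by teamd). Since
 * __team_change_mode() wipes the whole mode private area, no port may be
 * enslaved while it runs; team_change_mode() enforces that with the
 * port_list emptiness check above.
 */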
/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}
/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
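
/*
 * Return-value semantics of the rx_handler above: RX_HANDLER_ANOTHER makes
 * the core re-run protocol demux with skb->dev pointing at the team device
 * (normal delivery through the team), RX_HANDLER_EXACT restricts delivery to
 * exact-match ptype handlers on the port itself (used for disabled ports so
 * that e.g. link-control traffic can still be picked up there), and anything
 * else is accounted as a drop.
 */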
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}
/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}
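
/*
 * Note the off-by-one convention here: queue_mapping == 0 means "no
 * override", so qom_lists[] is indexed by queue_id - 1 and only
 * num_tx_queues - 1 lists are allocated in team_queue_override_init().
 */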
static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}
static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}
static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}
static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}
/****************
 * Port handling
 ****************/

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}
/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}
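
/*
 * Invariant kept by team_port_enable()/team_port_disable(): enabled ports
 * occupy indexes 0..en_port_count-1 in the en_port_hlist hash with no holes
 * (__reconstruct_port_hlist() compacts the range after a removal). Modes can
 * therefore index that dense range via team_get_port_by_index*(), e.g. a
 * round-robin mode picking port "tx_count % en_port_count".
 */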
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			   NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_GSO_UDP_L4;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}
static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif
static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}

	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
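
/*
 * The teardown above intentionally mirrors the enslave sequence in
 * team_port_add() in reverse order, so each step only undoes state that the
 * corresponding add step had established.
 */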
/*****************
 * Net device ops
 *****************/

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}
static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}
static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}
static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}
static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};
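
/*
 * These option instances are what userspace sees over the generic netlink
 * interface; teamd(8) or teamnl(8) reads and writes them. An illustrative
 * (not exhaustive) invocation might look like:
 *
 *	teamnl team0 setoption mode activebackup
 *	teamnl team0 getoption mode
 */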
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}
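
/*
 * The original queue_mapping is stashed in the qdisc skb cb above because
 * the team-level queue id gets rewritten on the way down; team_dev_queue_xmit()
 * restores it from slave_dev_queue_mapping before handing the skb to the
 * port device, keeping the save/restore pair symmetric.
 */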
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}
static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
		rx_nohandler	+= p->rx_nohandler;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	stats->rx_nohandler	= rx_nohandler;
}
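
/*
 * The u64_stats begin/retry loop above is the usual seqcount dance: on 32-bit
 * hosts the 64-bit counters can't be read atomically, so the snapshot is
 * retried whenever a writer touched the syncp in between. The u32 counters
 * are deliberately read outside that protection, as the comment notes.
 */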
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif
static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}
static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_features_check	= passthru_features_check,
};
/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
	dev->features |= dev->hw_features;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}
static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}
/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}
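
/*
 * The start_again/send_done flow above implements netlink multipart
 * pagination: option TLVs are packed into an skb until -EMSGSIZE, the partial
 * message is flushed through send_func, filling resumes from the option
 * instance where it stopped, and the sequence is finished off with an
 * NLMSG_DONE message.
 */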
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}
2475 static int team_nl_send_event_options_get(struct team *team,
2476 struct list_head *sel_opt_inst_list);
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		LIST_HEAD(opt_inst_list);
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy,
				       info->extack);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}

		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

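/* Emit one nested TEAM_ATTR_ITEM_PORT attribute describing a port. On any
 * put failure the nest is cancelled and -EMSGSIZE is returned, so the
 * caller can retry this port in a fresh skb.
 */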
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

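/* Counterpart of team_nl_send_options_get() for ports: sends either the
 * single selected port (one_port) or the whole port list for
 * TEAM_CMD_PORT_LIST_GET, restarting with a new skb on -EMSGSIZE and
 * terminating the multipart sequence with NLMSG_DONE.
 */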
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, caller wants to send port list containing
	 * only this port. Otherwise go through all listed ports and send all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

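/* Doit handler for TEAM_CMD_PORT_LIST_GET: unicast the full port list to
 * the requesting socket.
 */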
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);
	return err;
}

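/* Every command except NOOP reads or modifies team state and therefore
 * carries GENL_ADMIN_PERM (CAP_NET_ADMIN); all commands share the same
 * attribute policy.
 */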
static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name = TEAM_GENL_NAME,
	.version = TEAM_GENL_VERSION,
	.maxattr = TEAM_ATTR_MAX,
	.netnsok = true,
	.module = THIS_MODULE,
	.ops = team_nl_ops,
	.n_ops = ARRAY_SIZE(team_nl_ops),
	.mcgrps = team_nl_mcgrps,
	.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};

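/* Unlike team_nl_send_unicast(), this sender targets the change-event
 * multicast group, so event messages reach every listener subscribed in
 * the team device's network namespace.
 */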
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}

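/* Collect all option instances marked ->changed and multicast them to
 * userspace as one TEAM_CMD_OPTIONS_GET event. -ESRCH only means nobody
 * is listening on the multicast group, so it is not worth a warning.
 */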
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

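/* Record the new link state and notify userspace. When the link is up,
 * speed and duplex are refreshed from ethtool so the event carries
 * current values; when it is down (or the query fails), they are zeroed.
 */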
/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

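/* Recompute the master device's carrier: it is on as soon as at least one
 * port reports linkup, unless userspace has taken over carrier control
 * (user_carrier_enabled), in which case the kernel leaves it alone.
 */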
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

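/* Locked wrapper around __team_port_change_check() for callers that do
 * not already hold team->lock, such as the netdev notifier below.
 */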
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

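/* Only events on devices enslaved as team ports are of interest here;
 * team_port_get_rtnl() returns NULL for anything else, which is ignored
 * with NOTIFY_DONE.
 */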
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_oper_up(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};

/***********************
 * Module init and exit
 ***********************/

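/* Registration order matters: the netdevice notifier goes in first, so
 * port device events are already watched by the time team devices can be
 * created via rtnl or configured via generic netlink; the error path
 * unwinds in reverse order.
 */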
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);
err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);
	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);