1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * drivers/net/team/team.c - Network team device driver
4  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/rcupdate.h>
13 #include <linux/errno.h>
14 #include <linux/ctype.h>
15 #include <linux/notifier.h>
16 #include <linux/netdevice.h>
17 #include <linux/netpoll.h>
18 #include <linux/if_vlan.h>
19 #include <linux/if_arp.h>
20 #include <linux/socket.h>
21 #include <linux/etherdevice.h>
22 #include <linux/rtnetlink.h>
23 #include <net/rtnetlink.h>
24 #include <net/genetlink.h>
25 #include <net/netlink.h>
26 #include <net/sch_generic.h>
27 #include <generated/utsrelease.h>
28 #include <linux/if_team.h>
29
30 #define DRV_NAME "team"
31
32
33 /**********
34  * Helpers
35  **********/
36
37 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
38 {
39         struct team_port *port = rtnl_dereference(dev->rx_handler_data);
40
41         return netif_is_team_port(dev) ? port : NULL;
42 }
43
44 /*
45  * The ability to change the device address of an open port device is already
46  * checked in team_port_add, so callers need not check this function's return value.
47  */
48 static int __set_port_dev_addr(struct net_device *port_dev,
49                                const unsigned char *dev_addr)
50 {
51         struct sockaddr_storage addr;
52
53         memcpy(addr.__data, dev_addr, port_dev->addr_len);
54         addr.ss_family = port_dev->type;
55         return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
56 }
57
58 static int team_port_set_orig_dev_addr(struct team_port *port)
59 {
60         return __set_port_dev_addr(port->dev, port->orig.dev_addr);
61 }
62
63 static int team_port_set_team_dev_addr(struct team *team,
64                                        struct team_port *port)
65 {
66         return __set_port_dev_addr(port->dev, team->dev->dev_addr);
67 }
68
69 int team_modeop_port_enter(struct team *team, struct team_port *port)
70 {
71         return team_port_set_team_dev_addr(team, port);
72 }
73 EXPORT_SYMBOL(team_modeop_port_enter);
74
75 void team_modeop_port_change_dev_addr(struct team *team,
76                                       struct team_port *port)
77 {
78         team_port_set_team_dev_addr(team, port);
79 }
80 EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
81
82 static void team_lower_state_changed(struct team_port *port)
83 {
84         struct netdev_lag_lower_state_info info;
85
86         info.link_up = port->linkup;
87         info.tx_enabled = team_port_enabled(port);
88         netdev_lower_state_changed(port->dev, &info);
89 }
90
91 static void team_refresh_port_linkup(struct team_port *port)
92 {
93         bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
94                                                       port->state.linkup;
95
96         if (port->linkup != new_linkup) {
97                 port->linkup = new_linkup;
98                 team_lower_state_changed(port);
99         }
100 }
101
102
103 /*******************
104  * Options handling
105  *******************/
106
107 struct team_option_inst { /* One for each option instance */
108         struct list_head list;
109         struct list_head tmp_list;
110         struct team_option *option;
111         struct team_option_inst_info info;
112         bool changed;
113         bool removed;
114 };
115
116 static struct team_option *__team_find_option(struct team *team,
117                                               const char *opt_name)
118 {
119         struct team_option *option;
120
121         list_for_each_entry(option, &team->option_list, list) {
122                 if (strcmp(option->name, opt_name) == 0)
123                         return option;
124         }
125         return NULL;
126 }
127
128 static void __team_option_inst_del(struct team_option_inst *opt_inst)
129 {
130         list_del(&opt_inst->list);
131         kfree(opt_inst);
132 }
133
134 static void __team_option_inst_del_option(struct team *team,
135                                           struct team_option *option)
136 {
137         struct team_option_inst *opt_inst, *tmp;
138
139         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
140                 if (opt_inst->option == option)
141                         __team_option_inst_del(opt_inst);
142         }
143 }
144
145 static int __team_option_inst_add(struct team *team, struct team_option *option,
146                                   struct team_port *port)
147 {
148         struct team_option_inst *opt_inst;
149         unsigned int array_size;
150         unsigned int i;
151         int err;
152
153         array_size = option->array_size;
154         if (!array_size)
155                 array_size = 1; /* No array but still need one instance */
156
157         for (i = 0; i < array_size; i++) {
158                 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
159                 if (!opt_inst)
160                         return -ENOMEM;
161                 opt_inst->option = option;
162                 opt_inst->info.port = port;
163                 opt_inst->info.array_index = i;
164                 opt_inst->changed = true;
165                 opt_inst->removed = false;
166                 list_add_tail(&opt_inst->list, &team->option_inst_list);
167                 if (option->init) {
168                         err = option->init(team, &opt_inst->info);
169                         if (err)
170                                 return err;
171                 }
172
173         }
174         return 0;
175 }
176
177 static int __team_option_inst_add_option(struct team *team,
178                                          struct team_option *option)
179 {
180         int err;
181
182         if (!option->per_port) {
183                 err = __team_option_inst_add(team, option, NULL);
184                 if (err)
185                         goto inst_del_option;
186         }
187         return 0;
188
189 inst_del_option:
190         __team_option_inst_del_option(team, option);
191         return err;
192 }
193
194 static void __team_option_inst_mark_removed_option(struct team *team,
195                                                    struct team_option *option)
196 {
197         struct team_option_inst *opt_inst;
198
199         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
200                 if (opt_inst->option == option) {
201                         opt_inst->changed = true;
202                         opt_inst->removed = true;
203                 }
204         }
205 }
206
207 static void __team_option_inst_del_port(struct team *team,
208                                         struct team_port *port)
209 {
210         struct team_option_inst *opt_inst, *tmp;
211
212         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
213                 if (opt_inst->option->per_port &&
214                     opt_inst->info.port == port)
215                         __team_option_inst_del(opt_inst);
216         }
217 }
218
219 static int __team_option_inst_add_port(struct team *team,
220                                        struct team_port *port)
221 {
222         struct team_option *option;
223         int err;
224
225         list_for_each_entry(option, &team->option_list, list) {
226                 if (!option->per_port)
227                         continue;
228                 err = __team_option_inst_add(team, option, port);
229                 if (err)
230                         goto inst_del_port;
231         }
232         return 0;
233
234 inst_del_port:
235         __team_option_inst_del_port(team, port);
236         return err;
237 }
238
239 static void __team_option_inst_mark_removed_port(struct team *team,
240                                                  struct team_port *port)
241 {
242         struct team_option_inst *opt_inst;
243
244         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
245                 if (opt_inst->info.port == port) {
246                         opt_inst->changed = true;
247                         opt_inst->removed = true;
248                 }
249         }
250 }
251
252 static int __team_options_register(struct team *team,
253                                    const struct team_option *option,
254                                    size_t option_count)
255 {
256         int i;
257         struct team_option **dst_opts;
258         int err;
259
260         dst_opts = kcalloc(option_count, sizeof(struct team_option *),
261                            GFP_KERNEL);
262         if (!dst_opts)
263                 return -ENOMEM;
264         for (i = 0; i < option_count; i++, option++) {
265                 if (__team_find_option(team, option->name)) {
266                         err = -EEXIST;
267                         goto alloc_rollback;
268                 }
269                 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
270                 if (!dst_opts[i]) {
271                         err = -ENOMEM;
272                         goto alloc_rollback;
273                 }
274         }
275
276         for (i = 0; i < option_count; i++) {
277                 err = __team_option_inst_add_option(team, dst_opts[i]);
278                 if (err)
279                         goto inst_rollback;
280                 list_add_tail(&dst_opts[i]->list, &team->option_list);
281         }
282
283         kfree(dst_opts);
284         return 0;
285
286 inst_rollback:
287         for (i--; i >= 0; i--)
288                 __team_option_inst_del_option(team, dst_opts[i]);
289
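        /* every dst_opts[] entry was allocated above; fall through and free them all */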
290         i = option_count;
291 alloc_rollback:
292         for (i--; i >= 0; i--)
293                 kfree(dst_opts[i]);
294
295         kfree(dst_opts);
296         return err;
297 }
298
299 static void __team_options_mark_removed(struct team *team,
300                                         const struct team_option *option,
301                                         size_t option_count)
302 {
303         int i;
304
305         for (i = 0; i < option_count; i++, option++) {
306                 struct team_option *del_opt;
307
308                 del_opt = __team_find_option(team, option->name);
309                 if (del_opt)
310                         __team_option_inst_mark_removed_option(team, del_opt);
311         }
312 }
313
314 static void __team_options_unregister(struct team *team,
315                                       const struct team_option *option,
316                                       size_t option_count)
317 {
318         int i;
319
320         for (i = 0; i < option_count; i++, option++) {
321                 struct team_option *del_opt;
322
323                 del_opt = __team_find_option(team, option->name);
324                 if (del_opt) {
325                         __team_option_inst_del_option(team, del_opt);
326                         list_del(&del_opt->list);
327                         kfree(del_opt);
328                 }
329         }
330 }
331
332 static void __team_options_change_check(struct team *team);
333
334 int team_options_register(struct team *team,
335                           const struct team_option *option,
336                           size_t option_count)
337 {
338         int err;
339
340         err = __team_options_register(team, option, option_count);
341         if (err)
342                 return err;
343         __team_options_change_check(team);
344         return 0;
345 }
346 EXPORT_SYMBOL(team_options_register);
347
348 void team_options_unregister(struct team *team,
349                              const struct team_option *option,
350                              size_t option_count)
351 {
352         __team_options_mark_removed(team, option, option_count);
353         __team_options_change_check(team);
354         __team_options_unregister(team, option, option_count);
355 }
356 EXPORT_SYMBOL(team_options_unregister);
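/*
 * Example (sketch, illustrative names): a mode would typically register its
 * own per-mode options from its init op:
 *
 *	static const struct team_option foo_options[] = { ... };
 *
 *	err = team_options_register(team, foo_options, ARRAY_SIZE(foo_options));
 */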
357
358 static int team_option_get(struct team *team,
359                            struct team_option_inst *opt_inst,
360                            struct team_gsetter_ctx *ctx)
361 {
362         if (!opt_inst->option->getter)
363                 return -EOPNOTSUPP;
364         return opt_inst->option->getter(team, ctx);
365 }
366
367 static int team_option_set(struct team *team,
368                            struct team_option_inst *opt_inst,
369                            struct team_gsetter_ctx *ctx)
370 {
371         if (!opt_inst->option->setter)
372                 return -EOPNOTSUPP;
373         return opt_inst->option->setter(team, ctx);
374 }
375
376 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
377 {
378         struct team_option_inst *opt_inst;
379
380         opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
381         opt_inst->changed = true;
382 }
383 EXPORT_SYMBOL(team_option_inst_set_change);
384
385 void team_options_change_check(struct team *team)
386 {
387         __team_options_change_check(team);
388 }
389 EXPORT_SYMBOL(team_options_change_check);
390
391
392 /****************
393  * Mode handling
394  ****************/
395
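/* registered team modes; the list is protected by mode_list_lock */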
396 static LIST_HEAD(mode_list);
397 static DEFINE_SPINLOCK(mode_list_lock);
398
399 struct team_mode_item {
400         struct list_head list;
401         const struct team_mode *mode;
402 };
403
404 static struct team_mode_item *__find_mode(const char *kind)
405 {
406         struct team_mode_item *mitem;
407
408         list_for_each_entry(mitem, &mode_list, list) {
409                 if (strcmp(mitem->mode->kind, kind) == 0)
410                         return mitem;
411         }
412         return NULL;
413 }
414
415 static bool is_good_mode_name(const char *name)
416 {
417         while (*name != '\0') {
418                 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
419                         return false;
420                 name++;
421         }
422         return true;
423 }
424
425 int team_mode_register(const struct team_mode *mode)
426 {
427         int err = 0;
428         struct team_mode_item *mitem;
429
430         if (!is_good_mode_name(mode->kind) ||
431             mode->priv_size > TEAM_MODE_PRIV_SIZE)
432                 return -EINVAL;
433
434         mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
435         if (!mitem)
436                 return -ENOMEM;
437
438         spin_lock(&mode_list_lock);
439         if (__find_mode(mode->kind)) {
440                 err = -EEXIST;
441                 kfree(mitem);
442                 goto unlock;
443         }
444         mitem->mode = mode;
445         list_add_tail(&mitem->list, &mode_list);
446 unlock:
447         spin_unlock(&mode_list_lock);
448         return err;
449 }
450 EXPORT_SYMBOL(team_mode_register);
451
452 void team_mode_unregister(const struct team_mode *mode)
453 {
454         struct team_mode_item *mitem;
455
456         spin_lock(&mode_list_lock);
457         mitem = __find_mode(mode->kind);
458         if (mitem) {
459                 list_del_init(&mitem->list);
460                 kfree(mitem);
461         }
462         spin_unlock(&mode_list_lock);
463 }
464 EXPORT_SYMBOL(team_mode_unregister);
465
466 static const struct team_mode *team_mode_get(const char *kind)
467 {
468         struct team_mode_item *mitem;
469         const struct team_mode *mode = NULL;
470
471         if (!try_module_get(THIS_MODULE))
472                 return NULL;
473
474         spin_lock(&mode_list_lock);
475         mitem = __find_mode(kind);
476         if (!mitem) {
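                /* mode not registered yet: drop the lock, try to load its module, then look again */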
477                 spin_unlock(&mode_list_lock);
478                 request_module("team-mode-%s", kind);
479                 spin_lock(&mode_list_lock);
480                 mitem = __find_mode(kind);
481         }
482         if (mitem) {
483                 mode = mitem->mode;
484                 if (!try_module_get(mode->owner))
485                         mode = NULL;
486         }
487
488         spin_unlock(&mode_list_lock);
489         module_put(THIS_MODULE);
490         return mode;
491 }
492
493 static void team_mode_put(const struct team_mode *mode)
494 {
495         module_put(mode->owner);
496 }
497
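/* fallback ops installed by team_adjust_ops() while no mode is set, no port is
 * enabled or the mode does not provide the respective op
 */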
498 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
499 {
500         dev_kfree_skb_any(skb);
501         return false;
502 }
503
504 static rx_handler_result_t team_dummy_receive(struct team *team,
505                                               struct team_port *port,
506                                               struct sk_buff *skb)
507 {
508         return RX_HANDLER_ANOTHER;
509 }
510
511 static const struct team_mode __team_no_mode = {
512         .kind           = "*NOMODE*",
513 };
514
515 static bool team_is_mode_set(struct team *team)
516 {
517         return team->mode != &__team_no_mode;
518 }
519
520 static void team_set_no_mode(struct team *team)
521 {
522         team->user_carrier_enabled = false;
523         team->mode = &__team_no_mode;
524 }
525
526 static void team_adjust_ops(struct team *team)
527 {
528         /*
529          * To avoid checks in rx/tx skb paths, ensure here that non-null and
530          * correct ops are always set.
531          */
532
533         if (!team->en_port_count || !team_is_mode_set(team) ||
534             !team->mode->ops->transmit)
535                 team->ops.transmit = team_dummy_transmit;
536         else
537                 team->ops.transmit = team->mode->ops->transmit;
538
539         if (!team->en_port_count || !team_is_mode_set(team) ||
540             !team->mode->ops->receive)
541                 team->ops.receive = team_dummy_receive;
542         else
543                 team->ops.receive = team->mode->ops->receive;
544 }
545
546 /*
547  * We can rely on the fact that no port is present at the time of a mode
548  * change. Therefore no packets are in flight, so there is no need to set
549  * the mode operations in any special way.
550  */
551 static int __team_change_mode(struct team *team,
552                               const struct team_mode *new_mode)
553 {
554         /* Check if mode was previously set and do cleanup if so */
555         if (team_is_mode_set(team)) {
556                 void (*exit_op)(struct team *team) = team->ops.exit;
557
558                 /* Clear ops area so no callback is called any longer */
559                 memset(&team->ops, 0, sizeof(struct team_mode_ops));
560                 team_adjust_ops(team);
561
562                 if (exit_op)
563                         exit_op(team);
564                 team_mode_put(team->mode);
565                 team_set_no_mode(team);
566                 /* zero private data area */
567                 memset(&team->mode_priv, 0,
568                        sizeof(struct team) - offsetof(struct team, mode_priv));
569         }
570
571         if (!new_mode)
572                 return 0;
573
574         if (new_mode->ops->init) {
575                 int err;
576
577                 err = new_mode->ops->init(team);
578                 if (err)
579                         return err;
580         }
581
582         team->mode = new_mode;
583         memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
584         team_adjust_ops(team);
585
586         return 0;
587 }
588
589 static int team_change_mode(struct team *team, const char *kind)
590 {
591         const struct team_mode *new_mode;
592         struct net_device *dev = team->dev;
593         int err;
594
595         if (!list_empty(&team->port_list)) {
596                 netdev_err(dev, "No ports can be present during mode change\n");
597                 return -EBUSY;
598         }
599
600         if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
601                 netdev_err(dev, "Unable to change to the same mode the team is in\n");
602                 return -EINVAL;
603         }
604
605         new_mode = team_mode_get(kind);
606         if (!new_mode) {
607                 netdev_err(dev, "Mode \"%s\" not found\n", kind);
608                 return -EINVAL;
609         }
610
611         err = __team_change_mode(team, new_mode);
612         if (err) {
613                 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
614                 team_mode_put(new_mode);
615                 return err;
616         }
617
618         netdev_info(dev, "Mode changed to \"%s\"\n", kind);
619         return 0;
620 }
621
622
623 /*********************
624  * Peers notification
625  *********************/
626
627 static void team_notify_peers_work(struct work_struct *work)
628 {
629         struct team *team;
630         int val;
631
632         team = container_of(work, struct team, notify_peers.dw.work);
633
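        /* the NETDEV_NOTIFY_PEERS notifier must run under rtnl; if the lock is
         * contended, requeue this work instead of blocking the workqueue
         */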
634         if (!rtnl_trylock()) {
635                 schedule_delayed_work(&team->notify_peers.dw, 0);
636                 return;
637         }
638         val = atomic_dec_if_positive(&team->notify_peers.count_pending);
639         if (val < 0) {
640                 rtnl_unlock();
641                 return;
642         }
643         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
644         rtnl_unlock();
645         if (val)
646                 schedule_delayed_work(&team->notify_peers.dw,
647                                       msecs_to_jiffies(team->notify_peers.interval));
648 }
649
650 static void team_notify_peers(struct team *team)
651 {
652         if (!team->notify_peers.count || !netif_running(team->dev))
653                 return;
654         atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
655         schedule_delayed_work(&team->notify_peers.dw, 0);
656 }
657
658 static void team_notify_peers_init(struct team *team)
659 {
660         INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
661 }
662
663 static void team_notify_peers_fini(struct team *team)
664 {
665         cancel_delayed_work_sync(&team->notify_peers.dw);
666 }
667
668
669 /*******************************
670  * Send multicast group rejoins
671  *******************************/
672
673 static void team_mcast_rejoin_work(struct work_struct *work)
674 {
675         struct team *team;
676         int val;
677
678         team = container_of(work, struct team, mcast_rejoin.dw.work);
679
680         if (!rtnl_trylock()) {
681                 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
682                 return;
683         }
684         val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
685         if (val < 0) {
686                 rtnl_unlock();
687                 return;
688         }
689         call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
690         rtnl_unlock();
691         if (val)
692                 schedule_delayed_work(&team->mcast_rejoin.dw,
693                                       msecs_to_jiffies(team->mcast_rejoin.interval));
694 }
695
696 static void team_mcast_rejoin(struct team *team)
697 {
698         if (!team->mcast_rejoin.count || !netif_running(team->dev))
699                 return;
700         atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
701         schedule_delayed_work(&team->mcast_rejoin.dw, 0);
702 }
703
704 static void team_mcast_rejoin_init(struct team *team)
705 {
706         INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
707 }
708
709 static void team_mcast_rejoin_fini(struct team *team)
710 {
711         cancel_delayed_work_sync(&team->mcast_rejoin.dw);
712 }
713
714
715 /************************
716  * Rx path frame handler
717  ************************/
718
719 /* note: already called with rcu_read_lock */
720 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
721 {
722         struct sk_buff *skb = *pskb;
723         struct team_port *port;
724         struct team *team;
725         rx_handler_result_t res;
726
727         skb = skb_share_check(skb, GFP_ATOMIC);
728         if (!skb)
729                 return RX_HANDLER_CONSUMED;
730
731         *pskb = skb;
732
733         port = team_port_get_rcu(skb->dev);
734         team = port->team;
735         if (!team_port_enabled(port)) {
736                 /* allow exact match delivery for disabled ports */
737                 res = RX_HANDLER_EXACT;
738         } else {
739                 res = team->ops.receive(team, port, skb);
740         }
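        /* the mode accepted the skb on behalf of the team device: account rx
         * stats and retarget skb->dev so it is reprocessed as received on the
         * team device
         */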
741         if (res == RX_HANDLER_ANOTHER) {
742                 struct team_pcpu_stats *pcpu_stats;
743
744                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
745                 u64_stats_update_begin(&pcpu_stats->syncp);
746                 pcpu_stats->rx_packets++;
747                 pcpu_stats->rx_bytes += skb->len;
748                 if (skb->pkt_type == PACKET_MULTICAST)
749                         pcpu_stats->rx_multicast++;
750                 u64_stats_update_end(&pcpu_stats->syncp);
751
752                 skb->dev = team->dev;
753         } else if (res == RX_HANDLER_EXACT) {
754                 this_cpu_inc(team->pcpu_stats->rx_nohandler);
755         } else {
756                 this_cpu_inc(team->pcpu_stats->rx_dropped);
757         }
758
759         return res;
760 }
761
762
763 /*************************************
764  * Multiqueue Tx port select override
765  *************************************/
766
767 static int team_queue_override_init(struct team *team)
768 {
769         struct list_head *listarr;
770         unsigned int queue_cnt = team->dev->num_tx_queues - 1;
771         unsigned int i;
772
773         if (!queue_cnt)
774                 return 0;
775         listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
776                                 GFP_KERNEL);
777         if (!listarr)
778                 return -ENOMEM;
779         team->qom_lists = listarr;
780         for (i = 0; i < queue_cnt; i++)
781                 INIT_LIST_HEAD(listarr++);
782         return 0;
783 }
784
785 static void team_queue_override_fini(struct team *team)
786 {
787         kfree(team->qom_lists);
788 }
789
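/* queue_id 0 means "no override"; qom_lists[] is indexed by queue_id - 1 */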
790 static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
791 {
792         return &team->qom_lists[queue_id - 1];
793 }
794
795 /*
796  * note: already called with rcu_read_lock
797  */
798 static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
799 {
800         struct list_head *qom_list;
801         struct team_port *port;
802
803         if (!team->queue_override_enabled || !skb->queue_mapping)
804                 return false;
805         qom_list = __team_get_qom_list(team, skb->queue_mapping);
806         list_for_each_entry_rcu(port, qom_list, qom_list) {
807                 if (!team_dev_queue_xmit(team, port, skb))
808                         return true;
809         }
810         return false;
811 }
812
813 static void __team_queue_override_port_del(struct team *team,
814                                            struct team_port *port)
815 {
816         if (!port->queue_id)
817                 return;
818         list_del_rcu(&port->qom_list);
819 }
820
821 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
822                                                       struct team_port *cur)
823 {
824         if (port->priority < cur->priority)
825                 return true;
826         if (port->priority > cur->priority)
827                 return false;
828         if (port->index < cur->index)
829                 return true;
830         return false;
831 }
832
833 static void __team_queue_override_port_add(struct team *team,
834                                            struct team_port *port)
835 {
836         struct team_port *cur;
837         struct list_head *qom_list;
838         struct list_head *node;
839
840         if (!port->queue_id)
841                 return;
842         qom_list = __team_get_qom_list(team, port->queue_id);
843         node = qom_list;
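        /* find the insertion point that keeps the per-queue list ordered by
         * priority and, within equal priority, by port index
         */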
844         list_for_each_entry(cur, qom_list, qom_list) {
845                 if (team_queue_override_port_has_gt_prio_than(port, cur))
846                         break;
847                 node = &cur->qom_list;
848         }
849         list_add_tail_rcu(&port->qom_list, node);
850 }
851
852 static void __team_queue_override_enabled_check(struct team *team)
853 {
854         struct team_port *port;
855         bool enabled = false;
856
857         list_for_each_entry(port, &team->port_list, list) {
858                 if (port->queue_id) {
859                         enabled = true;
860                         break;
861                 }
862         }
863         if (enabled == team->queue_override_enabled)
864                 return;
865         netdev_dbg(team->dev, "%s queue override\n",
866                    enabled ? "Enabling" : "Disabling");
867         team->queue_override_enabled = enabled;
868 }
869
870 static void team_queue_override_port_prio_changed(struct team *team,
871                                                   struct team_port *port)
872 {
873         if (!port->queue_id || team_port_enabled(port))
874                 return;
875         __team_queue_override_port_del(team, port);
876         __team_queue_override_port_add(team, port);
877         __team_queue_override_enabled_check(team);
878 }
879
880 static void team_queue_override_port_change_queue_id(struct team *team,
881                                                      struct team_port *port,
882                                                      u16 new_queue_id)
883 {
884         if (team_port_enabled(port)) {
885                 __team_queue_override_port_del(team, port);
886                 port->queue_id = new_queue_id;
887                 __team_queue_override_port_add(team, port);
888                 __team_queue_override_enabled_check(team);
889         } else {
890                 port->queue_id = new_queue_id;
891         }
892 }
893
894 static void team_queue_override_port_add(struct team *team,
895                                          struct team_port *port)
896 {
897         __team_queue_override_port_add(team, port);
898         __team_queue_override_enabled_check(team);
899 }
900
901 static void team_queue_override_port_del(struct team *team,
902                                          struct team_port *port)
903 {
904         __team_queue_override_port_del(team, port);
905         __team_queue_override_enabled_check(team);
906 }
907
908
909 /****************
910  * Port handling
911  ****************/
912
913 static bool team_port_find(const struct team *team,
914                            const struct team_port *port)
915 {
916         struct team_port *cur;
917
918         list_for_each_entry(cur, &team->port_list, list)
919                 if (cur == port)
920                         return true;
921         return false;
922 }
923
924 /*
925  * Enable/disable a port by adding it to the enabled port hashlist and setting
926  * port->index (this might be racy, so a reader could see a stale index while
927  * processing an in-flight packet, but that is not a problem). Writes are
928  * guarded by team->lock.
929  */
930 static void team_port_enable(struct team *team,
931                              struct team_port *port)
932 {
933         if (team_port_enabled(port))
934                 return;
935         port->index = team->en_port_count++;
936         hlist_add_head_rcu(&port->hlist,
937                            team_port_index_hash(team, port->index));
938         team_adjust_ops(team);
939         team_queue_override_port_add(team, port);
940         if (team->ops.port_enabled)
941                 team->ops.port_enabled(team, port);
942         team_notify_peers(team);
943         team_mcast_rejoin(team);
944         team_lower_state_changed(port);
945 }
946
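/* close the hole left at rm_index by shifting the indexes of the following
 * enabled ports down by one, keeping the index space contiguous
 */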
947 static void __reconstruct_port_hlist(struct team *team, int rm_index)
948 {
949         int i;
950         struct team_port *port;
951
952         for (i = rm_index + 1; i < team->en_port_count; i++) {
953                 port = team_get_port_by_index(team, i);
954                 hlist_del_rcu(&port->hlist);
955                 port->index--;
956                 hlist_add_head_rcu(&port->hlist,
957                                    team_port_index_hash(team, port->index));
958         }
959 }
960
961 static void team_port_disable(struct team *team,
962                               struct team_port *port)
963 {
964         if (!team_port_enabled(port))
965                 return;
966         if (team->ops.port_disabled)
967                 team->ops.port_disabled(team, port);
968         hlist_del_rcu(&port->hlist);
969         __reconstruct_port_hlist(team, port->index);
970         port->index = -1;
971         team->en_port_count--;
972         team_queue_override_port_del(team, port);
973         team_adjust_ops(team);
974         team_lower_state_changed(port);
975 }
976
977 #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
978                             NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
979                             NETIF_F_HIGHDMA | NETIF_F_LRO)
980
981 #define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
982                                  NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
983
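/* recompute the team device's vlan/encap feature sets and hard_header_len
 * from the current set of ports
 */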
984 static void __team_compute_features(struct team *team)
985 {
986         struct team_port *port;
987         netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
988                                           NETIF_F_ALL_FOR_ALL;
989         netdev_features_t enc_features  = TEAM_ENC_FEATURES;
990         unsigned short max_hard_header_len = ETH_HLEN;
991         unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
992                                         IFF_XMIT_DST_RELEASE_PERM;
993
994         list_for_each_entry(port, &team->port_list, list) {
995                 vlan_features = netdev_increment_features(vlan_features,
996                                         port->dev->vlan_features,
997                                         TEAM_VLAN_FEATURES);
998                 enc_features =
999                         netdev_increment_features(enc_features,
1000                                                   port->dev->hw_enc_features,
1001                                                   TEAM_ENC_FEATURES);
1002
1003
1004                 dst_release_flag &= port->dev->priv_flags;
1005                 if (port->dev->hard_header_len > max_hard_header_len)
1006                         max_hard_header_len = port->dev->hard_header_len;
1007         }
1008
1009         team->dev->vlan_features = vlan_features;
1010         team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1011                                      NETIF_F_HW_VLAN_CTAG_TX |
1012                                      NETIF_F_HW_VLAN_STAG_TX |
1013                                      NETIF_F_GSO_UDP_L4;
1014         team->dev->hard_header_len = max_hard_header_len;
1015
1016         team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1017         if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1018                 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1019 }
1020
1021 static void team_compute_features(struct team *team)
1022 {
1023         mutex_lock(&team->lock);
1024         __team_compute_features(team);
1025         mutex_unlock(&team->lock);
1026         netdev_change_features(team->dev);
1027 }
1028
1029 static int team_port_enter(struct team *team, struct team_port *port)
1030 {
1031         int err = 0;
1032
1033         dev_hold(team->dev);
1034         if (team->ops.port_enter) {
1035                 err = team->ops.port_enter(team, port);
1036                 if (err) {
1037                         netdev_err(team->dev, "Device %s failed to enter team mode\n",
1038                                    port->dev->name);
1039                         goto err_port_enter;
1040                 }
1041         }
1042
1043         return 0;
1044
1045 err_port_enter:
1046         dev_put(team->dev);
1047
1048         return err;
1049 }
1050
1051 static void team_port_leave(struct team *team, struct team_port *port)
1052 {
1053         if (team->ops.port_leave)
1054                 team->ops.port_leave(team, port);
1055         dev_put(team->dev);
1056 }
1057
1058 #ifdef CONFIG_NET_POLL_CONTROLLER
1059 static int __team_port_enable_netpoll(struct team_port *port)
1060 {
1061         struct netpoll *np;
1062         int err;
1063
1064         np = kzalloc(sizeof(*np), GFP_KERNEL);
1065         if (!np)
1066                 return -ENOMEM;
1067
1068         err = __netpoll_setup(np, port->dev);
1069         if (err) {
1070                 kfree(np);
1071                 return err;
1072         }
1073         port->np = np;
1074         return err;
1075 }
1076
1077 static int team_port_enable_netpoll(struct team_port *port)
1078 {
1079         if (!port->team->dev->npinfo)
1080                 return 0;
1081
1082         return __team_port_enable_netpoll(port);
1083 }
1084
1085 static void team_port_disable_netpoll(struct team_port *port)
1086 {
1087         struct netpoll *np = port->np;
1088
1089         if (!np)
1090                 return;
1091         port->np = NULL;
1092
1093         __netpoll_free(np);
1094 }
1095 #else
1096 static int team_port_enable_netpoll(struct team_port *port)
1097 {
1098         return 0;
1099 }
1100 static void team_port_disable_netpoll(struct team_port *port)
1101 {
1102 }
1103 #endif
1104
1105 static int team_upper_dev_link(struct team *team, struct team_port *port,
1106                                struct netlink_ext_ack *extack)
1107 {
1108         struct netdev_lag_upper_info lag_upper_info;
1109         int err;
1110
1111         lag_upper_info.tx_type = team->mode->lag_tx_type;
1112         lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
1113         err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
1114                                            &lag_upper_info, extack);
1115         if (err)
1116                 return err;
1117         port->dev->priv_flags |= IFF_TEAM_PORT;
1118         return 0;
1119 }
1120
1121 static void team_upper_dev_unlink(struct team *team, struct team_port *port)
1122 {
1123         netdev_upper_dev_unlink(port->dev, team->dev);
1124         port->dev->priv_flags &= ~IFF_TEAM_PORT;
1125 }
1126
1127 static void __team_port_change_port_added(struct team_port *port, bool linkup);
1128 static int team_dev_type_check_change(struct net_device *dev,
1129                                       struct net_device *port_dev);
1130
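/*
 * Attach port_dev as a port of this team device: validate it, adjust its MTU
 * and (if needed) device type, open it, sync VLANs and address lists, register
 * the rx handler and link it as a lower device. Failures are unwound in
 * reverse order via the err_* labels below.
 */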
1131 static int team_port_add(struct team *team, struct net_device *port_dev,
1132                          struct netlink_ext_ack *extack)
1133 {
1134         struct net_device *dev = team->dev;
1135         struct team_port *port;
1136         char *portname = port_dev->name;
1137         int err;
1138
1139         if (port_dev->flags & IFF_LOOPBACK) {
1140                 NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
1141                 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1142                            portname);
1143                 return -EINVAL;
1144         }
1145
1146         if (netif_is_team_port(port_dev)) {
1147                 NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
1148                 netdev_err(dev, "Device %s is already a port "
1149                                 "of a team device\n", portname);
1150                 return -EBUSY;
1151         }
1152
1153         if (dev == port_dev) {
1154                 NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
1155                 netdev_err(dev, "Cannot enslave team device to itself\n");
1156                 return -EINVAL;
1157         }
1158
1159         if (netdev_has_upper_dev(dev, port_dev)) {
1160                 NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
1161                 netdev_err(dev, "Device %s is already an upper device of the team interface\n",
1162                            portname);
1163                 return -EBUSY;
1164         }
1165
1166         if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1167             vlan_uses_dev(dev)) {
1168                 NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
1169                 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
1170                            portname);
1171                 return -EPERM;
1172         }
1173
1174         err = team_dev_type_check_change(dev, port_dev);
1175         if (err)
1176                 return err;
1177
1178         if (port_dev->flags & IFF_UP) {
1179                 NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
1180                 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1181                            portname);
1182                 return -EBUSY;
1183         }
1184
1185         port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1186                        GFP_KERNEL);
1187         if (!port)
1188                 return -ENOMEM;
1189
1190         port->dev = port_dev;
1191         port->team = team;
1192         INIT_LIST_HEAD(&port->qom_list);
1193
1194         port->orig.mtu = port_dev->mtu;
1195         err = dev_set_mtu(port_dev, dev->mtu);
1196         if (err) {
1197                 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
1198                 goto err_set_mtu;
1199         }
1200
1201         memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1202
1203         err = team_port_enter(team, port);
1204         if (err) {
1205                 netdev_err(dev, "Device %s failed to enter team mode\n",
1206                            portname);
1207                 goto err_port_enter;
1208         }
1209
1210         err = dev_open(port_dev, extack);
1211         if (err) {
1212                 netdev_dbg(dev, "Device %s opening failed\n",
1213                            portname);
1214                 goto err_dev_open;
1215         }
1216
1217         err = vlan_vids_add_by_dev(port_dev, dev);
1218         if (err) {
1219                 netdev_err(dev, "Failed to add vlan ids to device %s\n",
1220                                 portname);
1221                 goto err_vids_add;
1222         }
1223
1224         err = team_port_enable_netpoll(port);
1225         if (err) {
1226                 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1227                            portname);
1228                 goto err_enable_netpoll;
1229         }
1230
1231         if (!(dev->features & NETIF_F_LRO))
1232                 dev_disable_lro(port_dev);
1233
1234         err = netdev_rx_handler_register(port_dev, team_handle_frame,
1235                                          port);
1236         if (err) {
1237                 netdev_err(dev, "Device %s failed to register rx_handler\n",
1238                            portname);
1239                 goto err_handler_register;
1240         }
1241
1242         err = team_upper_dev_link(team, port, extack);
1243         if (err) {
1244                 netdev_err(dev, "Device %s failed to set upper link\n",
1245                            portname);
1246                 goto err_set_upper_link;
1247         }
1248
1249         err = __team_option_inst_add_port(team, port);
1250         if (err) {
1251                 netdev_err(dev, "Device %s failed to add per-port options\n",
1252                            portname);
1253                 goto err_option_port_add;
1254         }
1255
1256         /* set promiscuity level to new slave */
1257         if (dev->flags & IFF_PROMISC) {
1258                 err = dev_set_promiscuity(port_dev, 1);
1259                 if (err)
1260                         goto err_set_slave_promisc;
1261         }
1262
1263         /* set allmulti level to new slave */
1264         if (dev->flags & IFF_ALLMULTI) {
1265                 err = dev_set_allmulti(port_dev, 1);
1266                 if (err) {
1267                         if (dev->flags & IFF_PROMISC)
1268                                 dev_set_promiscuity(port_dev, -1);
1269                         goto err_set_slave_promisc;
1270                 }
1271         }
1272
1273         netif_addr_lock_bh(dev);
1274         dev_uc_sync_multiple(port_dev, dev);
1275         dev_mc_sync_multiple(port_dev, dev);
1276         netif_addr_unlock_bh(dev);
1277
1278         port->index = -1;
1279         list_add_tail_rcu(&port->list, &team->port_list);
1280         team_port_enable(team, port);
1281         __team_compute_features(team);
1282         __team_port_change_port_added(port, !!netif_oper_up(port_dev));
1283         __team_options_change_check(team);
1284
1285         netdev_info(dev, "Port device %s added\n", portname);
1286
1287         return 0;
1288
1289 err_set_slave_promisc:
1290         __team_option_inst_del_port(team, port);
1291
1292 err_option_port_add:
1293         team_upper_dev_unlink(team, port);
1294
1295 err_set_upper_link:
1296         netdev_rx_handler_unregister(port_dev);
1297
1298 err_handler_register:
1299         team_port_disable_netpoll(port);
1300
1301 err_enable_netpoll:
1302         vlan_vids_del_by_dev(port_dev, dev);
1303
1304 err_vids_add:
1305         dev_close(port_dev);
1306
1307 err_dev_open:
1308         team_port_leave(team, port);
1309         team_port_set_orig_dev_addr(port);
1310
1311 err_port_enter:
1312         dev_set_mtu(port_dev, port->orig.mtu);
1313
1314 err_set_mtu:
1315         kfree(port);
1316
1317         return err;
1318 }
1319
1320 static void __team_port_change_port_removed(struct team_port *port);
1321
1322 static int team_port_del(struct team *team, struct net_device *port_dev)
1323 {
1324         struct net_device *dev = team->dev;
1325         struct team_port *port;
1326         char *portname = port_dev->name;
1327
1328         port = team_port_get_rtnl(port_dev);
1329         if (!port || !team_port_find(team, port)) {
1330                 netdev_err(dev, "Device %s does not act as a port of this team\n",
1331                            portname);
1332                 return -ENOENT;
1333         }
1334
1335         team_port_disable(team, port);
1336         list_del_rcu(&port->list);
1337
1338         if (dev->flags & IFF_PROMISC)
1339                 dev_set_promiscuity(port_dev, -1);
1340         if (dev->flags & IFF_ALLMULTI)
1341                 dev_set_allmulti(port_dev, -1);
1342
1343         team_upper_dev_unlink(team, port);
1344         netdev_rx_handler_unregister(port_dev);
1345         team_port_disable_netpoll(port);
1346         vlan_vids_del_by_dev(port_dev, dev);
1347         dev_uc_unsync(port_dev, dev);
1348         dev_mc_unsync(port_dev, dev);
1349         dev_close(port_dev);
1350         team_port_leave(team, port);
1351
1352         __team_option_inst_mark_removed_port(team, port);
1353         __team_options_change_check(team);
1354         __team_option_inst_del_port(team, port);
1355         __team_port_change_port_removed(port);
1356
1357         team_port_set_orig_dev_addr(port);
1358         dev_set_mtu(port_dev, port->orig.mtu);
1359         kfree_rcu(port, rcu);
1360         netdev_info(dev, "Port device %s removed\n", portname);
1361         __team_compute_features(team);
1362
1363         return 0;
1364 }
1365
1366
1367 /*****************
1368  * Net device ops
1369  *****************/
1370
1371 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
1372 {
1373         ctx->data.str_val = team->mode->kind;
1374         return 0;
1375 }
1376
1377 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1378 {
1379         return team_change_mode(team, ctx->data.str_val);
1380 }
1381
1382 static int team_notify_peers_count_get(struct team *team,
1383                                        struct team_gsetter_ctx *ctx)
1384 {
1385         ctx->data.u32_val = team->notify_peers.count;
1386         return 0;
1387 }
1388
1389 static int team_notify_peers_count_set(struct team *team,
1390                                        struct team_gsetter_ctx *ctx)
1391 {
1392         team->notify_peers.count = ctx->data.u32_val;
1393         return 0;
1394 }
1395
1396 static int team_notify_peers_interval_get(struct team *team,
1397                                           struct team_gsetter_ctx *ctx)
1398 {
1399         ctx->data.u32_val = team->notify_peers.interval;
1400         return 0;
1401 }
1402
1403 static int team_notify_peers_interval_set(struct team *team,
1404                                           struct team_gsetter_ctx *ctx)
1405 {
1406         team->notify_peers.interval = ctx->data.u32_val;
1407         return 0;
1408 }
1409
1410 static int team_mcast_rejoin_count_get(struct team *team,
1411                                        struct team_gsetter_ctx *ctx)
1412 {
1413         ctx->data.u32_val = team->mcast_rejoin.count;
1414         return 0;
1415 }
1416
1417 static int team_mcast_rejoin_count_set(struct team *team,
1418                                        struct team_gsetter_ctx *ctx)
1419 {
1420         team->mcast_rejoin.count = ctx->data.u32_val;
1421         return 0;
1422 }
1423
1424 static int team_mcast_rejoin_interval_get(struct team *team,
1425                                           struct team_gsetter_ctx *ctx)
1426 {
1427         ctx->data.u32_val = team->mcast_rejoin.interval;
1428         return 0;
1429 }
1430
1431 static int team_mcast_rejoin_interval_set(struct team *team,
1432                                           struct team_gsetter_ctx *ctx)
1433 {
1434         team->mcast_rejoin.interval = ctx->data.u32_val;
1435         return 0;
1436 }
1437
1438 static int team_port_en_option_get(struct team *team,
1439                                    struct team_gsetter_ctx *ctx)
1440 {
1441         struct team_port *port = ctx->info->port;
1442
1443         ctx->data.bool_val = team_port_enabled(port);
1444         return 0;
1445 }
1446
1447 static int team_port_en_option_set(struct team *team,
1448                                    struct team_gsetter_ctx *ctx)
1449 {
1450         struct team_port *port = ctx->info->port;
1451
1452         if (ctx->data.bool_val)
1453                 team_port_enable(team, port);
1454         else
1455                 team_port_disable(team, port);
1456         return 0;
1457 }
1458
1459 static int team_user_linkup_option_get(struct team *team,
1460                                        struct team_gsetter_ctx *ctx)
1461 {
1462         struct team_port *port = ctx->info->port;
1463
1464         ctx->data.bool_val = port->user.linkup;
1465         return 0;
1466 }
1467
1468 static void __team_carrier_check(struct team *team);
1469
1470 static int team_user_linkup_option_set(struct team *team,
1471                                        struct team_gsetter_ctx *ctx)
1472 {
1473         struct team_port *port = ctx->info->port;
1474
1475         port->user.linkup = ctx->data.bool_val;
1476         team_refresh_port_linkup(port);
1477         __team_carrier_check(port->team);
1478         return 0;
1479 }
1480
1481 static int team_user_linkup_en_option_get(struct team *team,
1482                                           struct team_gsetter_ctx *ctx)
1483 {
1484         struct team_port *port = ctx->info->port;
1485
1486         ctx->data.bool_val = port->user.linkup_enabled;
1487         return 0;
1488 }
1489
1490 static int team_user_linkup_en_option_set(struct team *team,
1491                                           struct team_gsetter_ctx *ctx)
1492 {
1493         struct team_port *port = ctx->info->port;
1494
1495         port->user.linkup_enabled = ctx->data.bool_val;
1496         team_refresh_port_linkup(port);
1497         __team_carrier_check(port->team);
1498         return 0;
1499 }
1500
1501 static int team_priority_option_get(struct team *team,
1502                                     struct team_gsetter_ctx *ctx)
1503 {
1504         struct team_port *port = ctx->info->port;
1505
1506         ctx->data.s32_val = port->priority;
1507         return 0;
1508 }
1509
1510 static int team_priority_option_set(struct team *team,
1511                                     struct team_gsetter_ctx *ctx)
1512 {
1513         struct team_port *port = ctx->info->port;
1514         s32 priority = ctx->data.s32_val;
1515
1516         if (port->priority == priority)
1517                 return 0;
1518         port->priority = priority;
1519         team_queue_override_port_prio_changed(team, port);
1520         return 0;
1521 }
1522
1523 static int team_queue_id_option_get(struct team *team,
1524                                     struct team_gsetter_ctx *ctx)
1525 {
1526         struct team_port *port = ctx->info->port;
1527
1528         ctx->data.u32_val = port->queue_id;
1529         return 0;
1530 }
1531
1532 static int team_queue_id_option_set(struct team *team,
1533                                     struct team_gsetter_ctx *ctx)
1534 {
1535         struct team_port *port = ctx->info->port;
1536         u16 new_queue_id = ctx->data.u32_val;
1537
1538         if (port->queue_id == new_queue_id)
1539                 return 0;
1540         if (new_queue_id >= team->dev->real_num_tx_queues)
1541                 return -EINVAL;
1542         team_queue_override_port_change_queue_id(team, port, new_queue_id);
1543         return 0;
1544 }
1545
1546 static const struct team_option team_options[] = {
1547         {
1548                 .name = "mode",
1549                 .type = TEAM_OPTION_TYPE_STRING,
1550                 .getter = team_mode_option_get,
1551                 .setter = team_mode_option_set,
1552         },
1553         {
1554                 .name = "notify_peers_count",
1555                 .type = TEAM_OPTION_TYPE_U32,
1556                 .getter = team_notify_peers_count_get,
1557                 .setter = team_notify_peers_count_set,
1558         },
1559         {
1560                 .name = "notify_peers_interval",
1561                 .type = TEAM_OPTION_TYPE_U32,
1562                 .getter = team_notify_peers_interval_get,
1563                 .setter = team_notify_peers_interval_set,
1564         },
1565         {
1566                 .name = "mcast_rejoin_count",
1567                 .type = TEAM_OPTION_TYPE_U32,
1568                 .getter = team_mcast_rejoin_count_get,
1569                 .setter = team_mcast_rejoin_count_set,
1570         },
1571         {
1572                 .name = "mcast_rejoin_interval",
1573                 .type = TEAM_OPTION_TYPE_U32,
1574                 .getter = team_mcast_rejoin_interval_get,
1575                 .setter = team_mcast_rejoin_interval_set,
1576         },
1577         {
1578                 .name = "enabled",
1579                 .type = TEAM_OPTION_TYPE_BOOL,
1580                 .per_port = true,
1581                 .getter = team_port_en_option_get,
1582                 .setter = team_port_en_option_set,
1583         },
1584         {
1585                 .name = "user_linkup",
1586                 .type = TEAM_OPTION_TYPE_BOOL,
1587                 .per_port = true,
1588                 .getter = team_user_linkup_option_get,
1589                 .setter = team_user_linkup_option_set,
1590         },
1591         {
1592                 .name = "user_linkup_enabled",
1593                 .type = TEAM_OPTION_TYPE_BOOL,
1594                 .per_port = true,
1595                 .getter = team_user_linkup_en_option_get,
1596                 .setter = team_user_linkup_en_option_set,
1597         },
1598         {
1599                 .name = "priority",
1600                 .type = TEAM_OPTION_TYPE_S32,
1601                 .per_port = true,
1602                 .getter = team_priority_option_get,
1603                 .setter = team_priority_option_set,
1604         },
1605         {
1606                 .name = "queue_id",
1607                 .type = TEAM_OPTION_TYPE_U32,
1608                 .per_port = true,
1609                 .getter = team_queue_id_option_get,
1610                 .setter = team_queue_id_option_set,
1611         },
1612 };
1613
1614
1615 static int team_init(struct net_device *dev)
1616 {
1617         struct team *team = netdev_priv(dev);
1618         int i;
1619         int err;
1620
1621         team->dev = dev;
1622         team_set_no_mode(team);
1623
1624         team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1625         if (!team->pcpu_stats)
1626                 return -ENOMEM;
1627
1628         for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1629                 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1630         INIT_LIST_HEAD(&team->port_list);
1631         err = team_queue_override_init(team);
1632         if (err)
1633                 goto err_team_queue_override_init;
1634
1635         team_adjust_ops(team);
1636
1637         INIT_LIST_HEAD(&team->option_list);
1638         INIT_LIST_HEAD(&team->option_inst_list);
1639
1640         team_notify_peers_init(team);
1641         team_mcast_rejoin_init(team);
1642
1643         err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1644         if (err)
1645                 goto err_options_register;
1646         netif_carrier_off(dev);
1647
1648         lockdep_register_key(&team->team_lock_key);
1649         __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
1650         netdev_lockdep_set_classes(dev);
1651
1652         return 0;
1653
1654 err_options_register:
1655         team_mcast_rejoin_fini(team);
1656         team_notify_peers_fini(team);
1657         team_queue_override_fini(team);
1658 err_team_queue_override_init:
1659         free_percpu(team->pcpu_stats);
1660
1661         return err;
1662 }
1663
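/*
 * ndo_uninit: remove all remaining ports, drop the current mode, unregister
 * the built-in options and tear down notify-peers, mcast-rejoin and queue
 * override state.
 */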
1664 static void team_uninit(struct net_device *dev)
1665 {
1666         struct team *team = netdev_priv(dev);
1667         struct team_port *port;
1668         struct team_port *tmp;
1669
1670         mutex_lock(&team->lock);
1671         list_for_each_entry_safe(port, tmp, &team->port_list, list)
1672                 team_port_del(team, port->dev);
1673
1674         __team_change_mode(team, NULL); /* cleanup */
1675         __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1676         team_mcast_rejoin_fini(team);
1677         team_notify_peers_fini(team);
1678         team_queue_override_fini(team);
1679         mutex_unlock(&team->lock);
1680         netdev_change_features(dev);
1681         lockdep_unregister_key(&team->team_lock_key);
1682 }
1683
1684 static void team_destructor(struct net_device *dev)
1685 {
1686         struct team *team = netdev_priv(dev);
1687
1688         free_percpu(team->pcpu_stats);
1689 }
1690
1691 static int team_open(struct net_device *dev)
1692 {
1693         return 0;
1694 }
1695
1696 static int team_close(struct net_device *dev)
1697 {
1698         return 0;
1699 }
1700
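/*
 * Transmit path: queue-id overrides get the first chance to send the skb;
 * otherwise it is handed to the current mode's transmit op. Per-cpu tx
 * counters are updated according to the result.
 */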
1701 /*
1702  * note: already called with rcu_read_lock
1703  */
1704 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1705 {
1706         struct team *team = netdev_priv(dev);
1707         bool tx_success;
1708         unsigned int len = skb->len;
1709
1710         tx_success = team_queue_override_transmit(team, skb);
1711         if (!tx_success)
1712                 tx_success = team->ops.transmit(team, skb);
1713         if (tx_success) {
1714                 struct team_pcpu_stats *pcpu_stats;
1715
1716                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1717                 u64_stats_update_begin(&pcpu_stats->syncp);
1718                 pcpu_stats->tx_packets++;
1719                 pcpu_stats->tx_bytes += len;
1720                 u64_stats_update_end(&pcpu_stats->syncp);
1721         } else {
1722                 this_cpu_inc(team->pcpu_stats->tx_dropped);
1723         }
1724
1725         return NETDEV_TX_OK;
1726 }
1727
1728 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1729                              struct net_device *sb_dev)
1730 {
1731         /*
1732          * This helper function exists to help dev_pick_tx get the correct
1733          * destination queue.  Using a helper function skips a call to
1734          * skb_tx_hash and will put the skbs in the queue we expect on their
1735          * way down to the team driver.
1736          */
1737         u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1738
1739         /*
1740          * Save the original txq to restore before passing to the driver
1741          */
1742         qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1743
1744         if (unlikely(txq >= dev->real_num_tx_queues)) {
1745                 do {
1746                         txq -= dev->real_num_tx_queues;
1747                 } while (txq >= dev->real_num_tx_queues);
1748         }
1749         return txq;
1750 }
1751
1752 static void team_change_rx_flags(struct net_device *dev, int change)
1753 {
1754         struct team *team = netdev_priv(dev);
1755         struct team_port *port;
1756         int inc;
1757
1758         rcu_read_lock();
1759         list_for_each_entry_rcu(port, &team->port_list, list) {
1760                 if (change & IFF_PROMISC) {
1761                         inc = dev->flags & IFF_PROMISC ? 1 : -1;
1762                         dev_set_promiscuity(port->dev, inc);
1763                 }
1764                 if (change & IFF_ALLMULTI) {
1765                         inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1766                         dev_set_allmulti(port->dev, inc);
1767                 }
1768         }
1769         rcu_read_unlock();
1770 }
1771
1772 static void team_set_rx_mode(struct net_device *dev)
1773 {
1774         struct team *team = netdev_priv(dev);
1775         struct team_port *port;
1776
1777         rcu_read_lock();
1778         list_for_each_entry_rcu(port, &team->port_list, list) {
1779                 dev_uc_sync_multiple(port->dev, dev);
1780                 dev_mc_sync_multiple(port->dev, dev);
1781         }
1782         rcu_read_unlock();
1783 }
1784
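/*
 * Change the team device address and propagate it to every port via the
 * mode's port_change_dev_addr op, when the mode provides one.
 */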
1785 static int team_set_mac_address(struct net_device *dev, void *p)
1786 {
1787         struct sockaddr *addr = p;
1788         struct team *team = netdev_priv(dev);
1789         struct team_port *port;
1790
1791         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1792                 return -EADDRNOTAVAIL;
1793         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1794         mutex_lock(&team->lock);
1795         list_for_each_entry(port, &team->port_list, list)
1796                 if (team->ops.port_change_dev_addr)
1797                         team->ops.port_change_dev_addr(team, port);
1798         mutex_unlock(&team->lock);
1799         return 0;
1800 }
1801
1802 static int team_change_mtu(struct net_device *dev, int new_mtu)
1803 {
1804         struct team *team = netdev_priv(dev);
1805         struct team_port *port;
1806         int err;
1807
1808         /*
1809          * Although this is a reader, it's guarded by the team lock. It's not
1810          * possible to traverse the list in reverse under rcu_read_lock.
1811          */
1812         mutex_lock(&team->lock);
1813         team->port_mtu_change_allowed = true;
1814         list_for_each_entry(port, &team->port_list, list) {
1815                 err = dev_set_mtu(port->dev, new_mtu);
1816                 if (err) {
1817                         netdev_err(dev, "Device %s failed to change mtu\n",
1818                                    port->dev->name);
1819                         goto unwind;
1820                 }
1821         }
1822         team->port_mtu_change_allowed = false;
1823         mutex_unlock(&team->lock);
1824
1825         dev->mtu = new_mtu;
1826
1827         return 0;
1828
1829 unwind:
1830         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1831                 dev_set_mtu(port->dev, dev->mtu);
1832         team->port_mtu_change_allowed = false;
1833         mutex_unlock(&team->lock);
1834
1835         return err;
1836 }
1837
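/*
 * Sum per-cpu counters into rtnl_link_stats64. The 64-bit rx/tx counters
 * are read under the u64_stats seqcount; the u32 drop/nohandler counters
 * are summed without it.
 */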
1838 static void
1839 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1840 {
1841         struct team *team = netdev_priv(dev);
1842         struct team_pcpu_stats *p;
1843         u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1844         u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1845         unsigned int start;
1846         int i;
1847
1848         for_each_possible_cpu(i) {
1849                 p = per_cpu_ptr(team->pcpu_stats, i);
1850                 do {
1851                         start = u64_stats_fetch_begin_irq(&p->syncp);
1852                         rx_packets      = p->rx_packets;
1853                         rx_bytes        = p->rx_bytes;
1854                         rx_multicast    = p->rx_multicast;
1855                         tx_packets      = p->tx_packets;
1856                         tx_bytes        = p->tx_bytes;
1857                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1858
1859                 stats->rx_packets       += rx_packets;
1860                 stats->rx_bytes         += rx_bytes;
1861                 stats->multicast        += rx_multicast;
1862                 stats->tx_packets       += tx_packets;
1863                 stats->tx_bytes         += tx_bytes;
1864                 /*
1865                  * rx_dropped, tx_dropped & rx_nohandler are u32,
1866                  * updated without syncp protection.
1867                  */
1868                 rx_dropped      += p->rx_dropped;
1869                 tx_dropped      += p->tx_dropped;
1870                 rx_nohandler    += p->rx_nohandler;
1871         }
1872         stats->rx_dropped       = rx_dropped;
1873         stats->tx_dropped       = tx_dropped;
1874         stats->rx_nohandler     = rx_nohandler;
1875 }
1876
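/*
 * Register the VLAN id on every port; on failure, unwind the ports that
 * were already updated.
 */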
1877 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1878 {
1879         struct team *team = netdev_priv(dev);
1880         struct team_port *port;
1881         int err;
1882
1883         /*
1884          * Although this is a reader, it's guarded by the team lock. It's not
1885          * possible to traverse the list in reverse under rcu_read_lock.
1886          */
1887         mutex_lock(&team->lock);
1888         list_for_each_entry(port, &team->port_list, list) {
1889                 err = vlan_vid_add(port->dev, proto, vid);
1890                 if (err)
1891                         goto unwind;
1892         }
1893         mutex_unlock(&team->lock);
1894
1895         return 0;
1896
1897 unwind:
1898         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1899                 vlan_vid_del(port->dev, proto, vid);
1900         mutex_unlock(&team->lock);
1901
1902         return err;
1903 }
1904
1905 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1906 {
1907         struct team *team = netdev_priv(dev);
1908         struct team_port *port;
1909
1910         mutex_lock(&team->lock);
1911         list_for_each_entry(port, &team->port_list, list)
1912                 vlan_vid_del(port->dev, proto, vid);
1913         mutex_unlock(&team->lock);
1914
1915         return 0;
1916 }
1917
1918 #ifdef CONFIG_NET_POLL_CONTROLLER
1919 static void team_poll_controller(struct net_device *dev)
1920 {
1921 }
1922
1923 static void __team_netpoll_cleanup(struct team *team)
1924 {
1925         struct team_port *port;
1926
1927         list_for_each_entry(port, &team->port_list, list)
1928                 team_port_disable_netpoll(port);
1929 }
1930
1931 static void team_netpoll_cleanup(struct net_device *dev)
1932 {
1933         struct team *team = netdev_priv(dev);
1934
1935         mutex_lock(&team->lock);
1936         __team_netpoll_cleanup(team);
1937         mutex_unlock(&team->lock);
1938 }
1939
1940 static int team_netpoll_setup(struct net_device *dev,
1941                               struct netpoll_info *npinfo)
1942 {
1943         struct team *team = netdev_priv(dev);
1944         struct team_port *port;
1945         int err = 0;
1946
1947         mutex_lock(&team->lock);
1948         list_for_each_entry(port, &team->port_list, list) {
1949                 err = __team_port_enable_netpoll(port);
1950                 if (err) {
1951                         __team_netpoll_cleanup(team);
1952                         break;
1953                 }
1954         }
1955         mutex_unlock(&team->lock);
1956         return err;
1957 }
1958 #endif
1959
1960 static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
1961                           struct netlink_ext_ack *extack)
1962 {
1963         struct team *team = netdev_priv(dev);
1964         int err;
1965
1966         mutex_lock(&team->lock);
1967         err = team_port_add(team, port_dev, extack);
1968         mutex_unlock(&team->lock);
1969
1970         if (!err)
1971                 netdev_change_features(dev);
1972
1973         return err;
1974 }
1975
1976 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1977 {
1978         struct team *team = netdev_priv(dev);
1979         int err;
1980
1981         mutex_lock(&team->lock);
1982         err = team_port_del(team, port_dev);
1983         mutex_unlock(&team->lock);
1984
1985         if (err)
1986                 return err;
1987
1988         if (netif_is_team_master(port_dev)) {
1989                 lockdep_unregister_key(&team->team_lock_key);
1990                 lockdep_register_key(&team->team_lock_key);
1991                 lockdep_set_class(&team->lock, &team->team_lock_key);
1992         }
1993         netdev_change_features(dev);
1994
1995         return err;
1996 }
1997
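/*
 * Recompute the team device's feature set from all ports using
 * netdev_increment_features(), with NETIF_F_ONE_FOR_ALL cleared and
 * NETIF_F_ALL_FOR_ALL forced, then add the TSO features allowed by the mask.
 */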
1998 static netdev_features_t team_fix_features(struct net_device *dev,
1999                                            netdev_features_t features)
2000 {
2001         struct team_port *port;
2002         struct team *team = netdev_priv(dev);
2003         netdev_features_t mask;
2004
2005         mask = features;
2006         features &= ~NETIF_F_ONE_FOR_ALL;
2007         features |= NETIF_F_ALL_FOR_ALL;
2008
2009         rcu_read_lock();
2010         list_for_each_entry_rcu(port, &team->port_list, list) {
2011                 features = netdev_increment_features(features,
2012                                                      port->dev->features,
2013                                                      mask);
2014         }
2015         rcu_read_unlock();
2016
2017         features = netdev_add_tso_features(features, mask);
2018
2019         return features;
2020 }
2021
2022 static int team_change_carrier(struct net_device *dev, bool new_carrier)
2023 {
2024         struct team *team = netdev_priv(dev);
2025
2026         team->user_carrier_enabled = true;
2027
2028         if (new_carrier)
2029                 netif_carrier_on(dev);
2030         else
2031                 netif_carrier_off(dev);
2032         return 0;
2033 }
2034
2035 static const struct net_device_ops team_netdev_ops = {
2036         .ndo_init               = team_init,
2037         .ndo_uninit             = team_uninit,
2038         .ndo_open               = team_open,
2039         .ndo_stop               = team_close,
2040         .ndo_start_xmit         = team_xmit,
2041         .ndo_select_queue       = team_select_queue,
2042         .ndo_change_rx_flags    = team_change_rx_flags,
2043         .ndo_set_rx_mode        = team_set_rx_mode,
2044         .ndo_set_mac_address    = team_set_mac_address,
2045         .ndo_change_mtu         = team_change_mtu,
2046         .ndo_get_stats64        = team_get_stats64,
2047         .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
2048         .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
2049 #ifdef CONFIG_NET_POLL_CONTROLLER
2050         .ndo_poll_controller    = team_poll_controller,
2051         .ndo_netpoll_setup      = team_netpoll_setup,
2052         .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2053 #endif
2054         .ndo_add_slave          = team_add_slave,
2055         .ndo_del_slave          = team_del_slave,
2056         .ndo_fix_features       = team_fix_features,
2057         .ndo_change_carrier     = team_change_carrier,
2058         .ndo_features_check     = passthru_features_check,
2059 };
2060
2061 /***********************
2062  * ethtool interface
2063  ***********************/
2064
2065 static void team_ethtool_get_drvinfo(struct net_device *dev,
2066                                      struct ethtool_drvinfo *drvinfo)
2067 {
2068         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2069         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2070 }
2071
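/*
 * Report aggregate link settings: the sum of the speeds of all txable ports
 * and the duplex of the first txable port that reports one.
 */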
2072 static int team_ethtool_get_link_ksettings(struct net_device *dev,
2073                                            struct ethtool_link_ksettings *cmd)
2074 {
2075         struct team *team = netdev_priv(dev);
2076         unsigned long speed = 0;
2077         struct team_port *port;
2078
2079         cmd->base.duplex = DUPLEX_UNKNOWN;
2080         cmd->base.port = PORT_OTHER;
2081
2082         rcu_read_lock();
2083         list_for_each_entry_rcu(port, &team->port_list, list) {
2084                 if (team_port_txable(port)) {
2085                         if (port->state.speed != SPEED_UNKNOWN)
2086                                 speed += port->state.speed;
2087                         if (cmd->base.duplex == DUPLEX_UNKNOWN &&
2088                             port->state.duplex != DUPLEX_UNKNOWN)
2089                                 cmd->base.duplex = port->state.duplex;
2090                 }
2091         }
2092         rcu_read_unlock();
2093
2094         cmd->base.speed = speed ? : SPEED_UNKNOWN;
2095
2096         return 0;
2097 }
2098
2099 static const struct ethtool_ops team_ethtool_ops = {
2100         .get_drvinfo            = team_ethtool_get_drvinfo,
2101         .get_link               = ethtool_op_get_link,
2102         .get_link_ksettings     = team_ethtool_get_link_ksettings,
2103 };
2104
2105 /***********************
2106  * rt netlink interface
2107  ***********************/
2108
2109 static void team_setup_by_port(struct net_device *dev,
2110                                struct net_device *port_dev)
2111 {
2112         dev->header_ops = port_dev->header_ops;
2113         dev->type = port_dev->type;
2114         dev->hard_header_len = port_dev->hard_header_len;
2115         dev->needed_headroom = port_dev->needed_headroom;
2116         dev->addr_len = port_dev->addr_len;
2117         dev->mtu = port_dev->mtu;
2118         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2119         eth_hw_addr_inherit(dev, port_dev);
2120 }
2121
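/*
 * If a port of a different device type is added to an empty team, ask
 * NETDEV_PRE_TYPE_CHANGE listeners for permission and re-setup the team
 * device to match the port. A non-empty team refuses such ports.
 */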
2122 static int team_dev_type_check_change(struct net_device *dev,
2123                                       struct net_device *port_dev)
2124 {
2125         struct team *team = netdev_priv(dev);
2126         char *portname = port_dev->name;
2127         int err;
2128
2129         if (dev->type == port_dev->type)
2130                 return 0;
2131         if (!list_empty(&team->port_list)) {
2132                 netdev_err(dev, "Device %s is of different type\n", portname);
2133                 return -EBUSY;
2134         }
2135         err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2136         err = notifier_to_errno(err);
2137         if (err) {
2138                 netdev_err(dev, "Refused to change device type\n");
2139                 return err;
2140         }
2141         dev_uc_flush(dev);
2142         dev_mc_flush(dev);
2143         team_setup_by_port(dev, port_dev);
2144         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2145         return 0;
2146 }
2147
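/*
 * rtnl_link_ops setup: start from ether_setup() defaults, install the team
 * netdev/ethtool ops and mark the device LLTX, no-queue, namespace-local,
 * with unicast filtering and live address change supported.
 */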
2148 static void team_setup(struct net_device *dev)
2149 {
2150         ether_setup(dev);
2151         dev->max_mtu = ETH_MAX_MTU;
2152
2153         dev->netdev_ops = &team_netdev_ops;
2154         dev->ethtool_ops = &team_ethtool_ops;
2155         dev->needs_free_netdev = true;
2156         dev->priv_destructor = team_destructor;
2157         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2158         dev->priv_flags |= IFF_NO_QUEUE;
2159         dev->priv_flags |= IFF_TEAM;
2160
2161         /*
2162          * Indicate we support unicast address filtering. That way the core
2163          * won't put us into promiscuous mode when a unicast address is added;
2164          * leave that up to the underlying port drivers.
2165          */
2166         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2167
2168         dev->features |= NETIF_F_LLTX;
2169         dev->features |= NETIF_F_GRO;
2170
2171         /* Don't allow team devices to change network namespaces. */
2172         dev->features |= NETIF_F_NETNS_LOCAL;
2173
2174         dev->hw_features = TEAM_VLAN_FEATURES |
2175                            NETIF_F_HW_VLAN_CTAG_RX |
2176                            NETIF_F_HW_VLAN_CTAG_FILTER;
2177
2178         dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
2179         dev->features |= dev->hw_features;
2180         dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2181 }
2182
2183 static int team_newlink(struct net *src_net, struct net_device *dev,
2184                         struct nlattr *tb[], struct nlattr *data[],
2185                         struct netlink_ext_ack *extack)
2186 {
2187         if (tb[IFLA_ADDRESS] == NULL)
2188                 eth_hw_addr_random(dev);
2189
2190         return register_netdevice(dev);
2191 }
2192
2193 static int team_validate(struct nlattr *tb[], struct nlattr *data[],
2194                          struct netlink_ext_ack *extack)
2195 {
2196         if (tb[IFLA_ADDRESS]) {
2197                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2198                         return -EINVAL;
2199                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2200                         return -EADDRNOTAVAIL;
2201         }
2202         return 0;
2203 }
2204
2205 static unsigned int team_get_num_tx_queues(void)
2206 {
2207         return TEAM_DEFAULT_NUM_TX_QUEUES;
2208 }
2209
2210 static unsigned int team_get_num_rx_queues(void)
2211 {
2212         return TEAM_DEFAULT_NUM_RX_QUEUES;
2213 }
2214
2215 static struct rtnl_link_ops team_link_ops __read_mostly = {
2216         .kind                   = DRV_NAME,
2217         .priv_size              = sizeof(struct team),
2218         .setup                  = team_setup,
2219         .newlink                = team_newlink,
2220         .validate               = team_validate,
2221         .get_num_tx_queues      = team_get_num_tx_queues,
2222         .get_num_rx_queues      = team_get_num_rx_queues,
2223 };
2224
2225
2226 /***********************************
2227  * Generic netlink custom interface
2228  ***********************************/
2229
2230 static struct genl_family team_nl_family;
2231
2232 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2233         [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2234         [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2235         [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2236         [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2237 };
2238
2239 static const struct nla_policy
2240 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2241         [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2242         [TEAM_ATTR_OPTION_NAME] = {
2243                 .type = NLA_STRING,
2244                 .len = TEAM_STRING_MAX_LEN,
2245         },
2246         [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2247         [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2248         [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2249         [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
2250         [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
2251 };
2252
2253 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2254 {
2255         struct sk_buff *msg;
2256         void *hdr;
2257         int err;
2258
2259         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2260         if (!msg)
2261                 return -ENOMEM;
2262
2263         hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2264                           &team_nl_family, 0, TEAM_CMD_NOOP);
2265         if (!hdr) {
2266                 err = -EMSGSIZE;
2267                 goto err_msg_put;
2268         }
2269
2270         genlmsg_end(msg, hdr);
2271
2272         return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2273
2274 err_msg_put:
2275         nlmsg_free(msg);
2276
2277         return err;
2278 }
2279
2280 /*
2281  * Netlink cmd functions should be guarded by the following two functions.
2282  * Since a reference to dev is held here, the device cannot disappear in between.
2283  */
2284 static struct team *team_nl_team_get(struct genl_info *info)
2285 {
2286         struct net *net = genl_info_net(info);
2287         int ifindex;
2288         struct net_device *dev;
2289         struct team *team;
2290
2291         if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2292                 return NULL;
2293
2294         ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2295         dev = dev_get_by_index(net, ifindex);
2296         if (!dev || dev->netdev_ops != &team_netdev_ops) {
2297                 if (dev)
2298                         dev_put(dev);
2299                 return NULL;
2300         }
2301
2302         team = netdev_priv(dev);
2303         mutex_lock(&team->lock);
2304         return team;
2305 }
2306
2307 static void team_nl_team_put(struct team *team)
2308 {
2309         mutex_unlock(&team->lock);
2310         dev_put(team->dev);
2311 }
2312
2313 typedef int team_nl_send_func_t(struct sk_buff *skb,
2314                                 struct team *team, u32 portid);
2315
2316 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2317 {
2318         return genlmsg_unicast(dev_net(team->dev), skb, portid);
2319 }
2320
2321 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2322                                        struct team_option_inst *opt_inst)
2323 {
2324         struct nlattr *option_item;
2325         struct team_option *option = opt_inst->option;
2326         struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2327         struct team_gsetter_ctx ctx;
2328         int err;
2329
2330         ctx.info = opt_inst_info;
2331         err = team_option_get(team, opt_inst, &ctx);
2332         if (err)
2333                 return err;
2334
2335         option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
2336         if (!option_item)
2337                 return -EMSGSIZE;
2338
2339         if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2340                 goto nest_cancel;
2341         if (opt_inst_info->port &&
2342             nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2343                         opt_inst_info->port->dev->ifindex))
2344                 goto nest_cancel;
2345         if (opt_inst->option->array_size &&
2346             nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2347                         opt_inst_info->array_index))
2348                 goto nest_cancel;
2349
2350         switch (option->type) {
2351         case TEAM_OPTION_TYPE_U32:
2352                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2353                         goto nest_cancel;
2354                 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2355                         goto nest_cancel;
2356                 break;
2357         case TEAM_OPTION_TYPE_STRING:
2358                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2359                         goto nest_cancel;
2360                 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2361                                    ctx.data.str_val))
2362                         goto nest_cancel;
2363                 break;
2364         case TEAM_OPTION_TYPE_BINARY:
2365                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2366                         goto nest_cancel;
2367                 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2368                             ctx.data.bin_val.ptr))
2369                         goto nest_cancel;
2370                 break;
2371         case TEAM_OPTION_TYPE_BOOL:
2372                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2373                         goto nest_cancel;
2374                 if (ctx.data.bool_val &&
2375                     nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2376                         goto nest_cancel;
2377                 break;
2378         case TEAM_OPTION_TYPE_S32:
2379                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2380                         goto nest_cancel;
2381                 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2382                         goto nest_cancel;
2383                 break;
2384         default:
2385                 BUG();
2386         }
2387         if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2388                 goto nest_cancel;
2389         if (opt_inst->changed) {
2390                 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2391                         goto nest_cancel;
2392                 opt_inst->changed = false;
2393         }
2394         nla_nest_end(skb, option_item);
2395         return 0;
2396
2397 nest_cancel:
2398         nla_nest_cancel(skb, option_item);
2399         return -EMSGSIZE;
2400 }
2401
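/*
 * Flush the current skb (if any) through send_func and allocate a fresh
 * genetlink buffer for the next part of a multi-part dump.
 */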
2402 static int __send_and_alloc_skb(struct sk_buff **pskb,
2403                                 struct team *team, u32 portid,
2404                                 team_nl_send_func_t *send_func)
2405 {
2406         int err;
2407
2408         if (*pskb) {
2409                 err = send_func(*pskb, team, portid);
2410                 if (err)
2411                         return err;
2412         }
2413         *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2414         if (!*pskb)
2415                 return -ENOMEM;
2416         return 0;
2417 }
2418
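/*
 * Build TEAM_CMD_OPTIONS_GET messages for the selected option instances.
 * If they do not all fit into one skb, the partial message is sent and a new
 * one is started (start_again); the sequence is terminated with NLMSG_DONE.
 */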
2419 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2420                                     int flags, team_nl_send_func_t *send_func,
2421                                     struct list_head *sel_opt_inst_list)
2422 {
2423         struct nlattr *option_list;
2424         struct nlmsghdr *nlh;
2425         void *hdr;
2426         struct team_option_inst *opt_inst;
2427         int err;
2428         struct sk_buff *skb = NULL;
2429         bool incomplete;
2430         int i;
2431
2432         opt_inst = list_first_entry(sel_opt_inst_list,
2433                                     struct team_option_inst, tmp_list);
2434
2435 start_again:
2436         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2437         if (err)
2438                 return err;
2439
2440         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2441                           TEAM_CMD_OPTIONS_GET);
2442         if (!hdr) {
2443                 nlmsg_free(skb);
2444                 return -EMSGSIZE;
2445         }
2446
2447         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2448                 goto nla_put_failure;
2449         option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
2450         if (!option_list)
2451                 goto nla_put_failure;
2452
2453         i = 0;
2454         incomplete = false;
2455         list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2456                 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2457                 if (err) {
2458                         if (err == -EMSGSIZE) {
2459                                 if (!i)
2460                                         goto errout;
2461                                 incomplete = true;
2462                                 break;
2463                         }
2464                         goto errout;
2465                 }
2466                 i++;
2467         }
2468
2469         nla_nest_end(skb, option_list);
2470         genlmsg_end(skb, hdr);
2471         if (incomplete)
2472                 goto start_again;
2473
2474 send_done:
2475         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2476         if (!nlh) {
2477                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2478                 if (err)
2479                         return err;
2480                 goto send_done;
2481         }
2482
2483         return send_func(skb, team, portid);
2484
2485 nla_put_failure:
2486         err = -EMSGSIZE;
2487 errout:
2488         nlmsg_free(skb);
2489         return err;
2490 }
2491
2492 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2493 {
2494         struct team *team;
2495         struct team_option_inst *opt_inst;
2496         int err;
2497         LIST_HEAD(sel_opt_inst_list);
2498
2499         team = team_nl_team_get(info);
2500         if (!team)
2501                 return -EINVAL;
2502
2503         list_for_each_entry(opt_inst, &team->option_inst_list, list)
2504                 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2505         err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2506                                        NLM_F_ACK, team_nl_send_unicast,
2507                                        &sel_opt_inst_list);
2508
2509         team_nl_team_put(team);
2510
2511         return err;
2512 }
2513
2514 static int team_nl_send_event_options_get(struct team *team,
2515                                           struct list_head *sel_opt_inst_list);
2516
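/*
 * TEAM_CMD_OPTIONS_SET handler: walk the nested option attributes, match
 * each against a registered option instance by name, type, port ifindex and
 * array index, apply the new value and multicast the resulting change event.
 */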
2517 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2518 {
2519         struct team *team;
2520         int err = 0;
2521         int i;
2522         struct nlattr *nl_option;
2523
2524         rtnl_lock();
2525
2526         team = team_nl_team_get(info);
2527         if (!team) {
2528                 err = -EINVAL;
2529                 goto rtnl_unlock;
2530         }
2531
2532         err = -EINVAL;
2533         if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2534                 err = -EINVAL;
2535                 goto team_put;
2536         }
2537
2538         nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2539                 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2540                 struct nlattr *attr;
2541                 struct nlattr *attr_data;
2542                 LIST_HEAD(opt_inst_list);
2543                 enum team_option_type opt_type;
2544                 int opt_port_ifindex = 0; /* != 0 for per-port options */
2545                 u32 opt_array_index = 0;
2546                 bool opt_is_array = false;
2547                 struct team_option_inst *opt_inst;
2548                 char *opt_name;
2549                 bool opt_found = false;
2550
2551                 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2552                         err = -EINVAL;
2553                         goto team_put;
2554                 }
2555                 err = nla_parse_nested_deprecated(opt_attrs,
2556                                                   TEAM_ATTR_OPTION_MAX,
2557                                                   nl_option,
2558                                                   team_nl_option_policy,
2559                                                   info->extack);
2560                 if (err)
2561                         goto team_put;
2562                 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2563                     !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2564                         err = -EINVAL;
2565                         goto team_put;
2566                 }
2567                 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2568                 case NLA_U32:
2569                         opt_type = TEAM_OPTION_TYPE_U32;
2570                         break;
2571                 case NLA_STRING:
2572                         opt_type = TEAM_OPTION_TYPE_STRING;
2573                         break;
2574                 case NLA_BINARY:
2575                         opt_type = TEAM_OPTION_TYPE_BINARY;
2576                         break;
2577                 case NLA_FLAG:
2578                         opt_type = TEAM_OPTION_TYPE_BOOL;
2579                         break;
2580                 case NLA_S32:
2581                         opt_type = TEAM_OPTION_TYPE_S32;
2582                         break;
2583                 default:
2584                         goto team_put;
2585                 }
2586
2587                 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2588                 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2589                         err = -EINVAL;
2590                         goto team_put;
2591                 }
2592
2593                 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2594                 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2595                 if (attr)
2596                         opt_port_ifindex = nla_get_u32(attr);
2597
2598                 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2599                 if (attr) {
2600                         opt_is_array = true;
2601                         opt_array_index = nla_get_u32(attr);
2602                 }
2603
2604                 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2605                         struct team_option *option = opt_inst->option;
2606                         struct team_gsetter_ctx ctx;
2607                         struct team_option_inst_info *opt_inst_info;
2608                         int tmp_ifindex;
2609
2610                         opt_inst_info = &opt_inst->info;
2611                         tmp_ifindex = opt_inst_info->port ?
2612                                       opt_inst_info->port->dev->ifindex : 0;
2613                         if (option->type != opt_type ||
2614                             strcmp(option->name, opt_name) ||
2615                             tmp_ifindex != opt_port_ifindex ||
2616                             (option->array_size && !opt_is_array) ||
2617                             opt_inst_info->array_index != opt_array_index)
2618                                 continue;
2619                         opt_found = true;
2620                         ctx.info = opt_inst_info;
2621                         switch (opt_type) {
2622                         case TEAM_OPTION_TYPE_U32:
2623                                 ctx.data.u32_val = nla_get_u32(attr_data);
2624                                 break;
2625                         case TEAM_OPTION_TYPE_STRING:
2626                                 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2627                                         err = -EINVAL;
2628                                         goto team_put;
2629                                 }
2630                                 ctx.data.str_val = nla_data(attr_data);
2631                                 break;
2632                         case TEAM_OPTION_TYPE_BINARY:
2633                                 ctx.data.bin_val.len = nla_len(attr_data);
2634                                 ctx.data.bin_val.ptr = nla_data(attr_data);
2635                                 break;
2636                         case TEAM_OPTION_TYPE_BOOL:
2637                                 ctx.data.bool_val = attr_data ? true : false;
2638                                 break;
2639                         case TEAM_OPTION_TYPE_S32:
2640                                 ctx.data.s32_val = nla_get_s32(attr_data);
2641                                 break;
2642                         default:
2643                                 BUG();
2644                         }
2645                         err = team_option_set(team, opt_inst, &ctx);
2646                         if (err)
2647                                 goto team_put;
2648                         opt_inst->changed = true;
2649                         list_add(&opt_inst->tmp_list, &opt_inst_list);
2650                 }
2651                 if (!opt_found) {
2652                         err = -ENOENT;
2653                         goto team_put;
2654                 }
2655
2656                 err = team_nl_send_event_options_get(team, &opt_inst_list);
2657                 if (err)
2658                         break;
2659         }
2660
2661 team_put:
2662         team_nl_team_put(team);
2663 rtnl_unlock:
2664         rtnl_unlock();
2665         return err;
2666 }
2667
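/*
 * Emit one TEAM_ATTR_ITEM_PORT nest for a port: ifindex, changed/removed/
 * linkup flags, speed and duplex.
 */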
2668 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2669                                      struct team_port *port)
2670 {
2671         struct nlattr *port_item;
2672
2673         port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
2674         if (!port_item)
2675                 goto nest_cancel;
2676         if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2677                 goto nest_cancel;
2678         if (port->changed) {
2679                 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2680                         goto nest_cancel;
2681                 port->changed = false;
2682         }
2683         if ((port->removed &&
2684              nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2685             (port->state.linkup &&
2686              nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2687             nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2688             nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2689                 goto nest_cancel;
2690         nla_nest_end(skb, port_item);
2691         return 0;
2692
2693 nest_cancel:
2694         nla_nest_cancel(skb, port_item);
2695         return -EMSGSIZE;
2696 }
2697
2698 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2699                                       int flags, team_nl_send_func_t *send_func,
2700                                       struct team_port *one_port)
2701 {
2702         struct nlattr *port_list;
2703         struct nlmsghdr *nlh;
2704         void *hdr;
2705         struct team_port *port;
2706         int err;
2707         struct sk_buff *skb = NULL;
2708         bool incomplete;
2709         int i;
2710
2711         port = list_first_entry_or_null(&team->port_list,
2712                                         struct team_port, list);
2713
2714 start_again:
2715         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2716         if (err)
2717                 return err;
2718
2719         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2720                           TEAM_CMD_PORT_LIST_GET);
2721         if (!hdr) {
2722                 nlmsg_free(skb);
2723                 return -EMSGSIZE;
2724         }
2725
2726         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2727                 goto nla_put_failure;
2728         port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
2729         if (!port_list)
2730                 goto nla_put_failure;
2731
2732         i = 0;
2733         incomplete = false;
2734
2735         /* If one port is selected, the caller wants to send a port list
2736          * containing only this port; otherwise send all listed ports.
2737          */
2738         if (one_port) {
2739                 err = team_nl_fill_one_port_get(skb, one_port);
2740                 if (err)
2741                         goto errout;
2742         } else if (port) {
2743                 list_for_each_entry_from(port, &team->port_list, list) {
2744                         err = team_nl_fill_one_port_get(skb, port);
2745                         if (err) {
2746                                 if (err == -EMSGSIZE) {
2747                                         if (!i)
2748                                                 goto errout;
2749                                         incomplete = true;
2750                                         break;
2751                                 }
2752                                 goto errout;
2753                         }
2754                         i++;
2755                 }
2756         }
2757
2758         nla_nest_end(skb, port_list);
2759         genlmsg_end(skb, hdr);
2760         if (incomplete)
2761                 goto start_again;
2762
2763 send_done:
2764         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2765         if (!nlh) {
2766                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2767                 if (err)
2768                         return err;
2769                 goto send_done;
2770         }
2771
2772         return send_func(skb, team, portid);
2773
2774 nla_put_failure:
2775         err = -EMSGSIZE;
2776 errout:
2777         nlmsg_free(skb);
2778         return err;
2779 }
2780
2781 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2782                                      struct genl_info *info)
2783 {
2784         struct team *team;
2785         int err;
2786
2787         team = team_nl_team_get(info);
2788         if (!team)
2789                 return -EINVAL;
2790
2791         err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2792                                          NLM_F_ACK, team_nl_send_unicast, NULL);
2793
2794         team_nl_team_put(team);
2795
2796         return err;
2797 }
2798
2799 static const struct genl_small_ops team_nl_ops[] = {
2800         {
2801                 .cmd = TEAM_CMD_NOOP,
2802                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2803                 .doit = team_nl_cmd_noop,
2804         },
2805         {
2806                 .cmd = TEAM_CMD_OPTIONS_SET,
2807                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2808                 .doit = team_nl_cmd_options_set,
2809                 .flags = GENL_ADMIN_PERM,
2810         },
2811         {
2812                 .cmd = TEAM_CMD_OPTIONS_GET,
2813                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2814                 .doit = team_nl_cmd_options_get,
2815                 .flags = GENL_ADMIN_PERM,
2816         },
2817         {
2818                 .cmd = TEAM_CMD_PORT_LIST_GET,
2819                 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
2820                 .doit = team_nl_cmd_port_list_get,
2821                 .flags = GENL_ADMIN_PERM,
2822         },
2823 };
2824
2825 static const struct genl_multicast_group team_nl_mcgrps[] = {
2826         { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2827 };
2828
2829 static struct genl_family team_nl_family __ro_after_init = {
2830         .name           = TEAM_GENL_NAME,
2831         .version        = TEAM_GENL_VERSION,
2832         .maxattr        = TEAM_ATTR_MAX,
2833         .policy = team_nl_policy,
2834         .netnsok        = true,
2835         .module         = THIS_MODULE,
2836         .small_ops      = team_nl_ops,
2837         .n_small_ops    = ARRAY_SIZE(team_nl_ops),
2838         .mcgrps         = team_nl_mcgrps,
2839         .n_mcgrps       = ARRAY_SIZE(team_nl_mcgrps),
2840 };
2841
2842 static int team_nl_send_multicast(struct sk_buff *skb,
2843                                   struct team *team, u32 portid)
2844 {
2845         return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2846                                        skb, 0, 0, GFP_KERNEL);
2847 }
2848
2849 static int team_nl_send_event_options_get(struct team *team,
2850                                           struct list_head *sel_opt_inst_list)
2851 {
2852         return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2853                                         sel_opt_inst_list);
2854 }
2855
2856 static int team_nl_send_event_port_get(struct team *team,
2857                                        struct team_port *port)
2858 {
2859         return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2860                                           port);
2861 }
2862
2863 static int __init team_nl_init(void)
2864 {
2865         return genl_register_family(&team_nl_family);
2866 }
2867
2868 static void team_nl_fini(void)
2869 {
2870         genl_unregister_family(&team_nl_family);
2871 }
2872
2873
2874 /******************
2875  * Change checkers
2876  ******************/
2877
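/*
 * Collect all option instances marked as changed and multicast them to the
 * change-event group; -ESRCH (no listeners) is not treated as an error.
 */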
2878 static void __team_options_change_check(struct team *team)
2879 {
2880         int err;
2881         struct team_option_inst *opt_inst;
2882         LIST_HEAD(sel_opt_inst_list);
2883
2884         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2885                 if (opt_inst->changed)
2886                         list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2887         }
2888         err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2889         if (err && err != -ESRCH)
2890                 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2891                             err);
2892 }
2893
2894 /* rtnl lock is held */
2895
2896 static void __team_port_change_send(struct team_port *port, bool linkup)
2897 {
2898         int err;
2899
2900         port->changed = true;
2901         port->state.linkup = linkup;
2902         team_refresh_port_linkup(port);
2903         if (linkup) {
2904                 struct ethtool_link_ksettings ecmd;
2905
2906                 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2907                 if (!err) {
2908                         port->state.speed = ecmd.base.speed;
2909                         port->state.duplex = ecmd.base.duplex;
2910                         goto send_event;
2911                 }
2912         }
2913         port->state.speed = 0;
2914         port->state.duplex = 0;
2915
2916 send_event:
2917         err = team_nl_send_event_port_get(port->team, port);
2918         if (err && err != -ESRCH)
2919                 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2920                             port->dev->name, err);
2921
2922 }
2923
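/*
 * Reflect port link state in the master's carrier unless the user has taken
 * over carrier control via ndo_change_carrier.
 */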
2924 static void __team_carrier_check(struct team *team)
2925 {
2926         struct team_port *port;
2927         bool team_linkup;
2928
2929         if (team->user_carrier_enabled)
2930                 return;
2931
2932         team_linkup = false;
2933         list_for_each_entry(port, &team->port_list, list) {
2934                 if (port->linkup) {
2935                         team_linkup = true;
2936                         break;
2937                 }
2938         }
2939
2940         if (team_linkup)
2941                 netif_carrier_on(team->dev);
2942         else
2943                 netif_carrier_off(team->dev);
2944 }
2945
2946 static void __team_port_change_check(struct team_port *port, bool linkup)
2947 {
2948         if (port->state.linkup != linkup)
2949                 __team_port_change_send(port, linkup);
2950         __team_carrier_check(port->team);
2951 }
2952
2953 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2954 {
2955         __team_port_change_send(port, linkup);
2956         __team_carrier_check(port->team);
2957 }
2958
2959 static void __team_port_change_port_removed(struct team_port *port)
2960 {
2961         port->removed = true;
2962         __team_port_change_send(port, false);
2963         __team_carrier_check(port->team);
2964 }
2965
2966 static void team_port_change_check(struct team_port *port, bool linkup)
2967 {
2968         struct team *team = port->team;
2969
2970         mutex_lock(&team->lock);
2971         __team_port_change_check(port, linkup);
2972         mutex_unlock(&team->lock);
2973 }
2974
2975
2976 /************************************
2977  * Net device notifier event handler
2978  ************************************/
2979
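/*
 * Netdevice notifier: handle events on underlying port devices (link
 * changes, unregister, feature/MTU/type changes, IGMP resend) on behalf of
 * the team master device.
 */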
2980 static int team_device_event(struct notifier_block *unused,
2981                              unsigned long event, void *ptr)
2982 {
2983         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2984         struct team_port *port;
2985
2986         port = team_port_get_rtnl(dev);
2987         if (!port)
2988                 return NOTIFY_DONE;
2989
2990         switch (event) {
2991         case NETDEV_UP:
2992                 if (netif_oper_up(dev))
2993                         team_port_change_check(port, true);
2994                 break;
2995         case NETDEV_DOWN:
2996                 team_port_change_check(port, false);
2997                 break;
2998         case NETDEV_CHANGE:
2999                 if (netif_running(port->dev))
3000                         team_port_change_check(port,
3001                                                !!netif_oper_up(port->dev));
3002                 break;
3003         case NETDEV_UNREGISTER:
3004                 team_del_slave(port->team->dev, dev);
3005                 break;
3006         case NETDEV_FEAT_CHANGE:
3007                 team_compute_features(port->team);
3008                 break;
3009         case NETDEV_PRECHANGEMTU:
3010                 /* Forbid changing the MTU of an underlying device */
3011                 if (!port->team->port_mtu_change_allowed)
3012                         return NOTIFY_BAD;
3013                 break;
3014         case NETDEV_PRE_TYPE_CHANGE:
3015                 /* Forbid changing the type of an underlying device */
3016                 return NOTIFY_BAD;
3017         case NETDEV_RESEND_IGMP:
3018                 /* Propagate to master device */
3019                 call_netdevice_notifiers(event, port->team->dev);
3020                 break;
3021         }
3022         return NOTIFY_DONE;
3023 }
3024
3025 static struct notifier_block team_notifier_block __read_mostly = {
3026         .notifier_call = team_device_event,
3027 };
3028
3029
3030 /***********************
3031  * Module init and exit
3032  ***********************/
3033
3034 static int __init team_module_init(void)
3035 {
3036         int err;
3037
3038         register_netdevice_notifier(&team_notifier_block);
3039
3040         err = rtnl_link_register(&team_link_ops);
3041         if (err)
3042                 goto err_rtnl_reg;
3043
3044         err = team_nl_init();
3045         if (err)
3046                 goto err_nl_init;
3047
3048         return 0;
3049
3050 err_nl_init:
3051         rtnl_link_unregister(&team_link_ops);
3052
3053 err_rtnl_reg:
3054         unregister_netdevice_notifier(&team_notifier_block);
3055
3056         return err;
3057 }
3058
3059 static void __exit team_module_exit(void)
3060 {
3061         team_nl_fini();
3062         rtnl_link_unregister(&team_link_ops);
3063         unregister_netdevice_notifier(&team_notifier_block);
3064 }
3065
3066 module_init(team_module_init);
3067 module_exit(team_module_exit);
3068
3069 MODULE_LICENSE("GPL v2");
3070 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
3071 MODULE_DESCRIPTION("Ethernet team device driver");
3072 MODULE_ALIAS_RTNL_LINK(DRV_NAME);