/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <net/bonding.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/vport.h>
#include "lib/devcom.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "lag.h"
#include "mp.h"
enum {
	MLX5_LAG_EGRESS_PORT_1 = 1,
	MLX5_LAG_EGRESS_PORT_2,
};
/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);
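
/* Build and issue a CREATE_LAG command. The TX remap affinity fields are
 * only meaningful in queue affinity mode; in hash mode the port selection
 * flow table decides the egress port, so port_select_mode is set instead.
 */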
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2, bool shared_fdb, u8 flags)
{
	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
	if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
	} else {
		MLX5_SET(lagc, lag_ctx, port_select_mode,
			 MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
	}

	return mlx5_cmd_exec_in(dev, create_lag, in);
}
static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec_in(dev, modify_lag, in);
}
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);
static void mlx5_ldev_free(struct kref *ref)
{
	struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

	if (ldev->nb.notifier_call)
		unregister_netdevice_notifier_net(&init_net, &ldev->nb);
	mlx5_lag_mp_cleanup(ldev);
	cancel_delayed_work_sync(&ldev->bond_work);
	destroy_workqueue(ldev->wq);
	kfree(ldev);
}
static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
	kref_put(&ldev->ref, mlx5_ldev_free);
}

static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
	kref_get(&ldev->ref);
}
static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int err;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	ldev->wq = create_singlethread_workqueue("mlx5_lag");
	if (!ldev->wq) {
		kfree(ldev);
		return NULL;
	}

	kref_init(&ldev->ref);
	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	ldev->nb.notifier_call = mlx5_lag_netdev_event;
	if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
		ldev->nb.notifier_call = NULL;
		mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
	}

	err = mlx5_lag_mp_init(ldev);
	if (err)
		mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
			      err);

	return ldev;
}
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -ENOENT;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}
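
/* Derive the queue affinity port map from the bond slave state: each port
 * maps to itself when both (or neither) slaves can transmit; otherwise
 * both entries point at the single port that is link-up and TX-enabled.
 */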
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	bool p1en;
	bool p2en;

	p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
	       tracker->netdev_state[MLX5_LAG_P1].link_up;

	p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
	       tracker->netdev_state[MLX5_LAG_P2].link_up;

	*port1 = MLX5_LAG_EGRESS_PORT_1;
	*port2 = MLX5_LAG_EGRESS_PORT_2;
	if ((!p1en && !p2en) || (p1en && p2en))
		return;

	if (p1en)
		*port2 = MLX5_LAG_EGRESS_PORT_1;
	else
		*port1 = MLX5_LAG_EGRESS_PORT_2;
}
static bool mlx5_lag_has_drop_rule(struct mlx5_lag *ldev)
{
	return ldev->pf[MLX5_LAG_P1].has_drop || ldev->pf[MLX5_LAG_P2].has_drop;
}
static void mlx5_lag_drop_rule_cleanup(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].has_drop)
			continue;

		mlx5_esw_acl_ingress_vport_drop_rule_destroy(ldev->pf[i].dev->priv.eswitch,
							     MLX5_VPORT_UPLINK);
		ldev->pf[i].has_drop = false;
	}
}
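
/* For bonds with an inactive slave (active-backup), install an ingress
 * ACL drop rule on the uplink vport of the port that is currently
 * inactive, so traffic arriving on it is discarded rather than handled.
 */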
static void mlx5_lag_drop_rule_setup(struct mlx5_lag *ldev,
				     struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct mlx5_core_dev *inactive;
	u8 v2p_port1, v2p_port2;
	int inactive_idx;
	int err;

	/* First delete the current drop rule so there won't be any dropped
	 * packets
	 */
	mlx5_lag_drop_rule_cleanup(ldev);

	if (!ldev->tracker.has_inactive)
		return;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, &v2p_port2);

	if (v2p_port1 == MLX5_LAG_EGRESS_PORT_1) {
		inactive = dev1;
		inactive_idx = MLX5_LAG_P2;
	} else {
		inactive = dev0;
		inactive_idx = MLX5_LAG_P1;
	}

	err = mlx5_esw_acl_ingress_vport_drop_rule_create(inactive->priv.eswitch,
							  MLX5_VPORT_UPLINK);
	if (!err)
		ldev->pf[inactive_idx].has_drop = true;
	else
		mlx5_core_err(inactive,
			      "Failed to create lag drop rule, error: %d", err);
}
static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;

	if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
		return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
	return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
}
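
/* Recompute the TX affinity map and push it to hardware only if it has
 * actually changed; for non-RoCE active-backup bonds the drop rule is
 * then moved to whichever port became inactive.
 */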
void mlx5_modify_lag(struct mlx5_lag *ldev,
		     struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	u8 v2p_port1, v2p_port2;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
				       &v2p_port2);

	if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
	    v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
		err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
		if (err) {
			mlx5_core_err(dev0,
				      "Failed to modify LAG (%d)\n",
				      err);
			return;
		}
		ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
		ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
			       ldev->v2p_map[MLX5_LAG_P1],
			       ldev->v2p_map[MLX5_LAG_P2]);
	}

	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !(ldev->flags & MLX5_LAG_FLAG_ROCE))
		mlx5_lag_drop_rule_setup(ldev, tracker);
}
static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
				       struct lag_tracker *tracker, u8 *flags)
{
	bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
	struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];

	if (roce_lag ||
	    !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
	    tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
		return;
	*flags |= MLX5_LAG_FLAG_HASH_BASED;
}
static char *get_str_port_sel_mode(u8 flags)
{
	if (flags & MLX5_LAG_FLAG_HASH_BASED)
		return "hash";
	return "queue_affinity";
}
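
/* Create the hardware LAG object and, for shared FDB, chain both
 * eswitches onto a single FDB. If shared FDB setup fails, the
 * half-created LAG is destroyed so hardware and driver state stay
 * consistent.
 */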
static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker,
			   bool shared_fdb, u8 flags)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	int err;

	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
		       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
		       shared_fdb, get_str_port_sel_mode(flags));

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
				  ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
	if (err) {
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
		return err;
	}

	if (shared_fdb) {
		err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
							      dev1->priv.eswitch);
		if (err)
			mlx5_core_err(dev0, "Can't enable single FDB mode\n");
		else
			mlx5_core_info(dev0, "Operation mode is single FDB\n");
	}

	if (err) {
		MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
		if (mlx5_cmd_exec_in(dev0, destroy_lag, in))
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
	}

	return err;
}
int mlx5_activate_lag(struct mlx5_lag *ldev,
		      struct lag_tracker *tracker,
		      u8 flags,
		      bool shared_fdb)
{
	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
				       &ldev->v2p_map[MLX5_LAG_P2]);
	mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
	if (flags & MLX5_LAG_FLAG_HASH_BASED) {
		err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
					       ldev->v2p_map[MLX5_LAG_P1],
					       ldev->v2p_map[MLX5_LAG_P2]);
		if (err) {
			mlx5_core_err(dev0,
				      "Failed to create LAG port selection(%d)\n",
				      err);
			return err;
		}
	}

	err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
	if (err) {
		if (flags & MLX5_LAG_FLAG_HASH_BASED)
			mlx5_lag_port_sel_destroy(ldev);
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP &&
	    !roce_lag)
		mlx5_lag_drop_rule_setup(ldev, tracker);

	ldev->flags |= flags;
	ldev->shared_fdb = shared_fdb;
	return 0;
}
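
/* Tear the LAG down: clear the mode flags first so helpers observe the
 * device as unbonded, undo shared FDB if it was enabled, issue
 * DESTROY_LAG, and finally release the port selection tables and any
 * remaining drop rules.
 */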
static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	u8 flags = ldev->flags;
	int err;

	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
	mlx5_lag_mp_reset(ldev);

	if (ldev->shared_fdb) {
		mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
							 dev1->priv.eswitch);
		ldev->shared_fdb = false;
	}

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
	err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
	if (err) {
		if (roce_lag) {
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		} else {
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		}
		return err;
	}

	if (flags & MLX5_LAG_FLAG_HASH_BASED)
		mlx5_lag_port_sel_destroy(ldev);
	if (mlx5_lag_has_drop_rule(ldev))
		mlx5_lag_drop_rule_cleanup(ldev);

	return 0;
}
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
		return false;

#ifdef CONFIG_MLX5_ESWITCH
	return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
				   ldev->pf[MLX5_LAG_P2].dev);
#else
	return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
		!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}
static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}
static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (!ldev->pf[i].dev)
			continue;

		if (ldev->pf[i].dev->priv.flags &
		    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
			continue;

		ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
		mlx5_rescan_drivers_locked(ldev->pf[i].dev);
	}
}
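
/* Disable an active bond: detach the auxiliary (IB) devices that must
 * not observe the transition, deactivate the LAG in hardware, then
 * re-add the devices on what are now two standalone ports.
 */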
static void mlx5_disable_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	bool shared_fdb = ldev->shared_fdb;
	bool roce_lag;
	int err;

	roce_lag = __mlx5_lag_is_roce(ldev);

	if (shared_fdb) {
		mlx5_lag_remove_devices(ldev);
	} else if (roce_lag) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
			dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
		}
		mlx5_nic_vport_disable_roce(dev1);
	}

	err = mlx5_deactivate_lag(ldev);
	if (err)
		return;

	if (shared_fdb || roce_lag)
		mlx5_lag_add_devices(ldev);

	if (shared_fdb) {
		if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev0->priv.eswitch);
		if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
			mlx5_eswitch_reload_reps(dev1->priv.eswitch);
	}
}
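
/* Shared FDB requires both ports in switchdev mode with vport match
 * metadata enabled, a paired devcom channel, and firmware support for
 * native FDB selection, cross-eswitch root flow tables and a shared
 * ingress ACL.
 */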
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;

	if (is_mdev_switchdev_mode(dev0) &&
	    is_mdev_switchdev_mode(dev1) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
	    mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
	    mlx5_devcom_is_paired(dev0->priv.devcom,
				  MLX5_DEVCOM_ESW_OFFLOADS) &&
	    MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
	    MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
	    MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
		return true;

	return false;
}
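
/* Central decision point, run from the bond work queue: compare the
 * tracker state against the current LAG state and activate, modify or
 * disable the hardware LAG accordingly. RoCE LAG is chosen only when
 * neither port uses SR-IOV or a non-NONE eswitch mode.
 */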
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	struct lag_tracker tracker;
	bool do_bond, roce_lag;
	int err;

	if (!mlx5_lag_is_ready(ldev)) {
		do_bond = false;
	} else {
		/* VF LAG is in multipath mode, ignore bond change requests */
		if (mlx5_lag_is_multipath(dev0))
			return;

		tracker = ldev->tracker;

		do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
	}

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		bool shared_fdb = mlx5_shared_fdb_supported(ldev);

		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			   !mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
		roce_lag = roce_lag &&
			   dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
			   dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

		if (shared_fdb || roce_lag)
			mlx5_lag_remove_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_FLAG_ROCE :
						   MLX5_LAG_FLAG_SRIOV,
					shared_fdb);
		if (err) {
			if (shared_fdb || roce_lag)
				mlx5_lag_add_devices(ldev);

			return;
		} else if (roce_lag) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);
			mlx5_nic_vport_enable_roce(dev1);
		} else if (shared_fdb) {
			dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
			mlx5_rescan_drivers_locked(dev0);

			err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
			if (!err)
				err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);

			if (err) {
				dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
				mlx5_rescan_drivers_locked(dev0);
				mlx5_deactivate_lag(ldev);
				mlx5_lag_add_devices(ldev);
				mlx5_eswitch_reload_reps(dev0->priv.eswitch);
				mlx5_eswitch_reload_reps(dev1->priv.eswitch);
				mlx5_core_err(dev0, "Failed to enable lag\n");
				return;
			}
		}
	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_disable_lag(ldev);
	}
}
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}
static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0,
				    struct mlx5_core_dev *dev1)
{
	if (dev0)
		mlx5_esw_lock(dev0->priv.eswitch);
	if (dev1)
		mlx5_esw_lock(dev1->priv.eswitch);
}
static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0,
				      struct mlx5_core_dev *dev1)
{
	if (dev1)
		mlx5_esw_unlock(dev1->priv.eswitch);
	if (dev0)
		mlx5_esw_unlock(dev0->priv.eswitch);
}
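
/* Work queue handler. mlx5_do_bond() must run under both the device
 * list lock and the eswitch locks, so if the device list lock is
 * contended or a mode change is in flight, requeue and retry after HZ
 * jiffies (one second) instead of blocking.
 */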
static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	if (ldev->mode_changes_in_progress) {
		mlx5_dev_list_unlock();
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_lag_lock_eswitches(dev0, dev1);
	mlx5_do_bond(ldev);
	mlx5_lag_unlock_eswitches(dev0, dev1);
	mlx5_dev_list_unlock();
}
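
/* Handle NETDEV_CHANGEUPPER: walk the bond's slaves to find this
 * device's netdevs, record the TX type, hash type and inactive-slave
 * state in the tracker, and report whether the bonded state changed.
 */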
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded, is_in_lag, mode_supported;
	bool has_inactive = 0;
	struct slave *slave;
	u8 bond_status = 0;
	int num_slaves = 0;
	int changed = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx >= 0) {
			slave = bond_slave_get_rcu(ndev_tmp);
			if (slave)
				has_inactive |= bond_is_slave_inactive(slave);
			bond_status |= (1 << idx);
		}

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info) {
		tracker->tx_type = lag_upper_info->tx_type;
		tracker->hash_type = lag_upper_info->hash_type;
	}

	tracker->has_inactive = has_inactive;
	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 */
	is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;

	/* Lag mode must be activebackup or hash. */
	mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
			 tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

	is_bonded = is_in_lag && mode_supported;
	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		changed = 1;
	}

	if (!is_in_lag)
		return changed;

	if (!mlx5_lag_is_ready(ldev))
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, PF is configured with more than 64 VFs");
	else if (!mode_supported)
		NL_SET_ERR_MSG_MOD(info->info.extack,
				   "Can't activate LAG offload, TX type isn't supported");

	return changed;
}
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx < 0)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}
static int mlx5_handle_changeinfodata_event(struct mlx5_lag *ldev,
					    struct lag_tracker *tracker,
					    struct net_device *ndev)
{
	struct net_device *ndev_tmp;
	struct slave *slave;
	bool has_inactive = 0;
	int idx;

	if (!netif_is_lag_master(ndev))
		return 0;

	rcu_read_lock();
	for_each_netdev_in_bond_rcu(ndev, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx < 0)
			continue;

		slave = bond_slave_get_rcu(ndev_tmp);
		if (slave)
			has_inactive |= bond_is_slave_inactive(slave);
	}
	rcu_read_unlock();

	if (tracker->has_inactive == has_inactive)
		return 0;

	tracker->has_inactive = has_inactive;

	return 1;
}
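
/* Netdev notifier callback: snapshot the tracker, let the per-event
 * handler update it, and kick the bond work only if something changed.
 * All heavier processing is deferred to work queue context.
 */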
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (event != NETDEV_CHANGEUPPER &&
	    event != NETDEV_CHANGELOWERSTATE &&
	    event != NETDEV_CHANGEINFODATA)
		return NOTIFY_DONE;

	ldev = container_of(this, struct mlx5_lag, nb);

	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	case NETDEV_CHANGEINFODATA:
		changed = mlx5_handle_changeinfodata_event(ldev, &tracker, ndev);
		break;
	}

	ldev->tracker = tracker;

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
				 struct mlx5_core_dev *dev,
				 struct net_device *netdev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= MLX5_MAX_PORTS)
		return;

	spin_lock(&lag_lock);
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;
	spin_unlock(&lag_lock);
}
static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
				    struct net_device *netdev)
{
	int i;

	spin_lock(&lag_lock);
	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (ldev->pf[i].netdev == netdev) {
			ldev->pf[i].netdev = NULL;
			break;
		}
	}
	spin_unlock(&lag_lock);
}
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
			       struct mlx5_core_dev *dev)
{
	unsigned int fn = mlx5_get_dev_index(dev);

	if (fn >= MLX5_MAX_PORTS)
		return;

	ldev->pf[fn].dev = dev;
	dev->priv.lag = ldev;
}
/* Must be called with intf_mutex held */
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
				  struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	ldev->pf[i].dev = NULL;
	dev->priv.lag = NULL;
}
/* Must be called with intf_mutex held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
		return 0;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc(dev);
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return 0;
		}
	} else {
		if (ldev->mode_changes_in_progress)
			return -EAGAIN;
		mlx5_ldev_get(ldev);
	}

	mlx5_ldev_add_mdev(ldev, dev);

	return 0;
}
void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

recheck:
	mlx5_dev_list_lock();
	if (ldev->mode_changes_in_progress) {
		mlx5_dev_list_unlock();
		msleep(100);
		goto recheck;
	}
	mlx5_ldev_remove_mdev(ldev, dev);
	mlx5_dev_list_unlock();
	mlx5_ldev_put(ldev);
}
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
	int err;

recheck:
	mlx5_dev_list_lock();
	err = __mlx5_lag_dev_add_mdev(dev);
	if (err) {
		mlx5_dev_list_unlock();
		msleep(100);
		goto recheck;
	}
	mlx5_dev_list_unlock();
}
/* Must be called with intf_mutex held */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
			    struct net_device *netdev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_ldev_remove_netdev(ldev, netdev);
	ldev->flags &= ~MLX5_LAG_FLAG_READY;

	if (__mlx5_lag_is_active(ldev))
		mlx5_queue_bond_work(ldev, 0);
}
/* Must be called with intf_mutex held */
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
			 struct net_device *netdev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_ldev_add_netdev(ldev, dev, netdev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (!ldev->pf[i].dev)
			break;

	if (i >= MLX5_MAX_PORTS)
		ldev->flags |= MLX5_LAG_FLAG_READY;
	mlx5_queue_bond_work(ldev, 0);
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_roce(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_active(ldev) &&
	      dev == ldev->pf[MLX5_LAG_P1].dev;
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_master);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_sriov(ldev);
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb;
	spin_unlock(&lag_lock);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_shared_fdb);
void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *dev0;
	struct mlx5_core_dev *dev1;
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_dev_list_lock();

	dev0 = ldev->pf[MLX5_LAG_P1].dev;
	dev1 = ldev->pf[MLX5_LAG_P2].dev;

	ldev->mode_changes_in_progress++;
	if (__mlx5_lag_is_active(ldev)) {
		mlx5_lag_lock_eswitches(dev0, dev1);
		mlx5_disable_lag(ldev);
		mlx5_lag_unlock_eswitches(dev0, dev1);
	}
	mlx5_dev_list_unlock();
}
void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		return;

	mlx5_dev_list_lock();
	ldev->mode_changes_in_progress--;
	mlx5_dev_list_unlock();
	mlx5_queue_bond_work(ldev, 0);
}
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
		       ldev->pf[MLX5_LAG_P1].netdev :
		       ldev->pf[MLX5_LAG_P2].netdev;
	} else {
		ndev = ldev->pf[MLX5_LAG_P1].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	spin_unlock(&lag_lock);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave)
{
	struct mlx5_lag *ldev;
	u8 port = 0;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->pf[MLX5_LAG_P1].netdev == slave)
		port = MLX5_LAG_P1;
	else
		port = MLX5_LAG_P2;

	port = ldev->v2p_map[port];

unlock:
	spin_unlock(&lag_lock);
	return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);
struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer_dev = NULL;
	struct mlx5_lag *ldev;

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (!ldev)
		goto unlock;

	peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
			   ldev->pf[MLX5_LAG_P2].dev :
			   ldev->pf[MLX5_LAG_P1].dev;

unlock:
	spin_unlock(&lag_lock);
	return peer_dev;
}
EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
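
/* Query congestion counters. When the LAG is active, traffic may egress
 * either physical port, so both ports are queried and the per-counter
 * values summed. The firmware commands run outside the spinlock; only
 * the device pointers are captured under it.
 */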
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(values, 0, sizeof(*values) * num_counters);

	spin_lock(&lag_lock);
	ldev = mlx5_lag_dev(dev);
	if (ldev && __mlx5_lag_is_active(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
		mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
	} else {
		num_ports = 1;
		mdev[MLX5_LAG_P1] = dev;
	}
	spin_unlock(&lag_lock);

	for (i = 0; i < num_ports; ++i) {
		u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

		MLX5_SET(query_cong_statistics_in, in, opcode,
			 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
		ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
					  out);
		if (ret)
			goto free;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

free:
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);