/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
#include "lag_mp.h"
/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_SPINLOCK(lag_lock);

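/* Thin wrappers around the CREATE_LAG and MODIFY_LAG firmware commands.
 * The tx_remap_affinity_{1,2} fields of the LAG context tell firmware
 * which physical port should carry the traffic of each logical port.
 */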
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
                               u8 remap_port2)
{
        u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
        void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

        MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

        return mlx5_cmd_exec_in(dev, create_lag, in);
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
                               u8 remap_port2)
{
        u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {};
        void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

        MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
        MLX5_SET(modify_lag_in, in, field_select, 0x1);

        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
        MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

        return mlx5_cmd_exec_in(dev, modify_lag, in);
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(create_vport_lag_in)] = {};

        MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

        return mlx5_cmd_exec_in(dev, create_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
        u32 in[MLX5_ST_SZ_DW(destroy_vport_lag_in)] = {};

        MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

        return mlx5_cmd_exec_in(dev, destroy_vport_lag, in);
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static int mlx5_lag_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr);
static void mlx5_do_bond_work(struct work_struct *work);

static void mlx5_ldev_free(struct kref *ref)
{
        struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);

        if (ldev->nb.notifier_call)
                unregister_netdevice_notifier_net(&init_net, &ldev->nb);
        mlx5_lag_mp_cleanup(ldev);
        cancel_delayed_work_sync(&ldev->bond_work);
        destroy_workqueue(ldev->wq);
        kfree(ldev);
}

static void mlx5_ldev_put(struct mlx5_lag *ldev)
{
        kref_put(&ldev->ref, mlx5_ldev_free);
}

static void mlx5_ldev_get(struct mlx5_lag *ldev)
{
        kref_get(&ldev->ref);
}

static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        int err;

        ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
        if (!ldev)
                return NULL;

        ldev->wq = create_singlethread_workqueue("mlx5_lag");
        if (!ldev->wq) {
                kfree(ldev);
                return NULL;
        }

        kref_init(&ldev->ref);
        INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

        ldev->nb.notifier_call = mlx5_lag_netdev_event;
        if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
                ldev->nb.notifier_call = NULL;
                mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
        }

        err = mlx5_lag_mp_init(ldev);
        if (err)
                mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
                              err);

        return ldev;
}

int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
                                struct net_device *ndev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (ldev->pf[i].netdev == ndev)
                        return i;

        return -ENOENT;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
        return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
        return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}

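/* Compute the logical-to-physical TX port mapping from the tracked
 * netdev state: if both ports can carry traffic (or neither can), keep
 * the identity mapping; if only one port is up and TX-enabled, remap
 * both logical ports onto it.
 */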
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
                                           u8 *port1, u8 *port2)
{
        bool p1en;
        bool p2en;

        p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
               tracker->netdev_state[MLX5_LAG_P1].link_up;

        p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
               tracker->netdev_state[MLX5_LAG_P2].link_up;

        *port1 = 1;
        *port2 = 2;
        if ((!p1en && !p2en) || (p1en && p2en))
                return;

        if (p1en)
                *port2 = 1;
        else
                *port1 = 2;
}

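/* Push an updated TX affinity mapping to firmware, but only when it
 * differs from the mapping that is currently programmed.
 */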
void mlx5_modify_lag(struct mlx5_lag *ldev,
                     struct lag_tracker *tracker)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        u8 v2p_port1, v2p_port2;
        int err;

        mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
                                       &v2p_port2);

        if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
            v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
                ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
                ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;

                mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
                               ldev->v2p_map[MLX5_LAG_P1],
                               ldev->v2p_map[MLX5_LAG_P2]);

                err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
                if (err)
                        mlx5_core_err(dev0,
                                      "Failed to modify LAG (%d)\n",
                                      err);
        }
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
                           struct lag_tracker *tracker)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        int err;

        mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
                                       &ldev->v2p_map[MLX5_LAG_P2]);

        mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
                       ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]);

        err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
                                  ldev->v2p_map[MLX5_LAG_P2]);
        if (err)
                mlx5_core_err(dev0,
                              "Failed to create LAG (%d)\n",
                              err);
        return err;
}

int mlx5_activate_lag(struct mlx5_lag *ldev,
                      struct lag_tracker *tracker,
                      u8 flags)
{
        bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        int err;

        err = mlx5_create_lag(ldev, tracker);
        if (err) {
                if (roce_lag)
                        mlx5_core_err(dev0,
                                      "Failed to activate RoCE LAG\n");
                else
                        mlx5_core_err(dev0,
                                      "Failed to activate VF LAG\n"
                                      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
                return err;
        }

        ldev->flags |= flags;
        return 0;
}

static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
        bool roce_lag = __mlx5_lag_is_roce(ldev);
        int err;

        ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
        mlx5_lag_mp_reset(ldev);

        MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
        err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
        if (err) {
                if (roce_lag)
                        mlx5_core_err(dev0,
                                      "Failed to deactivate RoCE LAG; driver restart required\n");
                else
                        mlx5_core_err(dev0,
                                      "Failed to deactivate VF LAG; driver restart required\n"
                                      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
        }

        return err;
}

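/* LAG can only be activated once both PF devices are bound. With the
 * eswitch compiled in, the eswitch-specific prerequisites must also be
 * met; without it, SR-IOV must be disabled on both ports.
 */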
static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
        if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
                return false;

#ifdef CONFIG_MLX5_ESWITCH
        return mlx5_esw_lag_prereq(ldev->pf[MLX5_LAG_P1].dev,
                                   ldev->pf[MLX5_LAG_P2].dev);
#else
        return (!mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P1].dev) &&
                !mlx5_sriov_is_enabled(ldev->pf[MLX5_LAG_P2].dev));
#endif
}

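/* Re-enable (add) or disable (remove) the IB auxiliary devices of all
 * tracked PFs, then rescan so the auxiliary bus picks up the change.
 */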
static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++) {
                if (!ldev->pf[i].dev)
                        continue;

                if (ldev->pf[i].dev->priv.flags &
                    MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
                        continue;

                ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                mlx5_rescan_drivers_locked(ldev->pf[i].dev);
        }
}

static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++) {
                if (!ldev->pf[i].dev)
                        continue;

                ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                mlx5_rescan_drivers_locked(ldev->pf[i].dev);
        }
}

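/* Tear the bond down. For RoCE LAG the bonded IB device is dropped and
 * RoCE is disabled on the second port before the firmware LAG is
 * destroyed; the per-port IB devices are re-added afterwards.
 */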
static void mlx5_disable_lag(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
        bool roce_lag;
        int err;

        roce_lag = __mlx5_lag_is_roce(ldev);

        if (roce_lag) {
                if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
                        dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                        mlx5_rescan_drivers_locked(dev0);
                }
                mlx5_nic_vport_disable_roce(dev1);
        }

        err = mlx5_deactivate_lag(ldev);
        if (err)
                return;

        if (roce_lag)
                mlx5_lag_add_devices(ldev);
}

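/* Central LAG state machine, run from the work queue: decide whether
 * the two ports should be bonded (RoCE LAG when SR-IOV is disabled and
 * the eswitch is inactive on both ports, VF LAG otherwise), and
 * activate, modify or tear down the hardware LAG accordingly.
 */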
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
        struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
        struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
        struct lag_tracker tracker;
        bool do_bond, roce_lag;
        int err;

        if (!mlx5_lag_is_ready(ldev))
                return;

        tracker = ldev->tracker;

        do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

        if (do_bond && !__mlx5_lag_is_active(ldev)) {
                roce_lag = !mlx5_sriov_is_enabled(dev0) &&
                           !mlx5_sriov_is_enabled(dev1);

#ifdef CONFIG_MLX5_ESWITCH
                roce_lag = roce_lag &&
                           dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
                           dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif

                if (roce_lag)
                        mlx5_lag_remove_devices(ldev);

                err = mlx5_activate_lag(ldev, &tracker,
                                        roce_lag ? MLX5_LAG_FLAG_ROCE :
                                        MLX5_LAG_FLAG_SRIOV);
                if (err) {
                        if (roce_lag)
                                mlx5_lag_add_devices(ldev);

                        return;
                }

                if (roce_lag) {
                        dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
                        mlx5_rescan_drivers_locked(dev0);
                        mlx5_nic_vport_enable_roce(dev1);
                }
        } else if (do_bond && __mlx5_lag_is_active(ldev)) {
                mlx5_modify_lag(ldev, &tracker);
        } else if (!do_bond && __mlx5_lag_is_active(ldev)) {
                mlx5_disable_lag(ldev);
        }
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
        queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
        struct delayed_work *delayed_work = to_delayed_work(work);
        struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
                                             bond_work);
        int status;

        status = mlx5_dev_list_trylock();
        if (!status) {
                /* 1 sec delay. */
                mlx5_queue_bond_work(ldev, HZ);
                return;
        }

        mlx5_do_bond(ldev);
        mlx5_dev_list_unlock();
}

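/* Handle NETDEV_CHANGEUPPER: track whether both of our netdevs (and
 * only them) are enslaved to the same LAG master. Returns 1 when the
 * tracker state changed and bonding must be re-evaluated.
 */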
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
                                         struct lag_tracker *tracker,
                                         struct net_device *ndev,
                                         struct netdev_notifier_changeupper_info *info)
{
        struct net_device *upper = info->upper_dev, *ndev_tmp;
        struct netdev_lag_upper_info *lag_upper_info = NULL;
        bool is_bonded, is_in_lag, mode_supported;
        int bond_status = 0;
        int num_slaves = 0;
        int idx;

        if (!netif_is_lag_master(upper))
                return 0;

        if (info->linking)
                lag_upper_info = info->upper_info;

        /* The event may still be of interest if the slave does not belong to
         * us, but is enslaved to a master which has one or more of our netdevs
         * as slaves (e.g., if a new slave is added to a master that bonds two
         * of our netdevs, we should unbond).
         */
        rcu_read_lock();
        for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
                idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
                if (idx >= 0)
                        bond_status |= (1 << idx);

                num_slaves++;
        }
        rcu_read_unlock();

        /* None of this lagdev's netdevs are slaves of this master. */
        if (!(bond_status & 0x3))
                return 0;

        if (lag_upper_info)
                tracker->tx_type = lag_upper_info->tx_type;

        /* Determine bonding status:
         * A device is considered bonded if both its physical ports are slaves
         * of the same lag master, and only them.
         */
        is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;

        if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
                NL_SET_ERR_MSG_MOD(info->info.extack,
                                   "Can't activate LAG offload, PF is configured with more than 64 VFs");
                return 0;
        }

        /* Lag mode must be activebackup or hash. */
        mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
                         tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;

        if (is_in_lag && !mode_supported)
                NL_SET_ERR_MSG_MOD(info->info.extack,
                                   "Can't activate LAG offload, TX type isn't supported");

        is_bonded = is_in_lag && mode_supported;
        if (tracker->is_bonded != is_bonded) {
                tracker->is_bonded = is_bonded;
                return 1;
        }

        return 0;
}

static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
                                              struct lag_tracker *tracker,
                                              struct net_device *ndev,
                                              struct netdev_notifier_changelowerstate_info *info)
{
        struct netdev_lag_lower_state_info *lag_lower_info;
        int idx;

        if (!netif_is_lag_port(ndev))
                return 0;

        idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
        if (idx < 0)
                return 0;

        /* This information is used to determine virtual to physical
         * port mapping.
         */
        lag_lower_info = info->lower_state_info;
        if (!lag_lower_info)
                return 0;

        tracker->netdev_state[idx] = *lag_lower_info;

        return 1;
}

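/* Top-level netdev notifier callback: snapshot the tracker, dispatch to
 * the event-specific handler, and schedule the bond work if anything
 * relevant changed.
 */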
static int mlx5_lag_netdev_event(struct notifier_block *this,
                                 unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct lag_tracker tracker;
        struct mlx5_lag *ldev;
        int changed = 0;

        if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
                return NOTIFY_DONE;

        ldev = container_of(this, struct mlx5_lag, nb);

        if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
                return NOTIFY_DONE;

        tracker = ldev->tracker;

        switch (event) {
        case NETDEV_CHANGEUPPER:
                changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
                                                        ptr);
                break;
        case NETDEV_CHANGELOWERSTATE:
                changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
                                                             ndev, ptr);
                break;
        }

        ldev->tracker = tracker;

        if (changed)
                mlx5_queue_bond_work(ldev, 0);

        return NOTIFY_DONE;
}

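/* A PF's PCI function number doubles as its slot in ldev->pf[]:
 * function 0 tracks port 1 and function 1 tracks port 2.
 */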
static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
                                 struct mlx5_core_dev *dev,
                                 struct net_device *netdev)
{
        unsigned int fn = PCI_FUNC(dev->pdev->devfn);

        if (fn >= MLX5_MAX_PORTS)
                return;

        spin_lock(&lag_lock);
        ldev->pf[fn].netdev = netdev;
        ldev->tracker.netdev_state[fn].link_up = 0;
        ldev->tracker.netdev_state[fn].tx_enabled = 0;
        spin_unlock(&lag_lock);
}

static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
                                    struct net_device *netdev)
{
        int i;

        spin_lock(&lag_lock);
        for (i = 0; i < MLX5_MAX_PORTS; i++) {
                if (ldev->pf[i].netdev == netdev) {
                        ldev->pf[i].netdev = NULL;
                        break;
                }
        }
        spin_unlock(&lag_lock);
}

static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
                               struct mlx5_core_dev *dev)
{
        unsigned int fn = PCI_FUNC(dev->pdev->devfn);

        if (fn >= MLX5_MAX_PORTS)
                return;

        ldev->pf[fn].dev = dev;
        dev->priv.lag = ldev;
}

/* Must be called with intf_mutex held */
static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
                                  struct mlx5_core_dev *dev)
{
        int i;

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (ldev->pf[i].dev == dev)
                        break;

        if (i == MLX5_MAX_PORTS)
                return;

        ldev->pf[i].dev = NULL;
        dev->priv.lag = NULL;
}

/* Must be called with intf_mutex held */
static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev = NULL;
        struct mlx5_core_dev *tmp_dev;

        if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
            !MLX5_CAP_GEN(dev, lag_master) ||
            MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
                return;

        tmp_dev = mlx5_get_next_phys_dev(dev);
        if (tmp_dev)
                ldev = tmp_dev->priv.lag;

        if (!ldev) {
                ldev = mlx5_lag_dev_alloc(dev);
                if (!ldev) {
                        mlx5_core_err(dev, "Failed to alloc lag dev\n");
                        return;
                }
        } else {
                mlx5_ldev_get(ldev);
        }

        mlx5_ldev_add_mdev(ldev, dev);
}

void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;

        ldev = mlx5_lag_dev(dev);
        if (!ldev)
                return;

        mlx5_dev_list_lock();
        mlx5_ldev_remove_mdev(ldev, dev);
        mlx5_dev_list_unlock();
        mlx5_ldev_put(ldev);
}

void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
        mlx5_dev_list_lock();
        __mlx5_lag_dev_add_mdev(dev);
        mlx5_dev_list_unlock();
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
                            struct net_device *netdev)
{
        struct mlx5_lag *ldev;

        ldev = mlx5_lag_dev(dev);
        if (!ldev)
                return;

        if (__mlx5_lag_is_active(ldev))
                mlx5_disable_lag(ldev);

        mlx5_ldev_remove_netdev(ldev, netdev);
        ldev->flags &= ~MLX5_LAG_FLAG_READY;
}

/* Must be called with intf_mutex held */
void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
                         struct net_device *netdev)
{
        struct mlx5_lag *ldev;
        int i;

        ldev = mlx5_lag_dev(dev);
        if (!ldev)
                return;

        mlx5_ldev_add_netdev(ldev, dev, netdev);

        for (i = 0; i < MLX5_MAX_PORTS; i++)
                if (!ldev->pf[i].dev)
                        break;

        if (i >= MLX5_MAX_PORTS)
                ldev->flags |= MLX5_LAG_FLAG_READY;
}

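/* Exported state queries, used by other mlx5 drivers (e.g. mlx5_ib).
 * lag_lock keeps the answer consistent with concurrent netdev
 * add/remove.
 */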
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        bool res;

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);
        res = ldev && __mlx5_lag_is_roce(ldev);
        spin_unlock(&lag_lock);

        return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        bool res;

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);
        res = ldev && __mlx5_lag_is_active(ldev);
        spin_unlock(&lag_lock);

        return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;
        bool res;

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);
        res = ldev && __mlx5_lag_is_sriov(ldev);
        spin_unlock(&lag_lock);

        return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);

void mlx5_lag_update(struct mlx5_core_dev *dev)
{
        struct mlx5_lag *ldev;

        mlx5_dev_list_lock();
        ldev = mlx5_lag_dev(dev);
        if (!ldev)
                goto unlock;

        mlx5_do_bond(ldev);

unlock:
        mlx5_dev_list_unlock();
}

struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
        struct net_device *ndev = NULL;
        struct mlx5_lag *ldev;

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);

        if (!(ldev && __mlx5_lag_is_roce(ldev)))
                goto unlock;

        if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
                ndev = ldev->tracker.netdev_state[MLX5_LAG_P1].tx_enabled ?
                       ldev->pf[MLX5_LAG_P1].netdev :
                       ldev->pf[MLX5_LAG_P2].netdev;
        } else {
                ndev = ldev->pf[MLX5_LAG_P1].netdev;
        }
        if (ndev)
                dev_hold(ndev);

unlock:
        spin_unlock(&lag_lock);

        return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
                           struct net_device *slave)
{
        struct mlx5_lag *ldev;
        u8 port = 0;

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);
        if (!(ldev && __mlx5_lag_is_roce(ldev)))
                goto unlock;

        if (ldev->pf[MLX5_LAG_P1].netdev == slave)
                port = MLX5_LAG_P1;
        else
                port = MLX5_LAG_P2;

        port = ldev->v2p_map[port];

unlock:
        spin_unlock(&lag_lock);
        return port;
}
EXPORT_SYMBOL(mlx5_lag_get_slave_port);

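/* Query congestion counters for the device. When LAG is active the
 * counters of both physical ports are queried and accumulated into
 * values[], since traffic may leave through either port.
 */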
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
                                 u64 *values,
                                 int num_counters,
                                 size_t *offsets)
{
        int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
        struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
        struct mlx5_lag *ldev;
        int num_ports;
        int ret, i, j;
        void *out;

        out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        memset(values, 0, sizeof(*values) * num_counters);

        spin_lock(&lag_lock);
        ldev = mlx5_lag_dev(dev);
        if (ldev && __mlx5_lag_is_active(ldev)) {
                num_ports = MLX5_MAX_PORTS;
                mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
                mdev[MLX5_LAG_P2] = ldev->pf[MLX5_LAG_P2].dev;
        } else {
                num_ports = 1;
                mdev[MLX5_LAG_P1] = dev;
        }
        spin_unlock(&lag_lock);

        for (i = 0; i < num_ports; ++i) {
                u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = {};

                MLX5_SET(query_cong_statistics_in, in, opcode,
                         MLX5_CMD_OP_QUERY_CONG_STATISTICS);
                ret = mlx5_cmd_exec_inout(mdev[i], query_cong_statistics, in,
                                          out);
                if (ret)
                        goto free;

                for (j = 0; j < num_counters; ++j)
                        values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
        }

free:
        kvfree(out);
        return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);