/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"

enum {
	MLX5_LAG_FLAG_ROCE  = 1 << 0,
	MLX5_LAG_FLAG_SRIOV = 1 << 1,
};

#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV)

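/* The flags above double as the LAG mode: MLX5_LAG_FLAG_ROCE is used when
 * neither PF has SR-IOV enabled, MLX5_LAG_FLAG_SRIOV otherwise. A device is
 * in exactly one mode while a LAG is active (see mlx5_do_bond() below).
 */
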
struct lag_func {
	struct mlx5_core_dev *dev;
	struct net_device    *netdev;
};

/* Used for collection of netdev event info. */
struct lag_tracker {
	enum   netdev_lag_tx_type           tx_type;
	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
	bool is_bonded;
};

/* LAG data of a ConnectX card.
 * It serves both its phys functions.
 */
struct mlx5_lag {
	u8                    flags;
	u8                    v2p_map[MLX5_MAX_PORTS];
	struct lag_func       pf[MLX5_MAX_PORTS];
	struct lag_tracker    tracker;
	struct delayed_work   bond_work;
	struct notifier_block nb;
};

/* General purpose, use for short periods of time.
 * Beware of lock dependencies (preferably, no locks should be acquired
 * under it).
 */
static DEFINE_MUTEX(lag_mutex);

static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32   in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
	u32   out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);

	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
			       u8 remap_port2)
{
	u32   in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
	u32   out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);

	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
	MLX5_SET(modify_lag_in, in, field_select, 0x1);

	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(destroy_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};

	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(create_vport_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};

	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);

int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
{
	u32  in[MLX5_ST_SZ_DW(destroy_vport_lag_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};

	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);

static int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
				       bool reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

	MLX5_SET(query_cong_statistics_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
	MLX5_SET(query_cong_statistics_in, in, clear, reset);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}

static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
{
	return dev->priv.lag;
}

static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
				       struct net_device *ndev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].netdev == ndev)
			return i;

	return -1;
}

static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_ROCE);
}

static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV);
}

static bool __mlx5_lag_is_active(struct mlx5_lag *ldev)
{
	return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}

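/* Infer the tx affinity mapping from virtual to physical ports. Ports are
 * numbered 1 and 2; a port whose netdev is link-down or has tx disabled is
 * remapped onto its peer, so if port 1 is unusable both virtual ports map
 * to physical port 2, and vice versa when only port 2 is down.
 */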
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
					   u8 *port1, u8 *port2)
{
	*port1 = 1;
	*port2 = 2;
	if (!tracker->netdev_state[0].tx_enabled ||
	    !tracker->netdev_state[0].link_up) {
		*port1 = 2;
		return;
	}

	if (!tracker->netdev_state[1].tx_enabled ||
	    !tracker->netdev_state[1].link_up)
		*port2 = 1;
}

static void mlx5_modify_lag(struct mlx5_lag *ldev,
			    struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	u8 v2p_port1, v2p_port2;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1,
				       &v2p_port2);

	if (v2p_port1 != ldev->v2p_map[0] ||
	    v2p_port2 != ldev->v2p_map[1]) {
		ldev->v2p_map[0] = v2p_port1;
		ldev->v2p_map[1] = v2p_port2;

		mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
			       ldev->v2p_map[0], ldev->v2p_map[1]);

		err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
		if (err)
			mlx5_core_err(dev0,
				      "Failed to modify LAG (%d)\n",
				      err);
	}
}

static int mlx5_create_lag(struct mlx5_lag *ldev,
			   struct lag_tracker *tracker)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
				       &ldev->v2p_map[1]);

	mlx5_core_info(dev0, "lag map port 1:%d port 2:%d",
		       ldev->v2p_map[0], ldev->v2p_map[1]);

	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
	if (err)
		mlx5_core_err(dev0,
			      "Failed to create LAG (%d)\n",
			      err);
	return err;
}

static int mlx5_activate_lag(struct mlx5_lag *ldev,
			     struct lag_tracker *tracker,
			     u8 flags)
{
	bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE);
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	int err;

	err = mlx5_create_lag(ldev, tracker);
	if (err) {
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to activate RoCE LAG\n");
		else
			mlx5_core_err(dev0,
				      "Failed to activate VF LAG\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
		return err;
	}

	ldev->flags |= flags;
	return 0;
}

static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	bool roce_lag = __mlx5_lag_is_roce(ldev);
	int err;

	ldev->flags &= ~MLX5_LAG_MODE_FLAGS;

	err = mlx5_cmd_destroy_lag(dev0);
	if (err) {
		if (roce_lag)
			mlx5_core_err(dev0,
				      "Failed to deactivate RoCE LAG; driver restart required\n");
		else
			mlx5_core_err(dev0,
				      "Failed to deactivate VF LAG; driver restart required\n"
				      "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
	}

	return err;
}

static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
	if (!ldev->pf[0].dev || !ldev->pf[1].dev)
		return false;

#ifdef CONFIG_MLX5_ESWITCH
	return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev);
#else
	return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) &&
		!mlx5_sriov_is_enabled(ldev->pf[1].dev));
#endif
}

static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			mlx5_add_dev_by_protocol(ldev->pf[i].dev,
						 MLX5_INTERFACE_PROTOCOL_IB);
}

static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
						    MLX5_INTERFACE_PROTOCOL_IB);
}

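/* State machine driven from the deferred bond work. Depending on whether
 * the netdev tracker considers the card bonded and on whether a hardware
 * LAG is already active, this either activates a new LAG (re-creating the
 * per-PF IB devices as a single device in the RoCE case), updates the tx
 * affinity mapping of the existing LAG, or deactivates it.
 */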
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
	struct lag_tracker tracker;
	bool do_bond, roce_lag;
	int err;

	if (!dev0 || !dev1)
		return;

	mutex_lock(&lag_mutex);
	tracker = ldev->tracker;
	mutex_unlock(&lag_mutex);

	do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);

	if (do_bond && !__mlx5_lag_is_active(ldev)) {
		roce_lag = !mlx5_sriov_is_enabled(dev0) &&
			   !mlx5_sriov_is_enabled(dev1);

		if (roce_lag)
			mlx5_lag_remove_ib_devices(ldev);

		err = mlx5_activate_lag(ldev, &tracker,
					roce_lag ? MLX5_LAG_FLAG_ROCE :
					MLX5_LAG_FLAG_SRIOV);
		if (err) {
			if (roce_lag)
				mlx5_lag_add_ib_devices(ldev);
			return;
		}

		if (roce_lag) {
			mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
			mlx5_nic_vport_enable_roce(dev1);
		}
	} else if (do_bond && __mlx5_lag_is_active(ldev)) {
		mlx5_modify_lag(ldev, &tracker);
	} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
		roce_lag = __mlx5_lag_is_roce(ldev);

		if (roce_lag) {
			mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
			mlx5_nic_vport_disable_roce(dev1);
		}

		err = mlx5_deactivate_lag(ldev);
		if (err)
			return;

		if (roce_lag)
			mlx5_lag_add_ib_devices(ldev);
	}
}

static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
	schedule_delayed_work(&ldev->bond_work, delay);
}

static void mlx5_do_bond_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
					     bond_work);
	int status;

	status = mlx5_dev_list_trylock();
	if (!status) {
		/* 1 sec delay. */
		mlx5_queue_bond_work(ldev, HZ);
		return;
	}

	mlx5_do_bond(ldev);
	mlx5_dev_list_unlock();
}

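/* Handle NETDEV_CHANGEUPPER: bond_status collects, as a bitmask, which of
 * our ports are enslaved to the upper (bond) device, while num_slaves
 * counts all of its slaves. The card is considered bonded only when both
 * of its ports, and nothing else, are enslaved to the same master.
 */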
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
					 struct lag_tracker *tracker,
					 struct net_device *ndev,
					 struct netdev_notifier_changeupper_info *info)
{
	struct net_device *upper = info->upper_dev, *ndev_tmp;
	struct netdev_lag_upper_info *lag_upper_info = NULL;
	bool is_bonded;
	int bond_status = 0;
	int num_slaves = 0;
	int idx;

	if (!netif_is_lag_master(upper))
		return 0;

	if (info->linking)
		lag_upper_info = info->upper_info;

	/* The event may still be of interest if the slave does not belong to
	 * us, but is enslaved to a master which has one or more of our netdevs
	 * as slaves (e.g., if a new slave is added to a master that bonds two
	 * of our netdevs, we should unbond).
	 */
	rcu_read_lock();
	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
		if (idx > -1)
			bond_status |= (1 << idx);

		num_slaves++;
	}
	rcu_read_unlock();

	/* None of this lagdev's netdevs are slaves of this master. */
	if (!(bond_status & 0x3))
		return 0;

	if (lag_upper_info)
		tracker->tx_type = lag_upper_info->tx_type;

	/* Determine bonding status:
	 * A device is considered bonded if both its physical ports are slaves
	 * of the same lag master, and only them.
	 * Lag mode must be activebackup or hash.
	 */
	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
		    (bond_status == 0x3) &&
		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));

	if (tracker->is_bonded != is_bonded) {
		tracker->is_bonded = is_bonded;
		return 1;
	}

	return 0;
}

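/* Handle NETDEV_CHANGELOWERSTATE: record the link and tx-enable state of
 * the affected port so mlx5_infer_tx_affinity_mapping() can later steer
 * traffic away from an inactive port.
 */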
static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
					      struct lag_tracker *tracker,
					      struct net_device *ndev,
					      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag_lower_info;
	int idx;

	if (!netif_is_lag_port(ndev))
		return 0;

	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
	if (idx == -1)
		return 0;

	/* This information is used to determine virtual to physical
	 * port mapping.
	 */
	lag_lower_info = info->lower_state_info;
	if (!lag_lower_info)
		return 0;

	tracker->netdev_state[idx] = *lag_lower_info;

	return 1;
}

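/* Netdev notifier callback. The event handlers above operate on a local
 * copy of the tracker, which is then published under lag_mutex; the actual
 * bonding work is deferred to the workqueue, where the device-list lock
 * can be taken safely.
 */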
static int mlx5_lag_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lag_tracker tracker;
	struct mlx5_lag *ldev;
	int changed = 0;

	if (!net_eq(dev_net(ndev), &init_net))
		return NOTIFY_DONE;

	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
		return NOTIFY_DONE;

	ldev    = container_of(this, struct mlx5_lag, nb);
	tracker = ldev->tracker;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
							ptr);
		break;
	case NETDEV_CHANGELOWERSTATE:
		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
							     ndev, ptr);
		break;
	}

	mutex_lock(&lag_mutex);
	ldev->tracker = tracker;
	mutex_unlock(&lag_mutex);

	if (changed)
		mlx5_queue_bond_work(ldev, 0);

	return NOTIFY_DONE;
}

static struct mlx5_lag *mlx5_lag_dev_alloc(void)
{
	struct mlx5_lag *ldev;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return NULL;

	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);

	return ldev;
}

static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
{
	kfree(ldev);
}

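/* Each PF is registered under the array slot matching its PCI function
 * number, so the two functions of one card always land in pf[0] and pf[1].
 */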
static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
				struct mlx5_core_dev *dev,
				struct net_device *netdev)
{
	unsigned int fn = PCI_FUNC(dev->pdev->devfn);

	if (fn >= MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	ldev->pf[fn].dev    = dev;
	ldev->pf[fn].netdev = netdev;
	ldev->tracker.netdev_state[fn].link_up = 0;
	ldev->tracker.netdev_state[fn].tx_enabled = 0;

	dev->priv.lag = ldev;

	mutex_unlock(&lag_mutex);
}

static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
				   struct mlx5_core_dev *dev)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev == dev)
			break;

	if (i == MLX5_MAX_PORTS)
		return;

	mutex_lock(&lag_mutex);
	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));

	dev->priv.lag = NULL;
	mutex_unlock(&lag_mutex);
}

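/* Both PFs of a card share a single struct mlx5_lag: the first PF to probe
 * allocates it, and a later PF finds it through the other physical
 * device's priv.lag pointer (via mlx5_get_next_phys_dev()).
 */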
/* Must be called with intf_mutex held */
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
	struct mlx5_lag *ldev = NULL;
	struct mlx5_core_dev *tmp_dev;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    !MLX5_CAP_GEN(dev, lag_master) ||
	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
		return;

	tmp_dev = mlx5_get_next_phys_dev(dev);
	if (tmp_dev)
		ldev = tmp_dev->priv.lag;

	if (!ldev) {
		ldev = mlx5_lag_dev_alloc();
		if (!ldev) {
			mlx5_core_err(dev, "Failed to alloc lag dev\n");
			return;
		}
	}

	mlx5_lag_dev_add_pf(ldev, dev, netdev);

	if (!ldev->nb.notifier_call) {
		ldev->nb.notifier_call = mlx5_lag_netdev_event;
		if (register_netdevice_notifier(&ldev->nb)) {
			ldev->nb.notifier_call = NULL;
			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
		}
	}
}

/* Must be called with intf_mutex held */
void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	int i;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		return;

	if (__mlx5_lag_is_active(ldev))
		mlx5_deactivate_lag(ldev);

	mlx5_lag_dev_remove_pf(ldev, dev);

	for (i = 0; i < MLX5_MAX_PORTS; i++)
		if (ldev->pf[i].dev)
			break;

	if (i == MLX5_MAX_PORTS) {
		if (ldev->nb.notifier_call)
			unregister_netdevice_notifier(&ldev->nb);
		cancel_delayed_work_sync(&ldev->bond_work);
		mlx5_lag_dev_free(ldev);
	}
}

bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_roce(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_roce);

bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_active(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_active);

bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;
	bool res;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	res  = ldev && __mlx5_lag_is_sriov(ldev);
	mutex_unlock(&lag_mutex);

	return res;
}
EXPORT_SYMBOL(mlx5_lag_is_sriov);

void mlx5_lag_update(struct mlx5_core_dev *dev)
{
	struct mlx5_lag *ldev;

	mlx5_dev_list_lock();
	ldev = mlx5_lag_dev_get(dev);
	if (!ldev)
		goto unlock;

	mlx5_do_bond(ldev);

unlock:
	mlx5_dev_list_unlock();
}

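/* In active-backup mode, return the netdev of the currently transmitting
 * port; in hash mode either port works, so PF0's netdev is used. The
 * returned netdev is refcounted with dev_hold() and must be released by
 * the caller.
 */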
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
{
	struct net_device *ndev = NULL;
	struct mlx5_lag *ldev;

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);

	if (!(ldev && __mlx5_lag_is_roce(ldev)))
		goto unlock;

	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
		       ldev->pf[0].netdev : ldev->pf[1].netdev;
	} else {
		ndev = ldev->pf[0].netdev;
	}
	if (ndev)
		dev_hold(ndev);

unlock:
	mutex_unlock(&lag_mutex);

	return ndev;
}
EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);

bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
						 priv);
	struct mlx5_lag *ldev;

	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
		return true;

	ldev = mlx5_lag_dev_get(dev);
	if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev)
		return true;

	/* If bonded, we do not add an IB device for PF1. */
	return false;
}

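/* Query congestion counters. When a RoCE LAG is active, the single IB
 * device fronts both PFs, so each requested counter is read from both
 * ports and summed; otherwise only the given device is queried.
 */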
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cong_statistics_out);
	struct mlx5_core_dev *mdev[MLX5_MAX_PORTS];
	struct mlx5_lag *ldev;
	int num_ports;
	int ret, i, j;
	void *out;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	memset(values, 0, sizeof(*values) * num_counters);

	mutex_lock(&lag_mutex);
	ldev = mlx5_lag_dev_get(dev);
	if (ldev && __mlx5_lag_is_roce(ldev)) {
		num_ports = MLX5_MAX_PORTS;
		mdev[0] = ldev->pf[0].dev;
		mdev[1] = ldev->pf[1].dev;
	} else {
		num_ports = 1;
		mdev[0] = dev;
	}

	for (i = 0; i < num_ports; ++i) {
		ret = mlx5_cmd_query_cong_counter(mdev[i], false, out, outlen);
		if (ret)
			goto unlock;

		for (j = 0; j < num_counters; ++j)
			values[j] += be64_to_cpup((__be64 *)(out + offsets[j]));
	}

unlock:
	mutex_unlock(&lag_mutex);
	kvfree(out);
	return ret;
}
EXPORT_SYMBOL(mlx5_lag_query_cong_counters);