/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"
enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u16                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFS */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};
static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}
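
/* Note: arm_change_event is a one-shot arm. After the FW delivers a NIC
 * vport change event, events must be re-armed, which is why
 * esw_vport_change_handle_locked() ends by calling this helper again with
 * the vport's enabled_events mask (and why mlx5_esw_vport_disable() arms
 * with an empty mask to silence further events).
 */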
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}
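
/* Callers such as esw_vport_setup() pass
 * set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT to program both C-VLAN
 * stripping and insertion, and set_flags = 0 to clear them. Note that
 * field_select is set for both fields unconditionally, so a context left
 * zeroed disables the corresponding feature.
 */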
/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}
static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}
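
/* The three legacy FDB rule flavors above are all built on
 * __esw_fdb_set_vport_rule():
 * - esw_fdb_set_vport_rule():         exact DMAC match (mask ff:ff:ff:ff:ff:ff)
 * - esw_fdb_set_vport_allmulti_rule(): any multicast DMAC, by masking only
 *   the multicast bit (mac_c[0] = mac_v[0] = 0x01)
 * - esw_fdb_set_vport_promisc_rule():  zero DMAC mask with rx_rule=true, so
 *   it matches everything arriving from the uplink source port
 */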
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}
static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}
static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}
static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted back to the
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}
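
/* The list sync above is a mark-and-sweep: every cached address is first
 * marked MLX5_ACTION_DEL, addresses still reported by the vport context are
 * re-marked MLX5_ACTION_NONE (or added as MLX5_ACTION_ADD), and
 * esw_apply_vport_addr_list() then executes whatever actions remain.
 */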
/* Sync vport UC/MC list from vport context
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}
/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}
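
/* The uplink allmulti rule is shared by all mc-promiscuous vports and is
 * reference counted via esw->mc_promisc.refcnt above; it is only removed
 * when the last vport leaves allmulti mode.
 */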
/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}
void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
	const struct mlx5_core_dev *dev = esw->dev;

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}
/* Vport QoS management */
static void esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return;

	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
		return;

	if (esw->qos.enabled)
		return;

	MLX5_SET(scheduling_context, tsar_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return;
	}

	esw->qos.enabled = true;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}
static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled)
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}
static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	int err;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);

	vport->qos.enabled = false;
}
static int esw_vport_qos_config(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	return 0;
}
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps)
{
	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	if (!vport->qos.enabled)
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

	return mlx5_modify_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  ctx,
						  vport->qos.esw_tsar_ix,
						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
}
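
/* Only the MAX_AVERAGE_BW bit is set in the modify bitmask above, so this
 * helper changes the vport's rate limit without disturbing bw_share or the
 * parent TSAR configured by esw_vport_enable_qos().
 */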
static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}
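
/* This is the usual MAC-to-EUI-64 expansion: 0xff,0xfe is inserted in the
 * middle of the 48-bit MAC. For example, MAC 00:11:22:33:44:55 yields node
 * GUID 00:11:22:ff:fe:33:44:55. Note the universal/local bit is not flipped
 * here, unlike strict EUI-64 derivation.
 */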
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}
static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}
static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	/* Attach vport to the eswitch rate limiter */
	esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share);

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}
/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	esw_vport_disable_qos(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}
int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}
void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}
static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}
/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
 * Caller must free the memory using kvfree() when valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}
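
/* A minimal caller sketch, mirroring mlx5_eswitch_update_num_of_vfs() below:
 *
 *	out = mlx5_esw_query_functions(dev);
 *	if (IS_ERR(out))
 *		return;
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */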
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}
static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}
static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}
/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	if (err)
		return err;

	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
	}
}
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once vport and representor are ready, take the external host PF
	 * out of initializing state. Enabling HCA clears the iser->initializing
	 * bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
 * whichever are previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}
static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}
static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}
static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}
static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}
/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @mode:	Eswitch mode to enable
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0, when eswitch
 *		is enabled without sriov VFs or when caller
 *		is unaware of the sriov state of the host PF on ECPF based
 *		eswitch. Caller should pass < 0 when num_vfs should be
 *		completely ignored. This is typically the case when eswitch
 *		is enabled without sriov regardless of PF/ECPF system.
 * mlx5_eswitch_enable_locked() enables eswitch in either legacy or offloads
 * mode. If num_vfs >= 0 is provided, it sets up VF related eswitch vports.
 * It returns 0 on success or error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	esw_create_tsar(esw);

	esw->mode = mode;

	mlx5_lag_update(esw->dev);

	if (mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_rescan_drivers(esw->dev);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, mode);

	return 0;

abort:
	esw->mode = MLX5_ESWITCH_NONE;

	if (mode == MLX5_ESWITCH_OFFLOADS)
		mlx5_rescan_drivers(esw->dev);

	esw_destroy_tsar(esw);
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}
/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 * mlx5_eswitch_enable() returns 0 on success or error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	int ret;

	if (!mlx5_esw_allowed(esw))
		return 0;

	down_write(&esw->mode_lock);
	if (esw->mode == MLX5_ESWITCH_NONE) {
		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	up_write(&esw->mode_lock);
	return ret;
}
void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
	int old_mode;

	lockdep_assert_held_write(&esw->mode_lock);

	if (esw->mode == MLX5_ESWITCH_NONE)
		return;

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	/* Notify eswitch users that it is exiting from current mode.
	 * So that it can do necessary cleanup before the eswitch is disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);

	mlx5_eswitch_event_handlers_unregister(esw);

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		esw_offloads_disable(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

	mlx5_lag_update(esw->dev);

	if (old_mode == MLX5_ESWITCH_OFFLOADS)
		mlx5_rescan_drivers(esw->dev);

	esw_destroy_tsar(esw);
	mlx5_esw_acls_ns_cleanup(esw);

	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
}
void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	down_write(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw, clear_vf);
	esw->esw_funcs.num_vfs = 0;
	up_write(&esw->mode_lock);
}
static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}
static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}

static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}
static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}
static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	init_rwsem(&esw->mode_lock);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	mutex_destroy(&esw->state_lock);
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}
/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}
static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return false;

	return xa_get_mark(&esw->vports, vport_num, mark);
}
bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}
static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		ether_addr_copy(hw_addr, vport->info.mac);
		*hw_addr_len = ETH_ALEN;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled)
		err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
	else
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	ivi->min_tx_rate = evport->qos.min_rate;
	ivi->max_tx_rate = evport->qos.max_rate;
	mutex_unlock(&esw->state_lock);

	return 0;
}
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}
static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, evport) {
		if (!evport->enabled || evport->qos.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->qos.min_rate;
	}

	if (max_guarantee)
		return max_t(u32, max_guarantee / fw_max_bw_share, 1);
	return 0;
}
static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	u32 divider = calculate_vports_min_rate_divider(esw);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	unsigned long i;
	u32 bw_share;
	int err;

	mlx5_esw_for_each_vport(esw, i, evport) {
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->qos.min_rate;
		vport_max_rate = evport->qos.max_rate;
		bw_share = 0;

		if (divider)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, evport, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}
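
/* Worked example, assuming MLX5_RATE_TO_BW_SHARE() clamps rate/divider to
 * the [MLX5_MIN_BW_SHARE, fw_max_bw_share] range: with fw_max_bw_share = 100
 * and two enabled vports whose min_rate is 1000 and 500, the divider is
 * max(1000 / 100, 1) = 10, giving bw_share values of 100 and 50, i.e. a 2:1
 * guaranteed-bandwidth ratio scaled into the FW's share range.
 */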
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	u32 fw_max_bw_share;
	u32 previous_min_rate;
	bool min_rate_supported;
	bool max_rate_supported;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);

	if (min_rate == evport->qos.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->qos.min_rate;
	evport->qos.min_rate = min_rate;
	err = normalize_vports_min_rate(esw);
	if (err) {
		evport->qos.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->qos.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->qos.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}
u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);
bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
		return true;

	return false;
}
bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}

void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}
/**
 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 *
 * Should be called by esw resources callers.
 *
 * Return: true on success or false.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* e.g. VF doesn't have eswitch so nothing to do */
	if (!mlx5_esw_allowed(esw))
		return true;

	if (down_read_trylock(&esw->mode_lock) != 0)
		return true;

	return false;
}

/**
 * mlx5_esw_release() - Release a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_release(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		up_read(&esw->mode_lock);
}
/**
 * mlx5_esw_get() - Increase esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_inc(&esw->user_count);
}

/**
 * mlx5_esw_put() - Decrease esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_put(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_dec_if_positive(&esw->user_count);
}
/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * 0       - esw mode if successfully locked and refcount is 0.
 * * -EBUSY  - refcount is not 0.
 * * -EINVAL - In the middle of switching mode or lock is already held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	return esw->mode;
}

/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	up_write(&esw->mode_lock);
}
/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);