/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node	node;
	u8			action;
	u16			vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to the MPFS L2 table */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	vport = xa_load(&esw->vports, vport_num);
	if (!vport) {
		esw_debug(esw->dev, "vport out of range: num(0x%x)\n", vport_num);
		return ERR_PTR(-EINVAL);
	}
	return vport;
}

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}

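/* A minimal usage sketch (not part of the driver): arming all three legacy
 * change events for a vport, the way esw_vport_change_handle_locked() does
 * with vport->enabled_events. The mask bits are the mlx5_eswitch_vport_event
 * values used throughout this file.
 *
 *	u32 mask = MLX5_VPORT_UC_ADDR_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE |
 *		   MLX5_VPORT_PROMISC_CHANGE;
 *	int err = arm_vport_context_events_cmd(dev, vport_num, mask);
 *
 * The change event fires once per arming, so the handler must re-arm after
 * processing to keep receiving notifications.
 */
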
/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}

/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

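/* Editorial summary of the three legacy FDB rule flavors built on
 * __esw_fdb_set_vport_rule() above:
 *
 *  - esw_fdb_set_vport_rule():          exact DMAC match (the criteria mask
 *                                       is the broadcast address).
 *  - esw_fdb_set_vport_allmulti_rule(): matches only the multicast bit
 *                                       (mac_c[0] = mac_v[0] = 0x01).
 *  - esw_fdb_set_vport_promisc_rule():  zero MAC mask with rx_rule = true,
 *                                       so it matches everything arriving
 *                                       from the uplink source port.
 */
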
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	mlx5_esw_for_each_vport(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

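/* Summary of the MC refcounting scheme implemented above: esw_mc->refcnt
 * counts only vports that explicitly subscribed to the multicast MAC.
 * Entries that exist solely because a vport is in allmulti mode
 * (vaddr->mc_promisc) never touch the refcount, so disabling allmulti on
 * one vport cannot tear down a MAC that other vports still subscribe to;
 * the uplink rule is removed only when the last explicit user is gone.
 */
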
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted back to a
			 * regular vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync vport MC promiscuous list from the eswitch MC table.
 * Must be called after esw_update_vport_addr_list.
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

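/* The locked change handler above is the single synchronization point for
 * legacy mode: it re-reads the permanent MAC, diffs the UC/MC lists against
 * the NIC vport context, applies rx-mode (promisc/allmulti) rules, and only
 * then re-arms the change event so no update is lost in between.
 */
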
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
	const struct mlx5_core_dev *dev = esw->dev;

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}

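/* Note: ELEMENT_TYPE_CAP_MASK_TASR is spelled "TASR" (not "TSAR") in the
 * firmware interface headers (mlx5_ifc); the name is kept as-is here
 * because it must match that definition, even though the scheduling
 * element it gates is the TSAR.
 */
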
/* Vport QoS management */
static void esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return;

	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
		return;

	if (esw->qos.enabled)
		return;

	MLX5_SET(scheduling_context, tsar_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return;
	}

	esw->qos.enabled = true;
}

static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}

static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err;

	if (!esw->qos.enabled)
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}

static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	int err;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);

	vport->qos.enabled = false;
}

static int esw_vport_qos_config(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	return 0;
}

int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps)
{
	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	if (!vport->qos.enabled)
		return -EOPNOTSUPP;

	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

	return mlx5_modify_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  ctx,
						  vport->qos.esw_tsar_ix,
						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
}

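/* Assumed from the rate_mbps parameter name and the scheduling context
 * usage in this file: max_average_bw is programmed in Mbps, and a value
 * of 0 is treated as "unlimited", so callers pass the desired rate cap
 * directly in Mbps.
 */
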
static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

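/* Worked example of the mapping above: it builds an EUI-64-style node GUID
 * by splitting the 48-bit MAC and inserting 0xFFFE in the middle, e.g.
 * MAC 00:11:22:33:44:55 yields GUID 00:11:22:ff:fe:33:44:55 (stored
 * byte-reversed into the u64). Unlike strict RFC 4291 interface IDs, the
 * universal/local bit is left untouched.
 */
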
static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_legacy_vport_acl_setup(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_vport_acl_cleanup(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	/* Attach vport to the eswitch rate limiter */
	esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share);

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}

/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	esw_vport_disable_qos(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}

int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
			  enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
		ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
		if (ret)
			goto err_vhca_mapping;
	}

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;

err_vhca_mapping:
	esw_vport_cleanup(esw, vport);
	mutex_unlock(&esw->state_lock);
	return ret;
}

void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);

	if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
	    MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		mlx5_esw_vport_vhca_id_clear(esw, vport_num);

	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);
	return NOTIFY_OK;
}

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns the raw output of the
 * QUERY_ESW_FUNCTIONS command on success; otherwise it returns ERR_PTR.
 * Caller must free the memory using kvfree() when a valid pointer is
 * returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

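/* Usage sketch, mirroring mlx5_eswitch_update_num_of_vfs() further down in
 * this file (the field name is taken from that caller):
 *
 *	const u32 *out = mlx5_esw_query_functions(dev);
 *
 *	if (IS_ERR(out))
 *		return PTR_ERR(out);
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */
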
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->qos, 0, sizeof(vport->qos));
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
	if (err)
		return err;

	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		if (!vport->enabled)
			continue;
		mlx5_eswitch_unload_vport(esw, vport->vport);
	}
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, num_vfs);
	return err;
}

static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return 0;

	/* Once the vport and representor are ready, take the external host PF
	 * out of the initializing state. Enabling HCA clears the
	 * iser->initializing bit and host PF driver loading can progress.
	 */
	return mlx5_cmd_host_pf_enable_hca(dev);
}

static void host_pf_disable_hca(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_ecpf(dev))
		return;

	mlx5_cmd_host_pf_disable_hca(dev);
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable external host PF HCA */
	ret = host_pf_enable_hca(esw->dev);
	if (ret)
		goto pf_hca_err;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
	host_pf_disable_hca(esw->dev);
pf_hca_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	host_pf_disable_hca(esw->dev);
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
{
	struct mlx5_esw_event_info info = {};

	info.new_mode = mode;

	blocking_notifier_call_chain(&esw->n_head, 0, &info);
}

static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	int total_vports;
	int err;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_egress_acls_init(dev, total_vports);
		if (err)
			return err;
	} else {
		esw_warn(dev, "egress ACL is not supported by FW\n");
	}

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
		err = mlx5_fs_ingress_acls_init(dev, total_vports);
		if (err)
			goto err;
	} else {
		esw_warn(dev, "ingress ACL is not supported by FW\n");
	}
	return 0;

err:
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
	return err;
}

static void mlx5_esw_acls_ns_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		mlx5_fs_ingress_acls_cleanup(dev);
	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		mlx5_fs_egress_acls_cleanup(dev);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @mode:	Eswitch mode to enable
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0, when eswitch
 *		is enabled without sriov VFs or when caller
 *		is unaware of the sriov state of the host PF on an ECPF based
 *		eswitch. Caller should pass < 0 when num_vfs should be
 *		completely ignored. This is typically the case when eswitch
 *		is enabled without sriov regardless of PF/ECPF system.
 *
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
 * eswitch vports. It returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	mlx5_eswitch_get_devlink_param(esw);

	err = mlx5_esw_acls_ns_init(esw);
	if (err)
		return err;

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	esw_create_tsar(esw);

	esw->mode = mode;

	mlx5_lag_update(esw->dev);

	if (mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_rescan_drivers(esw->dev);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_esw_mode_change_notify(esw, mode);

	return 0;

abort:
	esw->mode = MLX5_ESWITCH_NONE;

	if (mode == MLX5_ESWITCH_OFFLOADS)
		mlx5_rescan_drivers(esw->dev);

	esw_destroy_tsar(esw);
	mlx5_esw_acls_ns_cleanup(esw);
	return err;
}

/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 *
 * mlx5_eswitch_enable() returns 0 on success or an error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	int ret;

	if (!mlx5_esw_allowed(esw))
		return 0;

	down_write(&esw->mode_lock);
	if (esw->mode == MLX5_ESWITCH_NONE) {
		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	up_write(&esw->mode_lock);
	return ret;
}

void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
	int old_mode;

	lockdep_assert_held_write(&esw->mode_lock);

	if (esw->mode == MLX5_ESWITCH_NONE)
		return;

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	/* Notify eswitch users that it is exiting from the current mode,
	 * so that they can do the necessary cleanup before the eswitch is
	 * disabled.
	 */
	mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);

	mlx5_eswitch_event_handlers_unregister(esw);

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		esw_offloads_disable(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

	mlx5_lag_update(esw->dev);

	if (old_mode == MLX5_ESWITCH_OFFLOADS)
		mlx5_rescan_drivers(esw->dev);

	esw_destroy_tsar(esw);
	mlx5_esw_acls_ns_cleanup(esw);

	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!mlx5_esw_allowed(esw))
		return;

	down_write(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw, clear_vf);
	esw->esw_funcs.num_vfs = 0;
	up_write(&esw->mode_lock);
}

static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out)
{
	u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, function_id, MLX5_VPORT_PF);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}

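/* The op_mod encoding above follows the QUERY_HCA_CAP convention: the upper
 * bits select the capability type (MLX5_CAP_GENERAL here) shifted left by
 * one, and bit 0 selects maximum (HCA_CAP_OPMOD_GET_MAX) vs. current
 * capabilities.
 */
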
int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *sf_base_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	if (!mlx5_core_is_ecpf(dev)) {
		*max_sfs = 0;
		return 0;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_query_hca_cap_host_pf(dev, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*max_sfs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_sf);
	*sf_base_id = MLX5_GET(cmd_hca_cap, hca_caps, sf_base_id);

out_free:
	kfree(query_ctx);
	return err;
}

static int mlx5_esw_vport_alloc(struct mlx5_eswitch *esw, struct mlx5_core_dev *dev,
				int index, u16 vport_num)
{
	struct mlx5_vport *vport;
	int err;

	vport = kzalloc(sizeof(*vport), GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	vport->dev = esw->dev;
	vport->vport = vport_num;
	vport->index = index;
	vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	INIT_WORK(&vport->vport_change_handler, esw_vport_change_handler);
	err = xa_insert(&esw->vports, vport_num, vport, GFP_KERNEL);
	if (err)
		goto insert_err;

	esw->total_vports++;
	return 0;

insert_err:
	kfree(vport);
	return err;
}

static void mlx5_esw_vport_free(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	xa_erase(&esw->vports, vport->vport);
	kfree(vport);
}

static void mlx5_esw_vports_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, vport)
		mlx5_esw_vport_free(esw, vport);
	xa_destroy(&esw->vports);
}

static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	u16 max_host_pf_sfs;
	u16 base_sf_num;
	int idx = 0;
	int err;
	int i;

	xa_init(&esw->vports);

	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_PF);
	if (err)
		goto err;
	if (esw->first_host_vport == MLX5_VPORT_PF)
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
	idx++;

	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, idx);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
		idx++;
	}
	base_sf_num = mlx5_sf_start_function_id(dev);
	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	err = mlx5_esw_sf_max_hpf_functions(dev, &max_host_pf_sfs, &base_sf_num);
	if (err)
		goto err;
	for (i = 0; i < max_host_pf_sfs; i++) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, base_sf_num + i);
		if (err)
			goto err;
		xa_set_mark(&esw->vports, base_sf_num + i, MLX5_ESW_VPT_SF);
		idx++;
	}

	if (mlx5_ecpf_vport_exists(dev)) {
		err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_ECPF);
		if (err)
			goto err;
		idx++;
	}
	err = mlx5_esw_vport_alloc(esw, dev, idx, MLX5_VPORT_UPLINK);
	if (err)
		goto err;
	return 0;

err:
	mlx5_esw_vports_cleanup(esw);
	return err;
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	int err;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	err = mlx5_esw_vports_init(esw);
	if (err)
		goto abort;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto reps_err;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
	mutex_init(&esw->state_lock);
	init_rwsem(&esw->mode_lock);

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

	dev->priv.eswitch = esw;
	BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 esw->total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));
	return 0;

reps_err:
	mlx5_esw_vports_cleanup(esw);
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	mutex_destroy(&esw->state_lock);
	WARN_ON(!xa_empty(&esw->offloads.vhca_map));
	xa_destroy(&esw->offloads.vhca_map);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	esw_offloads_cleanup_reps(esw);
	mlx5_esw_vports_cleanup(esw);
	kfree(esw);
}

/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}

static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport))
		return false;

	return xa_get_mark(&esw->vports, vport_num, mark);
}

bool mlx5_eswitch_is_vf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_VF);
}

bool mlx5_esw_is_sf_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	return mlx5_esw_check_port_type(esw, vport_num, MLX5_ESW_VPT_SF);
}

static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}

int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		ether_addr_copy(hw_addr, vport->info.mac);
		*hw_addr_len = ETH_ALEN;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled)
		err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
	else
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	ivi->min_tx_rate = evport->qos.min_rate;
	ivi->max_tx_rate = evport->qos.max_rate;
	mutex_unlock(&esw->state_lock);

	return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}

static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	unsigned long i;

	mlx5_esw_for_each_vport(esw, i, evport) {
		if (!evport->enabled || evport->qos.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->qos.min_rate;
	}

	if (max_guarantee)
		return max_t(u32, max_guarantee / fw_max_bw_share, 1);
	return 0;
}

static int normalize_vports_min_rate(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	u32 divider = calculate_vports_min_rate_divider(esw);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	unsigned long i;
	u32 bw_share;
	int err;

	mlx5_esw_for_each_vport(esw, i, evport) {
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->qos.min_rate;
		vport_max_rate = evport->qos.max_rate;
		bw_share = 0;

		if (divider)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, evport, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}

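/* Illustration of the normalization above (values are hypothetical): with
 * fw_max_bw_share = 100 and vport min_rates of {10000, 5000, 0} Mbps, the
 * divider is max(10000 / 100, 1) = 100, yielding bw_share values of
 * {100, 50, MLX5_MIN_BW_SHARE}. The vport with the largest guarantee is
 * pinned to the maximum share and all others scale relative to it.
 */
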
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	u32 fw_max_bw_share;
	u32 previous_min_rate;
	bool min_rate_supported;
	bool max_rate_supported;
	int err = 0;

	if (!mlx5_esw_allowed(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);

	if (min_rate == evport->qos.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->qos.min_rate;
	evport->qos.min_rate = min_rate;
	err = normalize_vports_min_rate(esw);
	if (err) {
		evport->qos.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->qos.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->qos.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_esw_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}

u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	return mlx5_esw_allowed(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
		return true;

	return false;
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}

int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&esw->n_head, nb);
}

void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&esw->n_head, nb);
}

/**
 * mlx5_esw_hold() - Try to take a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 *
 * Should be called by esw resources callers.
 *
 * Return: true on success or false.
 */
bool mlx5_esw_hold(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	/* e.g. VF doesn't have eswitch so nothing to do */
	if (!mlx5_esw_allowed(esw))
		return true;

	if (down_read_trylock(&esw->mode_lock) != 0)
		return true;

	return false;
}

/**
 * mlx5_esw_release() - Release a read lock on esw mode lock.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_release(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		up_read(&esw->mode_lock);
}

/**
 * mlx5_esw_get() - Increase esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_get(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_inc(&esw->user_count);
}

/**
 * mlx5_esw_put() - Decrease esw user count.
 * @mdev: mlx5 core device.
 */
void mlx5_esw_put(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	if (mlx5_esw_allowed(esw))
		atomic64_dec_if_positive(&esw->user_count);
}

/**
 * mlx5_esw_try_lock() - Take a write lock on esw mode lock.
 * @esw: eswitch device.
 *
 * Should be called by esw mode change routine.
 *
 * Return:
 * * >= 0    - esw mode, if successfully locked and refcount is 0.
 * * -EBUSY  - refcount is not 0.
 * * -EINVAL - In the middle of switching mode or lock is already held.
 */
int mlx5_esw_try_lock(struct mlx5_eswitch *esw)
{
	if (down_write_trylock(&esw->mode_lock) == 0)
		return -EINVAL;

	if (atomic64_read(&esw->user_count) > 0) {
		up_write(&esw->mode_lock);
		return -EBUSY;
	}

	return esw->mode;
}

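/* Usage sketch for the mode-change locking pair (hypothetical caller):
 *
 *	err = mlx5_esw_try_lock(esw);
 *	if (err < 0)
 *		return err;	(-EBUSY or -EINVAL)
 *	cur_mode = err;		(the current mode is returned on success)
 *	... switch eswitch mode ...
 *	mlx5_esw_unlock(esw);
 *
 * mlx5_esw_hold()/mlx5_esw_get() above are the read-side counterparts used
 * by resource creators that must not race with a mode change.
 */
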
/**
 * mlx5_esw_unlock() - Release write lock on esw mode lock
 * @esw: eswitch device.
 */
void mlx5_esw_unlock(struct mlx5_eswitch *esw)
{
	up_write(&esw->mode_lock);
}

/**
 * mlx5_eswitch_get_total_vports - Get total vports of the eswitch
 *
 * @dev: Pointer to core device
 *
 * mlx5_eswitch_get_total_vports returns total number of eswitch vports.
 */
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return mlx5_esw_allowed(esw) ? esw->total_vports : 0;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);