/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)

#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0
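/* Map an absolute vport number to its representor entry. Vport numbers are
 * sparse (PF, ECPF, uplink, VFs), so they are first translated into an index
 * of the vport_reps array.
 */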
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return (!!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED));
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}
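/* Add an offloaded rule to the fast path FDB. The destination list is built
 * from the flow attributes (goto-chain table, output vports and/or a flow
 * counter), the source vport (and owner vhca id on merged e-switches) is
 * added to the match, and the rule is installed in the table backing
 * attr->chain / attr->prio.
 */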
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;
	void *misc;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
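/* Two-level variant used when the destination list is split: the rule placed
 * in the level 0 (fast) table forwards to the first attr->split_count vports
 * and then to the level 1 (fwd) table of the same chain/prio.
 */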
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	if (attr->match_level == MLX5_MATCH_NONE)
		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	else
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
					      MLX5_MATCH_MISC_PARAMETERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;
add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
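/* Emulated vlan push/pop: when the FW can't apply per-flow vlan actions, the
 * vlan is configured on the vport itself (e-switch vlan insert/strip) and
 * reference counted across the offloaded rules that need it.
 */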
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
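/* Peer miss rules (merged e-switch): one rule per peer vport in the slow
 * path FDB, matching on source port + source e-switch owner vhca id and
 * forwarding to the peer device's e-switch manager vport.
 */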
static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				  misc_parameters);

	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(peer_dev, vhca_id));

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_PF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
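/* The FDB miss rules catch traffic that matched no offloaded rule: one rule
 * for unicast and one for multicast destination MAC (MLX5_ESW_MISS_FLOWS),
 * both forwarding to the e-switch manager vport.
 */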
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS) and a virtual
 * memory region of 16M (ESW_SIZE); this region is duplicated for each flow
 * table pool. We can allocate up to 16M of each pool, and we keep track of
 * how much we used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
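/* Pool accounting helpers: get_sz_from_pool() hands out the largest table
 * size that still has budget left and decrements its counter;
 * put_sz_to_pool() credits the size back to the largest pool whose entry
 * size does not exceed it.
 */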
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}
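/* Chain/prio FDB tables are created lazily and reference counted per level:
 * esw_get_prio_table() returns the table for (chain, prio, level), creating
 * it (and any earlier levels) on first use; esw_put_prio_table() drops the
 * references and destroys tables whose rule count reaches zero.
 */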
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}
static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
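/* The slow path FDB holds, in this order: the send-to-vport rules (one per
 * representor SQ), the peer e-switch miss group and the two dmac miss rules,
 * which is what the table size computed below accounts for.
 */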
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}
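/* The offloads table is created in the MLX5_FLOW_NAMESPACE_OFFLOADS (NIC RX)
 * namespace and sized for one rule per vport plus the miss flows; the rules
 * themselves are added later via mlx5_eswitch_create_vport_rx_rule().
 */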
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
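/* Steer traffic received on behalf of @vport (matched by source port in the
 * misc parameters) to the destination supplied by the caller.
 */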
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	mlx5_esw_for_all_reps(esw, vport, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_if[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		rep->rep_if[rep_type].unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				    u8 rep_type)
{
	__unload_reps_vf_vport(esw, nvports, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_if[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				 u8 rep_type)
{
	int err;

	/* Special vports must be loaded first. */
	err = __load_reps_special_vport(esw, rep_type);
	if (err)
		return err;

	err = __load_reps_vf_vport(esw, nvports, rep_type);
	if (err)
		goto err_vfs;

	return 0;

err_vfs:
	__unload_reps_special_vport(esw, rep_type);
	return err;
}

static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_all_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
	return err;
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)
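/* Devcom events pair/unpair the two e-switches of a merged e-switch setup:
 * on PAIR each side installs peer miss rules for the other, on UNPAIR they
 * are removed and pending peer flows are cleaned up.
 */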
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
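/* prio_tag_required mode: VF ingress pushes a zero-VID (prio tag) VLAN onto
 * untagged packets and VF egress pops it again, using per-vport ingress and
 * egress ACLs.
 */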
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) Untagged packets - push prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return -EOPNOTSUPP;

	esw_vport_cleanup_ingress_rules(esw, vport);

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;
	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	return err;
}
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only 1 FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_egress_rules(esw, vport);
	return err;
}
static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_vport *vport = NULL;
	int i, j;
	int err;

	mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto err_ingress;
		err = esw_vport_egress_prio_tag_config(esw, vport);
		if (err)
			goto err_egress;
	}

	return 0;

err_egress:
	esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
	mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	return err;
}

static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->dev->priv.sriov.num_vfs) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports,
				      int nvports)
{
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
		err = esw_prio_tag_acls_config(esw, vf_nvports);
		if (err)
			return err;
	}

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw, nvports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, nvports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
		esw_prio_tag_acls_cleanup(esw);
}
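/* On ECPF based devices the number of host VFs can change at run time; the
 * HOST_PARAMS_CHANGE event is handled in a workqueue and loads or unloads
 * the VF representors accordingly.
 */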
static void esw_host_params_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	int err, num_vf = 0;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	err = mlx5_query_host_params_num_vfs(esw->dev, &num_vf);
	if (err || num_vf == esw->host_info.num_vfs)
		goto out;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->host_info.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->host_info.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, num_vf);

		if (err)
			goto out;
	}

	esw->host_info.num_vfs = num_vf;

out:
	kfree(host_work);
}

static int esw_host_params_event(struct notifier_block *nb,
				 unsigned long type, void *data)
{
	struct mlx5_host_work *host_work;
	struct mlx5_host_info *host_info;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	host_info = mlx5_nb_cof(nb, struct mlx5_host_info, nb);
	esw = container_of(host_info, struct mlx5_eswitch, host_info);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_host_params_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
		      int total_nvports)
{
	int err;

	err = esw_offloads_steering_init(esw, vf_nvports, total_nvports);
	if (err)
		return err;

	err = esw_offloads_load_all_reps(esw, vf_nvports);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		MLX5_NB_INIT(&esw->host_info.nb, esw_host_params_event,
			     HOST_PARAMS_CHANGE);
		mlx5_eq_notifier_register(esw->dev, &esw->host_info.nb);
		esw->host_info.num_vfs = vf_nvports;
	}

	mlx5_rdma_enable_roce(esw->dev);

	return 0;

err_reps:
	esw_offloads_steering_cleanup(esw);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	u16 num_vfs;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		mlx5_eq_notifier_unregister(esw->dev, &esw->host_info.nb);
		flush_workqueue(esw->work_queue);
		num_vfs = esw->host_info.num_vfs;
	} else {
		num_vfs = esw->dev->priv.sriov.num_vfs;
	}

	mlx5_rdma_disable_roce(esw->dev);
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw, num_vfs);
	esw_offloads_steering_cleanup(esw);
}
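/* Helpers translating between the devlink eswitch/inline mode constants and
 * the mlx5 internal ones.
 */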
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}
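/* Representor registration API: a rep driver (e.g. the Ethernet representor)
 * sets load/unload/get_proto_dev callbacks for its rep type on all vports;
 * reps move between REP_UNREGISTERED, REP_REGISTERED and REP_LOADED states.
 */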
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep_if *__rep_if,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_if *rep_if;
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_if = &rep->rep_if[rep_type];
		rep_if->load   = __rep_if->load;
		rep_if->unload = __rep_if->unload;
		rep_if->get_proto_dev = __rep_if->get_proto_dev;
		rep_if->priv = __rep_if->priv;

		atomic_set(&rep_if->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	u16 max_vf = mlx5_core_max_vfs(esw->dev);
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == SRIOV_OFFLOADS)
		__unload_reps_all_vport(esw, max_vf, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);