/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define fdb_prio_table(esw, chain, prio, level) \
	(esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

#define UPLINK_REP_INDEX 0
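
/* Illustration (editor's sketch, values hypothetical): fdb_prio_table() is
 * plain 3-D array indexing into the per-eswitch table cache, so
 *
 *	fdb_prio_table(esw, 2, 1, 0).num_rules++;
 *
 * bumps the rule refcount of the level-0 table cached for chain 2, prio 1.
 * The macro yields an lvalue, so it is used for both reads and writes.
 */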
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
	return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}
u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_CHAIN;

	return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
		return FDB_MAX_PRIO;

	return 1;
}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc2, metadata_reg_c_0);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}

	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr->in_rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
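
/* Editor's note on the two flavors above: with metadata matching enabled the
 * rule matches misc_parameters_2.metadata_reg_c_0 against the value the
 * source vport's ingress ACL wrote into reg_c_0; otherwise it matches
 * misc_parameters.source_port directly (plus source_eswitch_owner_vhca_id on
 * merged eswitches, so rules installed by one PF can match the peer's vports).
 */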
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	bool split = !!(attr->split_count);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
		flow_act.vlan[0].vid = attr->vlan_vid[0];
		flow_act.vlan[0].prio = attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
			flow_act.vlan[1].vid = attr->vlan_vid[1];
			flow_act.vlan[1].prio = attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		if (attr->dest_chain) {
			struct mlx5_flow_table *ft;

			ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = attr->split_count; j < attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.reformat_id = attr->dests[j].encap_id;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.reformat_id =
						attr->dests[j].encap_id;
				}
				i++;
			}
		}
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
		if (attr->tunnel_match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
		if (attr->match_level != MLX5_MATCH_NONE)
			spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
	} else if (attr->match_level != MLX5_MATCH_NONE) {
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		esw->offloads.num_flows++;

	return rule;

err_add_rule:
	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
	if (attr->dest_chain)
		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
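
/* Typical usage (editor's sketch, not a call site in this file): the TC
 * offload path fills @spec from the parsed flower match and @attr from the
 * parsed actions, then:
 *
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *
 * and later tears the rule down with mlx5_eswitch_del_offloaded_rule(esw,
 * rule, attr), which also drops the prio-table references taken here.
 */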
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.reformat_id = attr->dests[i].encap_id;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr);

	if (attr->match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto add_err;

	esw->offloads.num_flows++;

	return rule;

add_err:
	esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
	esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
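
/* Layout produced by the function above (editor's illustration): the rule
 * lands in the level-0 "fast" table of (chain, prio) and forwards the first
 * attr->split_count destinations there, while its last destination is the
 * level-1 "fwd" table of the same (chain, prio), where the remainder of a
 * split rule (e.g. mirror-then-rewrite) is applied.
 */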
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_esw_flow_attr *attr,
			bool fwd_rule)
{
	bool split = (attr->split_count > 0);

	mlx5_del_flow_rules(rule);
	esw->offloads.num_flows--;

	if (fwd_rule) {
		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
	} else {
		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
		if (attr->dest_chain)
			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_esw_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
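
/* Worked example (editor's sketch, hypothetical rule mix): the first rule
 * pushing vlan 10 for a rep programs the vport context with
 * SET_VLAN_INSERT | SET_VLAN_STRIP and records vport->vlan = 10; a second
 * rule pushing vlan 10 only bumps vport->vlan_refcount, while a rule pushing
 * vlan 20 is rejected earlier by esw_add_vlan_action_check(). The global
 * vlan_push_pop_refcount likewise applies SET_VLAN_STRIP once for the first
 * vlan rule and reverts it only when the last such rule is deleted.
 */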
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
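
/* Usage sketch (editor's illustration, not a call site in this file): a
 * representor netdev typically installs one such rule per send queue so that
 * traffic it transmits on SQ @sqn is steered to the represented vport:
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep->vport, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 */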
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}

static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u8 fdb_to_vport_reg_c_id;
	int err;

	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
						   out, sizeof(out));
	if (err)
		return err;

	fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
					 esw_vport_context.fdb_to_vport_reg_c_id);

	fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.fdb_to_vport_reg_c_id, 1);

	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
						     in, sizeof(in));
}
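
/* Editor's note: the two helpers above toggle a single bit,
 * MLX5_FDB_TO_VPORT_REG_C_0, in the manager vport's esw vport context via a
 * read-modify-write (query, OR in or AND out the bit, modify with
 * field_select set). With the bit on, the value written into metadata
 * register reg_c_0 at the ingress ACL survives the FDB and can be matched
 * again by the vport rx rules on the receive side.
 */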
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports four pool sizes (ESW_POOLS) and a virtual
 * memory region of 16M (ESW_SIZE); this region is duplicated for each flow
 * table pool. We can allocate up to 16M of each pool, and we keep track of
 * how much we used via put/get_sz_to_pool. Firmware doesn't report any of
 * this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
				    64 * 1024, 4 * 1024 };
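
/* Worked example (editor's sketch, assuming fdb_max >= 4M so every pool is
 * populated): fdb_left[] starts as ESW_SIZE / ESW_POOLS[i] = {4, 16, 256,
 * 4096}. The first esw_get_prio_table() then grabs a 4M table (fdb_left[0]
 * drops to 3); when that table is destroyed, put_sz_to_pool(esw, 4M) hands
 * the slot back to the first (largest) pool whose size the freed table
 * reaches.
 */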
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
	int sz = 0, i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (esw->fdb_table.offloads.fdb_left[i]) {
			--esw->fdb_table.offloads.fdb_left[i];
			sz = ESW_POOLS[i];
			break;
		}
	}

	return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
		if (sz >= ESW_POOLS[i]) {
			++esw->fdb_table.offloads.fdb_left[i];
			break;
		}
	}
}
static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
		       struct mlx5_flow_namespace *ns,
		       u16 table_prio,
		       int level,
		       u32 flags)
{
	struct mlx5_flow_table *fdb;
	int sz;

	sz = get_sz_from_pool(esw);
	if (!sz)
		return ERR_PTR(-ENOSPC);

	fdb = mlx5_create_auto_grouped_flow_table(ns,
						  table_prio,
						  sz,
						  ESW_OFFLOADS_NUM_GROUPS,
						  level,
						  flags);
	if (IS_ERR(fdb)) {
		esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
			 (int)PTR_ERR(fdb), table_prio, level, sz);
		put_sz_to_pool(esw, sz);
	}

	return fdb;
}
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_namespace *ns;
	int table_prio, l = 0;
	u32 flags = 0;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return esw->fdb_table.offloads.slow_fdb;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	fdb = fdb_prio_table(esw, chain, prio, level).fdb;
	if (fdb) {
		/* take ref on earlier levels as well */
		while (level >= 0)
			fdb_prio_table(esw, chain, prio, level--).num_rules++;
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return fdb;
	}

	ns = mlx5_get_fdb_sub_ns(dev, chain);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB sub namespace\n");
		mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

	/* create earlier levels for correct fs_core lookup when
	 * connecting tables
	 */
	for (l = 0; l <= level; l++) {
		if (fdb_prio_table(esw, chain, prio, l).fdb) {
			fdb_prio_table(esw, chain, prio, l).num_rules++;
			continue;
		}

		fdb = create_next_size_table(esw, ns, table_prio, l, flags);
		if (IS_ERR(fdb)) {
			l--;
			goto err_create_fdb;
		}

		fdb_prio_table(esw, chain, prio, l).fdb = fdb;
		fdb_prio_table(esw, chain, prio, l).num_rules = 1;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	return fdb;

err_create_fdb:
	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
	if (l >= 0)
		esw_put_prio_table(esw, chain, prio, l);

	return fdb;
}
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
	int l;

	if (chain == FDB_SLOW_PATH_CHAIN)
		return;

	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

	for (l = level; l >= 0; l--) {
		if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
			continue;

		put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
		mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
		fdb_prio_table(esw, chain, prio, l).fdb = NULL;
	}

	mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}
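
/* Pairing rule (editor's note): every successful esw_get_prio_table(esw,
 * chain, prio, level) takes one reference on each level 0..level of that
 * (chain, prio) slot, so it must be balanced by exactly one
 * esw_put_prio_table() with the same arguments; the last put destroys the
 * tables and returns their sizes to the pool.
 */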
static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
	/* If lazy creation isn't supported, deref the fast path tables */
	if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
		esw_put_prio_table(esw, 0, 1, 1);
		esw_put_prio_table(esw, 0, 1, 0);
	}
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters_2.metadata_reg_c_0);
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	u32 *flow_group_in, max_flow_counter;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0, i;
	struct mlx5_flow_group *g;
	u32 flags = 0, fdb_max;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);
	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
		  fdb_max);

	for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
		esw->fdb_table.offloads.fdb_left[i] =
			ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* If lazy creation isn't supported, open the fast path tables now */
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
		esw_get_prio_table(esw, 0, 1, 0);
		esw_get_prio_table(esw, 0, 1, 1);
	} else {
		esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
		esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
	}

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create peer esw miss group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + esw->total_vports - 1);
	ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto peer_miss_err;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	esw->nvports = nvports;
	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_destroy_offloads_fast_fdb_tables(esw);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
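
/* Sizing example (editor's sketch, hypothetical numbers): with nvports = 4,
 * table_size = 4 * MAX_SQ_NVPORTS + MAX_PF_SQ + MLX5_ESW_MISS_FLOWS +
 * total_vports = 128 + 256 + 2 + total_vports. The send-to-vport group takes
 * the first 384 entries, the peer miss group the next total_vports, and the
 * miss group the final MLX5_ESW_MISS_FLOWS + 1 slots, matching the group
 * index bounds programmed above.
 */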
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	esw_destroy_offloads_fast_fdb_tables(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = nvports + MLX5_ESW_MISS_FLOWS;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	nvports = nvports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET_TO_ONES(fte_match_set_misc2, misc, metadata_reg_c_0);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
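
/* Usage sketch (editor's illustration, not a call site in this file): the
 * representor rx path points such a rule at its own TIR so that traffic
 * sourced from @vport which missed the FDB is delivered to the rep:
 *
 *	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 *	dest.tir_num = tirn;
 *	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, rep->vport, &dest);
 */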
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set offloads mode, SRIOV legacy not enabled");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN], rep_type;
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	mlx5_esw_for_all_reps(esw, vport, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport);
		ether_addr_copy(rep->hw_id, hw_id);

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				   u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
				    u8 rep_type)
{
	__unload_reps_vf_vport(esw, nvports, rep_type);

	/* Special vports must be the last to unload. */
	__unload_reps_special_vport(esw, rep_type);
}

static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		__unload_reps_all_vport(esw, nvports, rep_type);
}
static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
				   struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	int err = 0;

	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
		err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
		if (err)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_REGISTERED);
	}

	return err;
}

static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	err = __esw_offloads_load_rep(esw, rep, rep_type);
	if (err)
		return err;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_pf;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_ecpf;
	}

	return 0;

err_ecpf:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

err_pf:
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
				u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int err, i;

	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
		err = __esw_offloads_load_rep(esw, rep, rep_type);
		if (err)
			goto err_vf;
	}

	return 0;

err_vf:
	__unload_reps_vf_vport(esw, --i, rep_type);
	return err;
}

static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_vf_vport(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_vf_vport(esw, nvports, rep_type);
	return err;
}

static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = __load_reps_special_vport(esw, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		__unload_reps_special_vport(esw, rep_type);
	return err;
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
	mlx5e_tc_clean_fdb_peer_flows(esw);
	esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_eswitch *peer_esw = event_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_out;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);

err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) Untagged packets - push prio tag VLAN and modify metadata if
	 *    required, allow
	 * Unmatched traffic is allowed by default
	 */

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* Untagged packets - push prio tag VLAN, allow */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.vlan[0].ethtype = ETH_P_8021Q;
	flow_act.vlan[0].vid = 0;
	flow_act.vlan[0].prio = 0;

	if (vport->ingress.modify_metadata_rule) {
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		flow_act.modify_id = vport->ingress.modify_metadata_id;
	}

	vport->ingress.allow_rule =
		mlx5_add_flow_rules(vport->ingress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.allow_rule)) {
		err = PTR_ERR(vport->ingress.allow_rule);
		esw_warn(esw->dev,
			 "vport[%d] configure ingress untagged allow rule, err(%d)\n",
			 vport->vport, err);
		vport->ingress.allow_rule = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_ingress_rules(esw, vport);
	return err;
}
static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
						     struct mlx5_vport *vport)
{
	u8 action[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec spec = {};
	int err = 0;

	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
	MLX5_SET(set_action_in, action, data,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));

	err = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
				       1, action, &vport->ingress.modify_metadata_id);
	if (err) {
		esw_warn(esw->dev,
			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
			 vport->vport, err);
		return err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	flow_act.modify_id = vport->ingress.modify_metadata_id;
	vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
								  &spec, &flow_act, NULL, 0);
	if (IS_ERR(vport->ingress.modify_metadata_rule)) {
		err = PTR_ERR(vport->ingress.modify_metadata_rule);
		esw_warn(esw->dev,
			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
			 vport->vport, err);
		vport->ingress.modify_metadata_rule = NULL;
		goto out;
	}

out:
	if (err)
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);
	return err;
}

void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
					       struct mlx5_vport *vport)
{
	if (vport->ingress.modify_metadata_rule) {
		mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata_id);

		vport->ingress.modify_metadata_rule = NULL;
	}
}
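
/* Flow of the metadata scheme above (editor's note): the modify-header
 * allocated here executes at the vport's ingress ACL and writes the
 * vport-unique value from mlx5_eswitch_get_vport_metadata_for_match() into
 * reg_c_0. FDB rules built by mlx5_eswitch_set_rule_source_port() and the
 * vport rx rules then match that register instead of the source vport
 * number itself.
 */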
static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_spec *spec;
	int err = 0;

	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	/* For prio tag mode, there is only one FTE:
	 * 1) prio tag packets - pop the prio tag VLAN, allow
	 * Unmatched traffic is allowed by default
	 */

	esw_vport_cleanup_egress_rules(esw, vport);

	err = esw_vport_enable_egress_acl(esw, vport);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "failed to enable egress acl (%d) on vport[%d]\n",
			       err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure prio tag egress rules\n", vport->vport);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out_no_mem;
	}

	/* prio tag vlan rule - pop it so VF receives untagged packets */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
	vport->egress.allowed_vlan =
		mlx5_add_flow_rules(vport->egress.acl, spec,
				    &flow_act, NULL, 0);
	if (IS_ERR(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		esw_warn(esw->dev,
			 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
			 vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

out:
	kvfree(spec);
out_no_mem:
	if (err)
		esw_vport_cleanup_egress_rules(esw, vport);
	return err;
}
static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	    !MLX5_CAP_GEN(esw->dev, prio_tag_required))
		return 0;

	esw_vport_cleanup_ingress_rules(esw, vport);

	err = esw_vport_enable_ingress_acl(esw, vport);
	if (err) {
		esw_warn(esw->dev,
			 "failed to enable ingress acl (%d) on vport[%d]\n",
			 err, vport->vport);
		return err;
	}

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules\n", vport->vport);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
		if (err)
			goto out;
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
		err = esw_vport_ingress_prio_tag_config(esw, vport);
		if (err)
			goto out;
	}

out:
	if (err)
		esw_vport_disable_ingress_acl(esw, vport);
	return err;
}
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i, j;
	int err;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_vport_ingress_common_config(esw, vport);
		if (err)
			goto err_ingress;

		if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
			err = esw_vport_egress_prio_tag_config(esw, vport);
			if (err)
				goto err_egress;
		}
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");

	return 0;

err_egress:
	esw_vport_disable_ingress_acl(esw, vport);
err_ingress:
	for (j = MLX5_VPORT_PF; j < i; j++) {
		vport = &esw->vports[j];
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	return err;
}

static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_all_vports(esw, i, vport) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}

	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

	err = esw_create_offloads_acl_tables(esw);
	if (err)
		return err;

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw, nvports);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw, nvports);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	esw_destroy_offloads_acl_tables(esw);

	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_offloads_acl_tables(esw);
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_functions_out)] = {};
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	u16 num_vfs = 0;
	int err;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	err = mlx5_esw_query_functions(esw->dev, out, sizeof(out));
	num_vfs = MLX5_GET(query_esw_functions_out, out,
			   host_params_context.host_num_of_vfs);
	if (err || num_vfs == esw->esw_funcs.num_vfs)
		goto out;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		esw_offloads_unload_vf_reps(esw, esw->esw_funcs.num_vfs);
	} else {
		err = esw_offloads_load_vf_reps(esw, num_vfs);
		if (err)
			goto out;
	}

	esw->esw_funcs.num_vfs = num_vfs;

out:
	kfree(host_work);
}

static void esw_emulate_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work =
		container_of(work, struct mlx5_host_work, work);
	struct mlx5_eswitch *esw = host_work->esw;
	int err;

	if (esw->esw_funcs.num_vfs) {
		err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
		if (err)
			esw_warn(esw->dev, "Load vf reps err=%d\n", err);
	}
	kfree(host_work);
}
static int esw_functions_changed_event(struct notifier_block *nb,
				       unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	if (mlx5_eswitch_is_funcs_handler(esw->dev))
		INIT_WORK(&host_work->work,
			  esw_functions_changed_event_handler);
	else
		INIT_WORK(&host_work->work, esw_emulate_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
					     u16 vf_nvports)
{
	if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
		esw->esw_funcs.num_vfs = 0;
		MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	} else {
		esw->esw_funcs.num_vfs = vf_nvports;
	}
}

static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
		return;

	mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
	flush_workqueue(esw->work_queue);
}
int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
		      int total_nvports)
{
	int err;

	err = esw_offloads_steering_init(esw, total_nvports);
	if (err)
		return err;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		err = mlx5_eswitch_enable_passing_vport_metadata(esw);
		if (err)
			goto err_vport_metadata;
	}

	/* Only load special vports reps. VF reps will be loaded in
	 * context of functions_changed event handler through real
	 * or emulated event.
	 */
	err = esw_offloads_load_special_vport(esw);
	if (err)
		goto err_reps;

	esw_offloads_devcom_init(esw);

	esw_functions_changed_event_init(esw, vf_nvports);

	mlx5_rdma_enable_roce(esw->dev);

	/* Call esw_functions_changed event to load VF reps:
	 * 1. HW does not support the event then emulate it
	 * Or
	 * 2. The event was already notified when num_vfs changed
	 *    and eswitch was in legacy mode
	 */
	esw_functions_changed_event(&esw->esw_funcs.nb.nb,
				    MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
				    NULL);

	return 0;

err_reps:
	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		mlx5_eswitch_disable_passing_vport_metadata(esw);
err_vport_metadata:
	esw_offloads_steering_cleanup(esw);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	esw_functions_changed_event_cleanup(esw);
	mlx5_rdma_disable_roce(esw->dev);
	esw_offloads_devcom_cleanup(esw);
	esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
		mlx5_eswitch_disable_passing_vport_metadata(esw);
	esw_offloads_steering_cleanup(esw);
}
2198 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
2201 case DEVLINK_ESWITCH_MODE_LEGACY:
2202 *mlx5_mode = SRIOV_LEGACY;
2204 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
2205 *mlx5_mode = SRIOV_OFFLOADS;
2214 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
2216 switch (mlx5_mode) {
2218 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
2220 case SRIOV_OFFLOADS:
2221 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
2230 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
2233 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
2234 *mlx5_mode = MLX5_INLINE_MODE_NONE;
2236 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
2237 *mlx5_mode = MLX5_INLINE_MODE_L2;
2239 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
2240 *mlx5_mode = MLX5_INLINE_MODE_IP;
2242 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
2243 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

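/* Common sanity checks for all devlink eswitch callbacks: the port
 * must be Ethernet, this function must be the eswitch manager, and
 * the eswitch must be active (or externally managed, as on an ECPF).
 */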
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EPERM;

	if (dev->priv.eswitch->mode == SRIOV_NONE &&
	    !mlx5_core_is_ecpf_esw_manager(dev))
		return -EOPNOTSUPP;

	return 0;
}

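/* Backs "devlink dev eswitch set ... mode {legacy|switchdev}":
 * switching to switchdev starts offloads, switching to legacy stops
 * them; setting the already-active mode is a no-op.
 */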
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch, extack);
	else
		return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

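/* Apply a new minimal WQE inline mode to all enabled vports. Only
 * valid when the device uses per-vport (vport context) inline control
 * and no offloaded flows are installed; on partial failure, vports
 * that were already updated are reverted to the previous mode.
 */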
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev, vport,
						 esw->offloads.inline_mode);
out:
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

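/* Compute the effective inline mode covering @nvfs VF vports. When
 * inline control is per vport context, all VF vports must agree on a
 * single mode; a mismatch is reported as -EINVAL.
 */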
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

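/* Change the eswitch encapsulation mode. In offloads mode this
 * requires recreating the fast FDB table, which is only permitted
 * while no offloaded flows exist; on failure the previous mode is
 * restored and the table re-created.
 */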
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw, esw->nvports);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw, esw->nvports);
	}

	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}

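/* Register representor ops of @rep_type for all vports and mark them
 * REP_REGISTERED. A minimal caller sketch (the callback names below
 * are hypothetical, not part of this file):
 *
 *	static const struct mlx5_eswitch_rep_ops my_rep_ops = {
 *		.load = my_rep_load,
 *		.unload = my_rep_unload,
 *		.get_proto_dev = my_rep_get_proto_dev,
 *	};
 *	mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
 */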
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		rep_data = &rep->rep_data[rep_type];
		atomic_set(&rep_data->state, REP_REGISTERED);
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	u16 max_vf = mlx5_core_max_vfs(esw->dev);
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == SRIOV_OFFLOADS)
		__unload_reps_all_vport(esw, max_vf, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

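/* Return the protocol-private data (e.g. the netdev for an Ethernet
 * rep) of the representor on @vport, or NULL when the rep is not
 * loaded or its ops do not provide get_proto_dev.
 */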
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

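/* Build the metadata_reg_c_0 value that identifies @vport_num: the
 * low 16 bits of the VHCA id in the upper half and the vport number
 * in the lower half. This is the value matched on in
 * mlx5_eswitch_set_rule_source_port() when metadata matching is on.
 */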
u32 mlx5_eswitch_get_vport_metadata_for_match(const struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	return ((MLX5_CAP_GEN(esw->dev, vhca_id) & 0xffff) << 16) | vport_num;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);