/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

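/* The iterators above walk the vport representor xarray; the SF and VF
 * variants rely on the MLX5_ESW_VPT_SF / MLX5_ESW_VPT_VF marks applied to
 * each entry in mlx5_esw_offloads_rep_init() below. Illustrative sketch of
 * a caller (this exact loop appears in __unload_reps_all_vport() at the
 * end of this file):
 *
 *	struct mlx5_eswitch_rep *rep;
 *	unsigned long i;
 *
 *	mlx5_esw_for_each_vf_rep(esw, i, rep)
 *		__esw_offloads_unload_rep(esw, rep, rep_type);
 */
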
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower
 * 16 bits are not used later in the pipeline either, so clear the whole
 * register for simplicity.
 */
static void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because a vport is not represented by a
	 * single VHCA in dual-port RoCE mode, and matching on source vport
	 * may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

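/* Illustrative example: with metadata matching enabled, a rule scoped to a
 * given source vport carries no source_port match at all; instead reg c0 is
 * matched against the vport's metadata value under
 * mlx5_eswitch_get_vport_metadata_mask(). Callers in this file invoke:
 *
 *	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
 *					  esw_attr->in_mdev->priv.eswitch,
 *					  esw_attr->in_rep->vport);
 */
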
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static void
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_esw_flow_attr *esw_attr,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = esw_attr->sample->sampler_id;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

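/* Source port rewrite needs all three capabilities checked above: the device
 * must preserve reg c across loopback (reg_c_preserve), vport match metadata
 * must be enabled, and the FDB must accept rules that ignore flow level so
 * destinations can point back into lower-level tables.
 */
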
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, esw_attr, *i);
		(*i)++;
	} else if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

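/* Destination setup above is strictly ordered: sampler first, then an
 * explicit dest_ft, then the slow path table, then a goto-chain, then
 * indirect tables, then chain src port rewrite, and finally plain vport
 * destinations as the default case.
 */
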
static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per-flow vlan pop/push is emulated, don't program it into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	/* esw_attr->sample is allocated only when there is a sample action */
	if (esw_attr->sample && esw_attr->sample->sample_default_tbl) {
		fdb = esw_attr->sample->sample_default_tbl;
	} else if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}

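/* Minimal caller sketch (illustrative only; real callers live in the TC
 * offload path and populate many more attr fields):
 *
 *	attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 *	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
 *	if (IS_ERR(rule))
 *		return PTR_ERR(rule);
 *	...
 *	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
 */
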
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

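/* A "fwd" rule implements mirroring in two hops: the rule in the fast FDB
 * forwards to the split destinations plus the per-vport table (fwd_fdb),
 * where the remaining destinations are handled. It must be torn down with
 * mlx5_eswitch_del_fwd_rule(), which releases the same vport table and
 * chain references taken here.
 */
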
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* the vport has vlan push configured; VF --> wire rules can't be offloaded without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules without vlans (attr->vlan = 0) together with rules
	 * with vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* this is the first vlan rule; apply the global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

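/* Two reference counts back this emulation: vport->vlan_refcount tracks how
 * many offloaded rules depend on the vlan configured on a given vport, while
 * offloads->vlan_push_pop_refcount tracks how many rules rely on the global
 * vlan pop policy applied to the host function vports.
 */
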
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* no-op if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no vlan rules left; stop the global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(rep->esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

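/* Illustrative sketch: a representor datapath typically installs one such
 * rule per send queue, so traffic transmitted on the rep's SQs is steered to
 * the represented vport:
 *
 *	flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep, sqn);
 *	if (IS_ERR(flow_rule))
 *		return PTR_ERR(flow_rule);
 *	...
 *	mlx5_eswitch_del_send_to_vport_rule(flow_rule);
 */
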
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

	if (!num_vfs || !flows)
		return;

	for (i = 0; i < num_vfs; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	int num_vfs, rule_idx = 0, err = 0;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		vport_num = vport->vport;
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

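/* fdb_to_vport_reg_c_id is a bitmask selecting which reg c registers the FDB
 * propagates towards the vport. For example, enabling on a device with reg c1
 * loopback support ORs in MLX5_FDB_TO_VPORT_REG_C_0 |
 * MLX5_FDB_TO_VPORT_REG_C_1, while disabling clears those same bits and
 * leaves any others untouched.
 */
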
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* the total vport count is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

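/* A restore rule therefore does three things for a given tag: match the tag
 * in reg c0 (under ESW_REG_C0_USER_DATA_METADATA_MASK), copy reg c1 into
 * reg b so software can recover the tunnel mapping, and set the flow tag so
 * the tag itself is delivered with the CQE.
 */
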
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when the ttl workaround is needed, e.g.
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft, the ft chain that is always created */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios aren't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

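	/* The FTE budget above covers: one send-to-vport entry per SQ
	 * (MAX_SQ_NVPORTS per vport plus MAX_PF_SQ for the PF), the two
	 * match-all miss flows, one peer miss entry per vport, and one
	 * send-to-vport meta entry per VF.
	 */
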
	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = esw_chains_create(esw, fdb);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (esw_src_port_rewrite_supported(esw)) {
		/* meta send to vport */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

		num_vfs = esw->esw_funcs.num_vfs;
		if (num_vfs) {
			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
			MLX5_SET(create_flow_group_in, flow_group_in,
				 end_flow_index, ix + num_vfs - 1);
			ix += num_vfs;

			g = mlx5_create_flow_group(fdb, flow_group_in);
			if (IS_ERR(g)) {
				err = PTR_ERR(g);
				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
					 err);
				goto send_vport_meta_err;
			}
			esw->fdb_table.offloads.send_to_vport_meta_grp = g;

			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
			if (err)
				goto meta_rule_err;
		}
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
meta_rule_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

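/* Illustrative sketch (tirn is a hypothetical TIR number owned by the
 * caller): steering a vport's offloaded RX traffic into a TIR would look
 * like:
 *
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *
 *	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw, vport, &dest);
 */
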
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
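/* Illustrative sketch (not part of the driver): rules added on top of this
 * restore table match the id that was stashed in the user-data bits of
 * reg_c_0, copy reg_c_1 into reg_b via restore_copy_hdr_id, and forward the
 * packet back to its rep, roughly:
 *
 *	misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 *			     misc_parameters_2);
 *	MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, tag);
 *	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 *			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 *	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;
 *
 * The helper that actually builds such rules lives elsewhere in this file;
 * the snippet above only names fields set up by this function.
 */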
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}

	return err;
}
static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}
static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;

insert_err:
	kfree(rep);
	return err;
}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}
static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}
void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
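/* Note on the rep state machine (summary of the code above, not new
 * behavior): each rep type moves through
 *
 *	REP_UNREGISTERED -> REP_REGISTERED -> REP_LOADED
 *
 * where REP_REGISTERED means ops were registered and REP_LOADED means
 * rep_ops->load() succeeded. The atomic_cmpxchg() transitions make
 * load/unload idempotent, e.g.:
 *
 *	if (atomic_cmpxchg(&state, REP_REGISTERED, REP_LOADED) ==
 *	    REP_REGISTERED)
 *		ops->load(...);	// exactly one caller wins the transition
 *
 * so a rep that never loaded is silently skipped on unload and callers
 * need not track which rep types were actually brought up.
 */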
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
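/* Illustrative pairing sequence under the merged_eswitch cap, assuming two
 * PFs A and B of the same device both register the MLX5_DEVCOM_ESW_OFFLOADS
 * component:
 *
 *	A: esw_offloads_devcom_init() -> send ESW_OFFLOADS_DEVCOM_PAIR
 *	B: mlx5_esw_offloads_devcom_event(PAIR)
 *	     -> set_ns_peer(B, A)
 *	     -> mlx5_esw_offloads_pair(B, A) and (A, B)  (miss rules both ways)
 *	     -> mlx5_devcom_set_paired(true)
 *
 * Teardown reverses this: UNPAIR is sent before the component is
 * unregistered, so peer miss rules are removed on both sides first.
 */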
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = PCI_FUNC(esw->dev->pdev->devfn);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (1-4095) for all PF's */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
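/* Worked example (illustrative): with ESW_PFNUM_BITS == 4 and
 * ESW_VPORT_BITS == 12, a PF with pf_num 2 that gets ida id 5 produces
 *
 *	metadata = (2 << 12) | 5 = 0x2005
 *
 * and mlx5_esw_match_metadata_free(esw, 0x2005) returns id 5 to the ida
 * (0x2005 & 0xfff). The 1..4095 ida range keeps the low 12 bits non-zero,
 * so a metadata value of 0 can safely mean "no metadata allocated".
 */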
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
{
	int err = 0;

	down_write(&esw->mode_lock);
	if (esw->mode != MLX5_ESWITCH_NONE) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (enable)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
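/* Example (illustrative): with esw_funcs.num_vfs == 0, a FW event reporting
 * host_num_of_vfs == 4 loads four VF vports and sets num_vfs to 4; the
 * reverse event (4 -> 0) unloads them again. The handler relies on FW
 * never reporting a direct x -> y change with both values non-zero, per
 * the comment above.
 */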
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* Local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* External host number starts with zero in device */
	return (controller == esw->offloads.host_number + 1);
}
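/* Example (illustrative): on an ECPF-managed SmartNIC the device reports
 * the external host as host_number 0, so devlink callers address it as
 * controller 1, while controller 0 always means the local (embedded)
 * controller. On a plain NIC without an embedded CPU eswitch manager,
 * only controller 0 is valid.
 */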
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj),
					 ESW_REG_C0_USER_DATA_METADATA_MASK,
					 true);
	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
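/* For reference, these two helpers map the devlink UAPI values driven by
 * e.g. "devlink dev eswitch set pci/0000:03:00.0 inline-mode network"
 * (an illustrative invocation; the PCI address is hypothetical) onto the
 * MLX5_INLINE_MODE_* values programmed into the NIC vport context:
 * none <-> NONE, link <-> L2, network <-> IP, transport <-> TCP_UDP.
 */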
static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		return err;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mlx5_esw_unlock(esw);
	return err;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	up_write(&esw->mode_lock);
	return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
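/* Worked example (illustrative): with ESW_SOURCE_PORT_METADATA_BITS == 16,
 * per-vport metadata 0x2005 is presented for matching as
 *
 *	0x2005 << (32 - 16) = 0x20050000
 *
 * i.e. the source-port id occupies the upper 16 bits of reg_c_0, leaving
 * the lower user-data bits free for the reg_c0_obj_pool mappings consumed
 * by the restore table.
 */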
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);