/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"

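/* Iteration helpers for the vport representor xarray; the SF and VF
 * variants only visit entries tagged with the matching xarray mark.
 */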
#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep)	\
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

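/* Hint the firmware where the packets hitting this rule enter the
 * eswitch: rules installed for the uplink representor are marked with
 * the UPLINK flow source, all others with LOCAL_VPORT.
 */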
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower
 * 16 bits are not used by the subsequent processing either, so clear them all
 * for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because a vport is not represented by a single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(src_esw,
								   vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

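/* Decap via the indirect table is only set up for rules flagged for
 * source rewrite; otherwise it is refused with -EOPNOTSUPP.
 */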
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static void
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_esw_flow_attr *esw_attr,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = esw_attr->sample->sampler_id;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

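/* "Chain with source port change" destinations forward to the rule's
 * destination chain while applying a per-destination packet reformat
 * that rewrites the source port information.
 */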
static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	dest[dest_idx].vport.vhca_id =
		MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

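/* Translate flow attributes into the destination array. Exactly one of
 * the following is used, in order of precedence: flow sampler, explicit
 * destination table, slow path, chain jump, indirect table, chain jump
 * with source port rewrite, or plain vport destinations.
 */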
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, esw_attr, *i);
		(*i)++;
	} else if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

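/* Build and insert an offloaded FDB rule: resolve the table to add it
 * to (sample default table, per-vport mirror table for split rules, or
 * the tc chain table), set the source port match and, when required,
 * go through a termination table.
 */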
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	/* esw_attr->sample is allocated only when there is a sample action */
	if (esw_attr->sample && esw_attr->sample->sample_default_tbl) {
		fdb = esw_attr->sample->sample_default_tbl;
	} else if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	return rule;
}

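/* Add the fast-path half of a split (mirror) rule: the pre-mirror vport
 * destinations plus a final jump to the per-vport table where the
 * remaining actions are executed.
 */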
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}

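/* Common teardown for offloaded and fwd rules: delete the rule, release
 * any termination tables and drop the chain/vport table references
 * taken at add time.
 */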
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules without it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

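/* vlan push/pop is emulated by configuring vlan insertion/stripping on
 * the vport itself; refcounts guard the global pop policy and the
 * per-vport push setting.
 */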
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're in the vlan push/pop non-emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

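/* Match packets sent by the eswitch manager on a specific send queue and
 * forward them to the representor's vport; this is how traffic
 * transmitted on a representor netdev reaches its function.
 */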
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(rep->esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

	if (!num_vfs || !flows)
		return;

	for (i = 0; i < num_vfs; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}

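/* Per-VF rules that match the vport metadata in reg_c_0 plus the
 * slow-table GOTO mark in reg_c_1 and forward to the corresponding VF
 * vport.
 */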
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	int num_vfs, rule_idx = 0, err = 0;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		vport_num = vport->vport;
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

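/* Peer miss rules steer traffic sourced from the peer eswitch's vports
 * to the peer's manager vport. The match uses vport metadata when
 * enabled, otherwise source port plus owner vhca id.
 */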
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

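/* The restore rule matches a tag in reg_c_0, copies reg_c_1 into reg_b
 * via the pre-allocated modify header, sets the flow tag and forwards
 * to the offloads table.
 */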
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	kvfree(spec);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

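/* The slow path FDB contains, in order: the send-to-vport groups, the
 * peer eswitch miss group on merged-eswitch setups, and the group for
 * the two unicast/multicast miss flows that hand unmatched traffic to
 * the eswitch manager.
 */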
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = esw_chains_create(esw, fdb);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (esw_src_port_rewrite_supported(esw)) {
		/* meta send to vport */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

		num_vfs = esw->esw_funcs.num_vfs;
		if (num_vfs) {
			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
			MLX5_SET(create_flow_group_in, flow_group_in,
				 end_flow_index, ix + num_vfs - 1);
			ix += num_vfs;

			g = mlx5_create_flow_group(fdb, flow_group_in);
			if (IS_ERR(g)) {
				err = PTR_ERR(g);
				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
					 err);
				goto send_vport_meta_err;
			}
			esw->fdb_table.offloads.send_to_vport_meta_grp = g;

			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
			if (err)
				goto meta_rule_err;
		}
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
meta_rule_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);
	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);
	return err;
}
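/* Sketch of how the restore table behaves (explanatory, not upstream code):
 * each FTE in it matches the user-data bits of metadata reg_c_0 and copies
 * reg_c_1 into reg_b, so the object id mapped in the FDB survives into the
 * NIC RX domain:
 *
 *   match : reg_c_0 & ESW_REG_C0_USER_DATA_METADATA_MASK == <mapped id>
 *   action: copy(reg_c_1 -> reg_b), then continue to RX processing
 *
 * The <mapped id> values come from the reg_c0_obj_pool created in
 * esw_offloads_enable() below.
 */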
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err) {
		kfree(rep);
		return err;
	}

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;
}
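/* Background sketch (illustrative only): the reps live in an xarray and
 * inherit per-type marks (MLX5_ESW_VPT_HOST_FN/VF/SF) from the vports
 * xarray, which lets iterators walk a single class of reps cheaply:
 *
 *   struct mlx5_eswitch_rep *rep;
 *   unsigned long i;
 *
 *   xa_for_each_marked(&esw->offloads.vport_reps, i, rep, MLX5_ESW_VPT_SF)
 *           do_something(rep);    // visits SF reps only
 *
 * do_something() is a hypothetical placeholder, not a driver function.
 */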
static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
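/* Pairing lifecycle (explanatory summary, not code): on init each
 * merged-eswitch device registers with devcom and broadcasts a PAIR event;
 * the peer's event handler installs FDB peer-miss rules in both directions
 * and marks the pair as established. Cleanup sends UNPAIR first, so the
 * peer can tear down its half before this component is unregistered.
 */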
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = PCI_FUNC(esw->dev->pdev->devfn);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (1-4095) for all PFs */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
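/* Worked example (illustrative): with ESW_PFNUM_BITS = 4 and
 * ESW_VPORT_BITS = 12, a PF with pf_num 1 that allocates ida id 10 gets
 *
 *   metadata = (1 << 12) | 10 = 0x100a
 *
 * and mlx5_esw_match_metadata_free(esw, 0x100a) returns id 10
 * (0x100a & 0xfff) to the ida.
 */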
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
{
	int err = 0;

	down_write(&esw->mode_lock);
	if (esw->mode != MLX5_ESWITCH_NONE) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (enable)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}

static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static int
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;
	int err = 0;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return 0;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return err;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	return 0;
}
static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non-local controllers with a non-zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* The local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* The external host number starts at zero in the device */
	return (controller == esw->offloads.host_number + 1);
}
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj),
					 ESW_REG_C0_USER_DATA_METADATA_MASK,
					 true);
	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
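/* Note on the error unwind above (explanatory): each err_* label undoes
 * exactly the steps that completed before the failure, in reverse order of
 * setup, mirroring esw_offloads_disable() below. Keeping the two orderings
 * in sync is what makes partial-failure cleanup safe here.
 */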
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		return err;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mlx5_esw_unlock(esw);
	return err;
}
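/* Usage sketch (illustrative): this is the handler behind the devlink
 * eswitch mode knob. With a hypothetical device at 0000:06:00.0:
 *
 *   $ devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *   $ devlink dev eswitch show pci/0000:06:00.0
 *
 * Switching to switchdev tears the legacy eswitch down and brings the
 * offloads mode up via esw_offloads_start(); the exact "show" output
 * format depends on the iproute2 version.
 */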
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	up_write(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}
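/* Usage sketch (illustrative): encap offload is toggled via devlink; the
 * keyword is "encap" or "encap-mode" depending on the iproute2 version,
 * e.g. on a hypothetical device at 0000:06:00.0:
 *
 *   $ devlink dev eswitch set pci/0000:06:00.0 encap-mode basic
 *
 * As enforced above, the request is refused while offloaded flows exist,
 * since changing it requires recreating the fast FDB tables.
 */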
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	up_write(&esw->mode_lock);
	return err;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF-based devices have a representor for the host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
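/* Registration sketch (illustrative, not part of this driver): a rep user
 * such as an Ethernet driver supplies load/unload callbacks and registers
 * them per rep type. my_rep_load/my_rep_unload/my_rep_get_proto_dev are
 * hypothetical placeholders:
 *
 *   static const struct mlx5_eswitch_rep_ops my_rep_ops = {
 *           .load          = my_rep_load,    // rep goes REP_LOADED
 *           .unload        = my_rep_unload,  // rep unload/unregister
 *           .get_proto_dev = my_rep_get_proto_dev,
 *   };
 *
 *   mlx5_eswitch_register_vport_reps(esw, &my_rep_ops, REP_ETH);
 *   ...
 *   mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
 */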
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
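/* Worked example (illustrative): with 16 source-port metadata bits
 * (ESW_SOURCE_PORT_METADATA_BITS = ESW_PFNUM_BITS + ESW_VPORT_BITS),
 * a vport whose metadata is 0x100a matches on reg_c_0 as
 *
 *   0x100a << (32 - 16) = 0x100a0000
 *
 * i.e. the source-port id occupies the upper 16 bits of reg_c_0, leaving
 * the lower bits free for the user-data mapping used by the restore table.
 */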
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}

int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}
int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}

u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);