/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"

enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};

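/* The offloads FDB keeps a 3-D cache of lazily created flow tables,
 * indexed by (chain, prio, level); fdb_prio_table() below is the
 * accessor, and esw_get_prio_table()/esw_put_prio_table() refcount
 * the entries.
 */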
#define fdb_prio_table(esw, chain, prio, level) \
        (esw)->fdb_table.offloads.fdb_prio[(chain)][(prio)][(level)]

static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);
static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level);

bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
{
        return !!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED);
}

u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
                return FDB_MAX_CHAIN;

        return 0;
}

u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
{
        if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
                return FDB_MAX_PRIO;

        return 1;
}

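/* Build the destination list for an offloaded rule: either a goto-chain
 * table or the attr's forward vports (with optional encap reformat),
 * plus an optional flow counter, then add the rule to the table of the
 * requested chain/prio.
 */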
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        bool split = !!(attr->split_count);
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_table *fdb;
        int j, i = 0;
        void *misc;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        flow_act.action = attr->action;
        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
        if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                                     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
                flow_act.vlan[0].vid = attr->vlan_vid[0];
                flow_act.vlan[0].prio = attr->vlan_prio[0];
                if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
                        flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
                        flow_act.vlan[1].vid = attr->vlan_vid[1];
                        flow_act.vlan[1].prio = attr->vlan_prio[1];
                }
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                if (attr->dest_chain) {
                        struct mlx5_flow_table *ft;

                        ft = esw_get_prio_table(esw, attr->dest_chain, 1, 0);
                        if (IS_ERR(ft)) {
                                rule = ERR_CAST(ft);
                                goto err_create_goto_table;
                        }

                        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[i].ft = ft;
                        i++;
                } else {
                        for (j = attr->split_count; j < attr->out_count; j++) {
                                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                                dest[i].vport.num = attr->dests[j].rep->vport;
                                dest[i].vport.vhca_id =
                                        MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
                                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                                        dest[i].vport.flags |=
                                                MLX5_FLOW_DEST_VPORT_VHCA_ID;
                                if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
                                        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
                                        flow_act.reformat_id = attr->dests[j].encap_id;
                                        dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                                        dest[i].vport.reformat_id =
                                                attr->dests[j].encap_id;
                                }
                                i++;
                        }
                }
        }
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter_id = mlx5_fc_id(attr->counter);
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(attr->in_mdev, vhca_id));

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        if (attr->match_level == MLX5_MATCH_NONE)
                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        else
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                              MLX5_MATCH_MISC_PARAMETERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_id = attr->mod_hdr_id;

        fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
        if (IS_ERR(fdb)) {
                rule = ERR_CAST(fdb);
                goto err_esw_get;
        }

        rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                esw->offloads.num_flows++;

        return rule;

err_add_rule:
        esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
err_esw_get:
        if (attr->dest_chain)
                esw_put_prio_table(esw, attr->dest_chain, 1, 0);
err_create_goto_table:
        return rule;
}

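/* A "fwd" rule implements mirroring with a two-level table pair: the
 * level 0 (fast) table forwards to the split vports and chains into the
 * level 1 (fwd) table, where the rest of the flow's actions are applied
 * (see mlx5_eswitch_add_offloaded_rule() with a non-zero split_count).
 */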
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_spec *spec,
                          struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
        struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
        struct mlx5_flow_table *fast_fdb;
        struct mlx5_flow_table *fwd_fdb;
        struct mlx5_flow_handle *rule;
        void *misc;
        int i;

        fast_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 0);
        if (IS_ERR(fast_fdb)) {
                rule = ERR_CAST(fast_fdb);
                goto err_get_fast;
        }

        fwd_fdb = esw_get_prio_table(esw, attr->chain, attr->prio, 1);
        if (IS_ERR(fwd_fdb)) {
                rule = ERR_CAST(fwd_fdb);
                goto err_get_fwd;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        for (i = 0; i < attr->split_count; i++) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport.num = attr->dests[i].rep->vport;
                dest[i].vport.vhca_id =
                        MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
                if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                        dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
                if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
                        dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
                        dest[i].vport.reformat_id = attr->dests[i].encap_id;
                }
        }
        dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[i].ft = fwd_fdb;
        i++;

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id,
                         MLX5_CAP_GEN(attr->in_mdev, vhca_id));

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
                MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                                 source_eswitch_owner_vhca_id);

        if (attr->match_level == MLX5_MATCH_NONE)
                spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        else
                spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                              MLX5_MATCH_MISC_PARAMETERS;

        rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto add_err;

        esw->offloads.num_flows++;

        return rule;

add_err:
        esw_put_prio_table(esw, attr->chain, attr->prio, 1);
err_get_fwd:
        esw_put_prio_table(esw, attr->chain, attr->prio, 0);
err_get_fast:
        return rule;
}

static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
                        struct mlx5_flow_handle *rule,
                        struct mlx5_esw_flow_attr *attr,
                        bool fwd_rule)
{
        bool split = (attr->split_count > 0);

        mlx5_del_flow_rules(rule);
        esw->offloads.num_flows--;

        if (fwd_rule) {
                esw_put_prio_table(esw, attr->chain, attr->prio, 1);
                esw_put_prio_table(esw, attr->chain, attr->prio, 0);
        } else {
                esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
                if (attr->dest_chain)
                        esw_put_prio_table(esw, attr->dest_chain, 1, 0);
        }
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_esw_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
                          struct mlx5_flow_handle *rule,
                          struct mlx5_esw_flow_attr *attr)
{
        __mlx5_eswitch_del_rule(esw, rule, attr, true);
}

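/* When the firmware can't pop/push vlan per flow, the driver emulates it
 * with per-vport vlan strip/insert configuration; the helpers below apply
 * and refcount that global state.
 */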
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->rep_if[REP_ETH].valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep  = attr->in_rep;
        out_rep = attr->dests[0].rep;

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep;

        return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep  = attr->in_rep;
        out_rep = attr->dests[0].rep;

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules without it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}

int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
                   !attr->dest_chain);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan_vid[0];
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
                return 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}

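/* The send-to-vport rule matches packets that a representor SQ injects
 * (source vport 0, given sqn) and steers them out of the matching vport,
 * so traffic sent on a representor netdev reaches its VF.
 */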
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
        mlx5_del_flow_rules(rule);
}

static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev,
                                  struct mlx5_flow_spec *spec,
                                  struct mlx5_flow_destination *dest)
{
        void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                  misc_parameters);

        MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
                 MLX5_CAP_GEN(peer_dev, vhca_id));

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                            misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc,
                         source_eswitch_owner_vhca_id);

        dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest->vport.num = 0;
        dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
        dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

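/* Peer miss rules (merged e-switch): for every vport of the peer device,
 * match on the peer's vhca_id plus source port, and forward to vport 0 of
 * the peer e-switch, so packets that crossed over and missed here return
 * to their origin.
 */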
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
                                       struct mlx5_core_dev *peer_dev)
{
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle **flows;
        struct mlx5_flow_handle *flow;
        struct mlx5_flow_spec *spec;
        /* total vports is the same for both e-switches */
        int nvports = esw->total_vports;
        void *misc;
        int err, i;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec)
                return -ENOMEM;

        peer_miss_rules_setup(peer_dev, spec, &dest);

        flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
        if (!flows) {
                err = -ENOMEM;
                goto alloc_flows_err;
        }

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                            misc_parameters);

        for (i = 1; i < nvports; i++) {
                MLX5_SET(fte_match_set_misc, misc, source_port, i);
                flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
                                           spec, &flow_act, &dest, 1);
                if (IS_ERR(flow)) {
                        err = PTR_ERR(flow);
                        esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
                        goto add_flow_err;
                }
                flows[i] = flow;
        }

        esw->fdb_table.offloads.peer_miss_rules = flows;

        kvfree(spec);
        return 0;

add_flow_err:
        for (i--; i > 0; i--)
                mlx5_del_flow_rules(flows[i]);
        kvfree(flows);
alloc_flows_err:
        kvfree(spec);
        return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_handle **flows;
        int i;

        flows = esw->fdb_table.offloads.peer_miss_rules;

        for (i = 1; i < esw->total_vports; i++)
                mlx5_del_flow_rules(flows[i]);

        kvfree(flows);
}

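/* Catch-all miss rules in the slow path FDB: one for unicast and one for
 * multicast dmac (distinguished by the multicast bit), both forwarding to
 * vport 0 so misses reach the PF/representor datapath.
 */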
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        void *headers_c;
        void *headers_v;
        int err = 0;
        u8 *dmac_c;
        u8 *dmac_v;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                 outer_headers);
        dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
                              outer_headers.dmac_47_16);
        dmac_c[0] = 0x01;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport.num = 0;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_uni = flow_rule;

        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                 outer_headers);
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
                              outer_headers.dmac_47_16);
        dmac_v[0] = 0x01;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
                mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
        kvfree(spec);
        return err;
}

#define ESW_OFFLOADS_NUM_GROUPS  4

/* Firmware currently supports 4 pools of 4 sizes (ESW_POOLS), each backed
 * by a virtual memory region of 16M (ESW_SIZE) that is duplicated for
 * every flow table pool. We can allocate up to 16M of each pool, and we
 * track how much we've used via put/get_sz_to_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOLS is expected to be sorted from large to small.
 */
#define ESW_SIZE (16 * 1024 * 1024)
const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
                                    64 * 1024, 4 * 1024 };

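/* With the largest table size allowed by firmware (fdb_max >= 4M entries),
 * fdb_left starts as { 4, 16, 256, 4096 }: ESW_SIZE / ESW_POOLS[i] slots
 * per pool (see the initialization in esw_create_offloads_fdb_tables()).
 */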
static int
get_sz_from_pool(struct mlx5_eswitch *esw)
{
        int sz = 0, i;

        for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
                if (esw->fdb_table.offloads.fdb_left[i]) {
                        --esw->fdb_table.offloads.fdb_left[i];
                        sz = ESW_POOLS[i];
                        break;
                }
        }

        return sz;
}

static void
put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++) {
                if (sz >= ESW_POOLS[i]) {
                        ++esw->fdb_table.offloads.fdb_left[i];
                        break;
                }
        }
}

static struct mlx5_flow_table *
create_next_size_table(struct mlx5_eswitch *esw,
                       struct mlx5_flow_namespace *ns,
                       u16 table_prio,
                       int level,
                       u32 flags)
{
        struct mlx5_flow_table *fdb;
        int sz;

        sz = get_sz_from_pool(esw);
        if (!sz)
                return ERR_PTR(-ENOSPC);

        fdb = mlx5_create_auto_grouped_flow_table(ns,
                                                  table_prio,
                                                  sz,
                                                  ESW_OFFLOADS_NUM_GROUPS,
                                                  level,
                                                  flags);
        if (IS_ERR(fdb)) {
                esw_warn(esw->dev, "Failed to create FDB Table err %d (table prio: %d, level: %d, size: %d)\n",
                         (int)PTR_ERR(fdb), table_prio, level, sz);
                put_sz_to_pool(esw, sz);
        }

        return fdb;
}

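/* Look up or lazily create the flow table for (chain, prio, level). All
 * levels up to the requested one are created and hold a num_rules
 * reference, under fdb_prio_lock; esw_put_prio_table() is the inverse.
 */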
static struct mlx5_flow_table *
esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *fdb = NULL;
        struct mlx5_flow_namespace *ns;
        int table_prio, l = 0;
        u32 flags = 0;

        if (chain == FDB_SLOW_PATH_CHAIN)
                return esw->fdb_table.offloads.slow_fdb;

        mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

        fdb = fdb_prio_table(esw, chain, prio, level).fdb;
        if (fdb) {
                /* take ref on earlier levels as well */
                while (level >= 0)
                        fdb_prio_table(esw, chain, prio, level--).num_rules++;
                mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
                return fdb;
        }

        ns = mlx5_get_fdb_sub_ns(dev, chain);
        if (!ns) {
                esw_warn(dev, "Failed to get FDB sub namespace\n");
                mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
                return ERR_PTR(-EOPNOTSUPP);
        }

        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
                          MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

        table_prio = (chain * FDB_MAX_PRIO) + prio - 1;

        /* create earlier levels for correct fs_core lookup when
         * connecting tables
         */
        for (l = 0; l <= level; l++) {
                if (fdb_prio_table(esw, chain, prio, l).fdb) {
                        fdb_prio_table(esw, chain, prio, l).num_rules++;
                        continue;
                }

                fdb = create_next_size_table(esw, ns, table_prio, l, flags);
                if (IS_ERR(fdb)) {
                        l--;
                        goto err_create_fdb;
                }

                fdb_prio_table(esw, chain, prio, l).fdb = fdb;
                fdb_prio_table(esw, chain, prio, l).num_rules = 1;
        }

        mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
        return fdb;

err_create_fdb:
        mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
        if (l >= 0)
                esw_put_prio_table(esw, chain, prio, l);

        return fdb;
}

static void
esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
{
        int l;

        if (chain == FDB_SLOW_PATH_CHAIN)
                return;

        mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);

        for (l = level; l >= 0; l--) {
                if (--(fdb_prio_table(esw, chain, prio, l).num_rules) > 0)
                        continue;

                put_sz_to_pool(esw, fdb_prio_table(esw, chain, prio, l).fdb->max_fte);
                mlx5_destroy_flow_table(fdb_prio_table(esw, chain, prio, l).fdb);
                fdb_prio_table(esw, chain, prio, l).fdb = NULL;
        }

        mutex_unlock(&esw->fdb_table.offloads.fdb_prio_lock);
}

static void esw_destroy_offloads_fast_fdb_tables(struct mlx5_eswitch *esw)
{
        /* If lazy creation isn't supported, deref the fast path tables */
        if (!(esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)) {
                esw_put_prio_table(esw, 0, 1, 1);
                esw_put_prio_table(esw, 0, 1, 0);
        }
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        u32 *flow_group_in, max_flow_counter;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int table_size, ix, err = 0, i;
        struct mlx5_flow_group *g;
        u32 flags = 0, fdb_max;
        void *match_criteria;
        u8 *dmac;

        esw_debug(esw->dev, "Create offloads FDB Tables\n");
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                            MLX5_CAP_GEN(dev, max_flow_counter_15_0);
        fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

        esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
                  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
                  fdb_max);

        for (i = 0; i < ARRAY_SIZE(ESW_POOLS); i++)
                esw->fdb_table.offloads.fdb_left[i] =
                        ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;

        table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 +
                esw->total_vports;

        /* create the slow path fdb with encap set, so further table instances
         * can be created at run time while VFs are probed if the FW allows that.
         */
        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
                          MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

        ft_attr.flags = flags;
        ft_attr.max_fte = table_size;
        ft_attr.prio = FDB_SLOW_PATH;

        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.slow_fdb = fdb;

        /* If lazy creation isn't supported, open the fast path tables now */
        if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
            esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
                esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
                esw_warn(dev, "Lazy creation of flow tables isn't supported, ignoring priorities\n");
                esw_get_prio_table(esw, 0, 1, 0);
                esw_get_prio_table(esw, 0, 1, 1);
        } else {
                esw_debug(dev, "Lazy creation of flow tables supported, deferring table opening\n");
                esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
        }

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create peer esw miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                      match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                         misc_parameters.source_port);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria,
                         misc_parameters.source_eswitch_owner_vhca_id);

        MLX5_SET(create_flow_group_in, flow_group_in,
                 source_eswitch_owner_vhca_id_valid, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
                 ix + esw->total_vports - 1);
        ix += esw->total_vports;

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
                goto peer_miss_err;
        }
        esw->fdb_table.offloads.peer_miss_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                      match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                            outer_headers.dmac_47_16);
        dmac[0] = 0x01;

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        esw->nvports = nvports;
        kvfree(flow_group_in);
        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        esw_destroy_offloads_fast_fdb_tables(esw);
        mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.offloads.slow_fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
        esw_destroy_offloads_fast_fdb_tables(esw);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_namespace *ns;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

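/* Steer traffic the e-switch delivers on behalf of a given vport (matched
 * on misc source_port) to the caller-supplied destination, e.g. a TIR of
 * the representor's receive path.
 */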
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
                                  struct mlx5_flow_destination *dest)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
                              struct netlink_ext_ack *extack)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't set offloads mode, SRIOV legacy not enabled");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Failed setting eswitch to offloads");
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed setting eswitch back to legacy");
                }
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Inline mode is different between vports");
                }
        }

        return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
        kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
        int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_esw_offload *offloads;
        struct mlx5_eswitch_rep *rep;
        u8 hw_id[ETH_ALEN];
        int vport;

        esw->offloads.vport_reps = kcalloc(total_vfs,
                                           sizeof(struct mlx5_eswitch_rep),
                                           GFP_KERNEL);
        if (!esw->offloads.vport_reps)
                return -ENOMEM;

        offloads = &esw->offloads;
        mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

        for (vport = 0; vport < total_vfs; vport++) {
                rep = &offloads->vport_reps[vport];

                rep->vport = vport;
                ether_addr_copy(rep->hw_id, hw_id);
        }

        offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

        return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
                                          u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = nvports - 1; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                rep->rep_if[rep_type].unload(rep);
        }
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = NUM_REP_TYPES;

        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
                                       u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                err = rep->rep_if[rep_type].load(esw->dev, rep);
                if (err)
                        goto err_reps;
        }

        return 0;

err_reps:
        esw_offloads_unload_reps_type(esw, vport, rep_type);
        return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = 0;
        int err;

        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
                err = esw_offloads_load_reps_type(esw, nvports, rep_type);
                if (err)
                        goto err_reps;
        }

        return err;

err_reps:
        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
        return err;
}

#define ESW_OFFLOADS_DEVCOM_PAIR        (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR      (1)

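/* Device pairing (merged e-switch): devcom delivers PAIR/UNPAIR events
 * between the two PFs; on PAIR each side installs peer miss rules for the
 * other, and mlx5_devcom_set_paired() marks the pair active.
 */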
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
                                  struct mlx5_eswitch *peer_esw)
{
        int err;

        err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
        if (err)
                return err;

        return 0;
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
        mlx5e_tc_clean_fdb_peer_flows(esw);
        esw_del_fdb_peer_miss_rules(esw);
}

static int mlx5_esw_offloads_devcom_event(int event,
                                          void *my_data,
                                          void *event_data)
{
        struct mlx5_eswitch *esw = my_data;
        struct mlx5_eswitch *peer_esw = event_data;
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
        int err;

        switch (event) {
        case ESW_OFFLOADS_DEVCOM_PAIR:
                err = mlx5_esw_offloads_pair(esw, peer_esw);
                if (err)
                        goto err_out;

                err = mlx5_esw_offloads_pair(peer_esw, esw);
                if (err)
                        goto err_pair;

                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
                break;

        case ESW_OFFLOADS_DEVCOM_UNPAIR:
                if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
                        break;

                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
                mlx5_esw_offloads_unpair(peer_esw);
                mlx5_esw_offloads_unpair(esw);
                break;
        }

        return 0;

err_pair:
        mlx5_esw_offloads_unpair(esw);

err_out:
        mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
                      event, err);
        return err;
}

static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;

        INIT_LIST_HEAD(&esw->offloads.peer_flows);
        mutex_init(&esw->offloads.peer_mutex);

        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
                return;

        mlx5_devcom_register_component(devcom,
                                       MLX5_DEVCOM_ESW_OFFLOADS,
                                       mlx5_esw_offloads_devcom_event,
                                       esw);

        mlx5_devcom_send_event(devcom,
                               MLX5_DEVCOM_ESW_OFFLOADS,
                               ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;

        if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
                return;

        mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
                               ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

        mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

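/* Offloads bring-up order: FDB tables, offloads (rx) table, vport rx
 * group, representors, then devcom pairing; esw_offloads_cleanup() tears
 * down in reverse.
 */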
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        int err;

        mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);

        err = esw_create_offloads_fdb_tables(esw, nvports);
        if (err)
                return err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        err = esw_offloads_load_reps(esw, nvports);
        if (err)
                goto err_reps;

        esw_offloads_devcom_init(esw);
        return 0;

err_reps:
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_tables(esw);

        return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw,
                             struct netlink_ext_ack *extack)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed setting eswitch back to offloads");
                }
        }

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        esw_offloads_devcom_cleanup(esw);
        esw_offloads_unload_reps(esw, nvports);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
                *mlx5_mode = MLX5_INLINE_MODE_NONE;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
                *mlx5_mode = MLX5_INLINE_MODE_L2;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
                *mlx5_mode = MLX5_INLINE_MODE_IP;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
        switch (mlx5_mode) {
        case MLX5_INLINE_MODE_NONE:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
                break;
        case MLX5_INLINE_MODE_L2:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
                break;
        case MLX5_INLINE_MODE_IP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
                break;
        case MLX5_INLINE_MODE_TCP_UDP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;

        if (!MLX5_ESWITCH_MANAGER(dev))
                return -EPERM;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return 0;
}

int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
                                  struct netlink_ext_ack *extack)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        u16 cur_mlx5_mode, mlx5_mode = 0;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch, extack);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch, extack);

        return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
                                         struct netlink_ext_ack *extack)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err, vport;
        u8 mlx5_mode;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
                        return 0;
                /* fall through */
        case MLX5_CAP_INLINE_MODE_L2:
                NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
                return -EOPNOTSUPP;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                break;
        }

        if (esw->offloads.num_flows > 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't set inline mode when flows are configured");
                return -EOPNOTSUPP;
        }

        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;

        for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed to set min inline on vport");
                        goto revert_inline_mode;
                }
        }

        esw->offloads.inline_mode = mlx5_mode;
        return 0;

revert_inline_mode:
        while (--vport > 0)
                mlx5_modify_nic_vport_min_inline(dev,
                                                 vport,
                                                 esw->offloads.inline_mode);
out:
        return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

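/* When min-inline is configured per vport context, all VFs must report
 * the same mode for a coherent answer; otherwise the mode implied by the
 * device capability is returned.
 */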
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        int vport;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                mlx5_mode = MLX5_INLINE_MODE_NONE;
                goto out;
        case MLX5_CAP_INLINE_MODE_L2:
                mlx5_mode = MLX5_INLINE_MODE_L2;
                goto out;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                goto query_vports;
        }

query_vports:
        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

out:
        *mode = mlx5_mode;
        return 0;
}

int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap,
                                        struct netlink_ext_ack *extack)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
             !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
                return -EOPNOTSUPP;

        if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_LEGACY) {
                esw->offloads.encap = encap;
                return 0;
        }

        if (esw->offloads.encap == encap)
                return 0;

        if (esw->offloads.num_flows > 0) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Can't set encapsulation when flows are configured");
                return -EOPNOTSUPP;
        }

        esw_destroy_offloads_fdb_tables(esw);

        esw->offloads.encap = encap;

        err = esw_create_offloads_fdb_tables(esw, esw->nvports);

        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Failed re-creating fast FDB table");
                esw->offloads.encap = !encap;
                (void)esw_create_offloads_fdb_tables(esw, esw->nvports);
        }

        return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        *encap = esw->offloads.encap;
        return 0;
}

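/* Representor registration: an upper driver (e.g. the Ethernet or IB rep)
 * provides a load/unload interface per vport and rep type; reps are
 * loaded while the e-switch is in offloads mode and unloaded on
 * unregister.
 */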
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep_if *__rep_if,
                                     u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep_if *rep_if;

        rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

        rep_if->load   = __rep_if->load;
        rep_if->unload = __rep_if->unload;
        rep_if->get_proto_dev = __rep_if->get_proto_dev;
        rep_if->priv = __rep_if->priv;

        rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index, u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->rep_if[rep_type].unload(rep);

        rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
        return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
                                 int vport,
                                 u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        if (vport == FDB_UPLINK_VPORT)
                vport = UPLINK_REP_INDEX;

        rep = &offloads->vport_reps[vport];

        if (rep->rep_if[rep_type].valid &&
            rep->rep_if[rep_type].get_proto_dev)
                return rep->rep_if[rep_type].get_proto_dev(rep);
        return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
        return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
                                                int vport)
{
        return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);