/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0
/* Per vport tables */

#define MLX5_ESW_VPORT_TABLE_SIZE 128
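
/* Flow rules that must be split across two tables (e.g. mirroring
 * combined with header rewrite) are chained through a dedicated per
 * vport FDB table. These tables are kept in a hash table keyed by
 * {chain, prio, vport, vhca_id} and are reference counted under
 * vports.lock, so users of the same slot share one table instance.
 */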
/* This struct is used as a key to the hash table and we need it to be packed
 * so hash result is consistent
 */
struct mlx5_vport_key {
	u32 chain;
	u16 prio;
	u16 vport;
	u16 vhca_id;
} __packed;

struct mlx5_vport_tbl_attr {
	u16 chain;
	u16 prio;
	u16 vport;
};

struct mlx5_vport_table {
	struct hlist_node hlist;
	struct mlx5_flow_table *fdb;
	u32 num_rules;
	struct mlx5_vport_key key;
};

#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
static struct mlx5_flow_table *
esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *fdb;

	ft_attr.autogroup.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS;
	ft_attr.max_fte = MLX5_ESW_VPORT_TABLE_SIZE;
	ft_attr.prio = FDB_PER_VPORT;
	fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(fdb))
		esw_warn(esw->dev, "Failed to create per vport FDB Table err %ld\n",
			 PTR_ERR(fdb));

	return fdb;
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
				  struct mlx5_vport_tbl_attr *attr,
				  struct mlx5_vport_key *key)
{
	key->vport = attr->vport;
	key->chain = attr->chain;
	key->prio = attr->prio;
	key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	return jhash(key, sizeof(*key), 0);
}
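
/* jhash() may collide for distinct keys, so the lookup below still
 * compares the full key with memcmp() before declaring a match.
 */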
/* caller must hold vports.lock */
static struct mlx5_vport_table *
esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32 key)
{
	struct mlx5_vport_table *e;

	hash_for_each_possible(esw->fdb_table.offloads.vports.table, e, hlist, key)
		if (!memcmp(&e->key, skey, sizeof(*skey)))
			return e;

	return NULL;
}
static void
esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_vport_table *e;
	struct mlx5_vport_key key;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &key);
	e = esw_vport_tbl_lookup(esw, &key, hkey);
	if (!e || --e->num_rules)
		goto out;

	hash_del(&e->hlist);
	mlx5_destroy_flow_table(e->fdb);
	kfree(e);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
}
static struct mlx5_flow_table *
esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport_table *e;
	struct mlx5_vport_key skey;
	u32 hkey;

	mutex_lock(&esw->fdb_table.offloads.vports.lock);
	hkey = flow_attr_to_vport_key(esw, attr, &skey);
	e = esw_vport_tbl_lookup(esw, &skey, hkey);
	if (e) {
		e->num_rules++;
		goto out;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		fdb = ERR_PTR(-ENOMEM);
		goto err_alloc;
	}

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!ns) {
		esw_warn(dev, "Failed to get FDB namespace\n");
		fdb = ERR_PTR(-ENOENT);
		goto err_ns;
	}

	fdb = esw_vport_tbl_create(esw, ns);
	if (IS_ERR(fdb))
		goto err_ns;

	e->fdb = fdb;
	e->num_rules = 1;
	e->key = skey;
	hash_add(esw->fdb_table.offloads.vports.table, &e->hlist, hkey);
out:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return e->fdb;

err_ns:
	kfree(e);
err_alloc:
	mutex_unlock(&esw->fdb_table.offloads.vports.lock);
	return fdb;
}
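
/* The two helpers below acquire/release a per vport table for every
 * vport on the eswitch, all on the same {chain 0, prio 1} slot. They
 * are used by esw_chains_create() to open the split-rule tables up
 * front when chains/prios offload isn't supported.
 */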
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		fdb = esw_vport_tbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	mlx5_esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	int i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_all_vports(esw, i, vport) {
		attr.vport = vport->vport;
		esw_vport_tbl_put(esw, &attr);
	}
}

/* End: Per vport tables */
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	WARN_ON(idx > esw->total_vports - 1);
	return &esw->offloads.vport_reps[idx];
}
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
	    attr && attr->in_rep)
		spec->flow_context.flow_source =
			attr->in_rep->vport == MLX5_VPORT_UPLINK ?
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
				MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
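
/* Source port matching: with metadata enabled the source is matched on
 * the vport metadata previously written to reg_c_0, otherwise on the
 * misc source_port field (plus the owner vhca_id on merged eswitches).
 */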
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
								   attr->in_rep->vport));

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(attr->in_mdev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
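
/* Build the destination list (forward table / vports / counter) from
 * the flow attributes, pick the FDB to insert into - the per vport
 * split table or a chains-managed table - and add the rule, possibly
 * through a termination table when one is required.
 */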
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int j, i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		struct mlx5_flow_table *ft;

		if (attr->dest_ft) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = attr->dest_ft;
			i++;
		} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
			i++;
		} else if (attr->dest_chain) {
			flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
			ft = mlx5_chains_get_table(chains, attr->dest_chain,
						   1, 0);
			if (IS_ERR(ft)) {
				rule = ERR_CAST(ft);
				goto err_create_goto_table;
			}

			dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[i].ft = ft;
			i++;
		} else {
			for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
				dest[i].vport.num = esw_attr->dests[j].rep->vport;
				dest[i].vport.vhca_id =
					MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
					dest[i].vport.flags |=
						MLX5_FLOW_DEST_VPORT_VHCA_ID;
				if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
					flow_act.pkt_reformat =
						esw_attr->dests[j].pkt_reformat;
					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
					dest[i].vport.pkt_reformat =
						esw_attr->dests[j].pkt_reformat;
				}
				i++;
			}
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;

		fdb = esw_vport_tbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	return rule;

err_add_rule:
	if (split)
		esw_vport_tbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
		mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
err_create_goto_table:
	return rule;
}
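
/* A "fwd" rule implements the first half of a split rule: it matches
 * in the fast FDB, forwards to the mirror destinations, and chains to
 * the per vport table where the second half of the rule is inserted.
 */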
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i;

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport.num = esw_attr->dests[i].rep->vport;
		dest[i].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
			dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule))
		goto add_err;

	atomic64_inc(&esw->offloads.num_flows);

	return rule;
add_err:
	esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	return rule;
}
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
	}

	if (fwd_rule) {
		esw_vport_tbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	} else {
		if (split)
			esw_vport_tbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		if (attr->dest_chain)
			mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
	}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
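
/* VLAN push/pop emulation for HW that cannot do it per flow: the pop
 * is applied globally on all loaded representor vports and the push is
 * configured on the vport itself, guarded by refcounts in the helpers
 * below.
 */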
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int i, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_rep(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		  !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, esw->manager_vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}
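
/* Enable/disable propagation of the FDB metadata registers (reg_c_0,
 * and reg_c_1 when loopback is supported) to the vport context, so
 * metadata written in the FDB remains visible on the receiving side.
 */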
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}
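
/* Peer miss rules (merged eswitch): rules in the local slow path FDB
 * match on traffic sourced from the peer eswitch's vports and forward
 * it to the peer's manager vport, mirroring what the local unicast and
 * multicast miss rules do.
 */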
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	void *misc;
	int err, i;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[MLX5_VPORT_PF] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
	}

	mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, i);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[i] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	nvports = --i;
	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	int i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
					       mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[i]);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_del_flow_rules(flows[mlx5_eswitch_ecpf_idx(esw)]);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev))
		mlx5_del_flow_rules(flows[MLX5_VPORT_PF]);

	kvfree(flows);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}
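
/* Restore rules map a chain tag found in reg_c_0 back to software:
 * the tag is set as the rule's flow_tag and reg_c_1 is copied to
 * reg_b via the restore modify header, before forwarding to the
 * offloads table.
 */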
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}
u32
esw_get_max_restore_tag(struct mlx5_eswitch *esw)
{
	return ESW_CHAIN_TAG_METADATA_MASK;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
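
/* The slow path FDB must fit the send-to-vport rules (MAX_SQ_NVPORTS
 * per vport plus MAX_PF_SQ for the PF), the peer miss rules (one per
 * vport) and the two unicast/multicast miss rules; see the table_size
 * computation in esw_create_offloads_fdb_tables().
 */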
static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.max_restore_tag = esw_get_max_restore_tag(esw);

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = mlx5_esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		mlx5_esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}
#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
		MLX5_ESW_MISS_FLOWS + esw->total_vports;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	err = esw_chains_create(esw, fdb);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
}
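
/* The offloads table lives in the NIC RX offloads namespace and holds
 * the per vport rx rules that steer traffic, matched by source vport
 * (or its metadata), to the destination supplied by the representor,
 * typically a TIR.
 */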
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
static int mlx5_eswitch_inline_mode_get(const struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_CHAIN_TAG_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_CHAIN_TAG_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}
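
/* devlink mode switch helpers: moving between legacy and offloads mode
 * fully disables and re-enables the eswitch in the requested mode, and
 * tries to fall back to the previous mode on failure.
 */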
static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
					 esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to legacy");
		}
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}

int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vports = esw->total_vports;
	struct mlx5_eswitch_rep *rep;
	int vport_index;
	u8 rep_type;

	esw->offloads.vport_reps = kcalloc(total_vports,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	mlx5_esw_for_all_reps(esw, vport_index, rep) {
		rep->vport = mlx5_eswitch_index_to_vport_num(esw, vport_index);
		rep->vport_index = vport_index;

		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
			atomic_set(&rep->rep_data[rep_type].state,
				   REP_UNREGISTERED);
	}

	return 0;
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
	if (err)
		return err;

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
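
/* devcom is used to pair the two eswitches of a merged-eswitch (two
 * port) device: a PAIR event sets up cross-eswitch namespaces and peer
 * miss rules in both directions, UNPAIR tears them down.
 */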
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	return 0;
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static bool
esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
	    mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = PCI_FUNC(esw->dev->pdev->devfn);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (1-4095) for all PF's */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_all_vports_reverse(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err;
	int i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_all_vports(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	esw_vport_destroy_offloads_acl_tables(esw, vport);
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	return 0;

create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
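
/* On ECPF based devices the number of host VFs can change at run time;
 * the FW event below is handled in a workqueue, where the VF vports
 * and their representors are loaded or unloaded accordingly.
 */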
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	bool host_pf_disabled;
	u16 new_num_vfs;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		int err;

		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err)
			return;
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
}
2253 static void esw_functions_changed_event_handler(struct work_struct *work)
2255 struct mlx5_host_work *host_work;
2256 struct mlx5_eswitch *esw;
2259 host_work = container_of(work, struct mlx5_host_work, work);
2260 esw = host_work->esw;
2262 out = mlx5_esw_query_functions(esw->dev);
2266 esw_vfs_changed_event_handler(esw, out);
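/* ESW_FUNCTIONS_CHANGED notifier. It can fire in atomic context (hence
 * the GFP_ATOMIC allocation), so the handling above is deferred to the
 * eswitch workqueue.
 */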
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}

static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

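/* Bring the eswitch into offloads (switchdev) mode. Encap is enabled
 * only when the FDB supports both reformat and decap. VF link state is
 * forced down because the representor takes over link-state control,
 * and the uplink representor must be loaded before all others.
 */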
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int err, i;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap))
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
	else
		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	if (esw_check_vport_match_metadata_supported(esw))
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}

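/* Fall back to legacy mode. If that fails, try to restore offloads mode
 * so the eswitch is not left disabled; both failures are reported via
 * extack.
 */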
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err, err1;

	mlx5_eswitch_disable_locked(esw, false);
	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
					 MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
		err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
						  MLX5_ESWITCH_IGNORE_NUM_VFS);
		if (err1) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed setting eswitch back to offloads");
		}
	}

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	esw_offloads_metadata_uninit(esw);
	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}

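/* Translation helpers between the devlink uAPI enums and the mlx5
 * internal eswitch and inline-mode constants; unknown values return
 * -EINVAL.
 */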
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
{
	/* devlink commands in NONE eswitch mode are currently supported only
	 * on ECPF.
	 */
	return (esw->mode == MLX5_ESWITCH_NONE &&
		!mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
}

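/* devlink eswitch mode set/get. mode_lock serializes mode transitions
 * against the other devlink eswitch callbacks below.
 */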
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	mutex_lock(&esw->mode_lock);
	cur_mlx5_mode = esw->mode;
	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		err = esw_offloads_start(esw, extack);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		err = esw_offloads_stop(esw, extack);
	else
		err = -EINVAL;

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_mode_to_devlink(esw->mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

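/* devlink inline-mode set/get. Changing the minimum inline mode is only
 * allowed when the device requires it per vport context, is refused
 * while offloaded flows exist, and is rolled back vport by vport if a
 * vport update fails part way.
 */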
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err, vport, num_vport;
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto out;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			goto out;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	mlx5_esw_for_each_host_func_vport(esw, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	mutex_unlock(&esw->mode_lock);
	return 0;

revert_inline_mode:
	num_vport = --vport;
	mlx5_esw_for_each_host_func_vport_reverse(esw, vport, num_vport)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

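/* devlink encap-mode set/get. In offloads mode a change requires
 * re-creating the fast-path FDB tables, so it is refused while flows
 * are installed; on failure the previous mode is restored.
 */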
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	mutex_lock(&esw->mode_lock);
	err = eswitch_devlink_esw_mode_check(esw);
	if (err)
		goto unlock;

	*encap = esw->offloads.encap;
unlock:
	mutex_unlock(&esw->mode_lock);
	return err;
}

static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

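/* Representor registration API. Registering records the ops and marks
 * each applicable rep REP_REGISTERED; the reps are actually loaded only
 * when offloads mode is (or becomes) enabled. Unregistering unloads
 * loaded reps first when currently in offloads mode.
 */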
void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	int i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_all_reps(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, i))) {
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_all_reps(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num >= MLX5_VPORT_FIRST_VF &&
	       vport_num <= esw->dev->priv.sriov.max_vfs;
}

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

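/* Return the source-port match value for a vport, shifted into the high
 * ESW_SOURCE_PORT_METADATA_BITS of the 32-bit metadata register (reg_c_0
 * on current devices); 0 means no valid metadata.
 */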
u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);