1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
8 #include "eswitch_offloads_chains.h"
9 #include "en/mapping.h"
10 #include "mlx5_core.h"
/* Convenience accessors into the per-eswitch chains private state.
 * All of them dereference the priv struct hung off the offloads fdb table.
 */
#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
/* True when firmware allows rules to ignore flow table level ordering */
#define fdb_ignore_flow_level_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
/* Firmware currently has 4 pool of 4 sizes that it supports (ESW_POOLS),
 * and a virtual memory region of 16M (ESW_SIZE), this region is duplicated
 * for each flow table pool. We can allocate up to 16M of each pool,
 * and we keep track of how much we used via get_next_avail_sz_from_pool.
 * Firmware doesn't report any of this for now.
 * ESW_POOL is expected to be sorted from large to small and match firmware
 * pools.
 */
#define ESW_SIZE (16 * 1024 * 1024)
/* NOTE(review): tail of this initializer was truncated in the extraction;
 * reconstructed from the "4 sizes, sorted large to small" contract above —
 * confirm against the upstream driver.
 */
static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
					  1 * 1024 * 1024,
					  64 * 1024,
					  4 * 1024, };
/* Fixed size used for the "ft" chain table (post-chain processing) */
#define ESW_FT_TBL_SZ (64 * 1024)
42 struct mlx5_esw_chains_priv {
43 struct rhashtable chains_ht;
44 struct rhashtable prios_ht;
45 /* Protects above chains_ht and prios_ht */
48 struct mlx5_flow_table *tc_end_fdb;
49 struct mapping_ctx *chains_mapping;
51 int fdb_left[ARRAY_SIZE(ESW_POOLS)];
55 struct rhash_head node;
62 struct mlx5_eswitch *esw;
63 struct list_head prios_list;
64 struct mlx5_flow_handle *restore_rule;
65 struct mlx5_modify_hdr *miss_modify_hdr;
75 struct rhash_head node;
76 struct list_head list;
78 struct fdb_prio_key key;
82 struct fdb_chain *fdb_chain;
83 struct mlx5_flow_table *fdb;
84 struct mlx5_flow_table *next_fdb;
85 struct mlx5_flow_group *miss_group;
86 struct mlx5_flow_handle *miss_rule;
89 static const struct rhashtable_params chain_params = {
90 .head_offset = offsetof(struct fdb_chain, node),
91 .key_offset = offsetof(struct fdb_chain, chain),
92 .key_len = sizeof_field(struct fdb_chain, chain),
93 .automatic_shrinking = true,
96 static const struct rhashtable_params prio_params = {
97 .head_offset = offsetof(struct fdb_prio, node),
98 .key_offset = offsetof(struct fdb_prio, key),
99 .key_len = sizeof_field(struct fdb_prio, key),
100 .automatic_shrinking = true,
103 bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
105 return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
108 bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
110 return fdb_ignore_flow_level_supported(esw);
113 u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
115 if (!mlx5_esw_chains_prios_supported(esw))
118 if (fdb_ignore_flow_level_supported(esw))
121 return FDB_TC_MAX_CHAIN;
124 u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
126 return mlx5_esw_chains_get_chain_range(esw) + 1;
129 u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
131 if (!mlx5_esw_chains_prios_supported(esw))
134 if (fdb_ignore_flow_level_supported(esw))
137 return FDB_TC_MAX_PRIO;
140 static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
142 if (fdb_ignore_flow_level_supported(esw))
145 return FDB_TC_LEVELS_PER_PRIO;
148 #define POOL_NEXT_SIZE 0
150 mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
155 for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
156 if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
158 if (desired_size != POOL_NEXT_SIZE)
164 --fdb_pool_left(esw)[found_i];
165 return ESW_POOLS[found_i];
172 mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
176 for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
177 if (sz == ESW_POOLS[i]) {
178 ++fdb_pool_left(esw)[i];
183 WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
187 mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
192 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
194 for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
195 fdb_pool_left(esw)[i] =
196 ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
199 static struct mlx5_flow_table *
200 mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
201 u32 chain, u32 prio, u32 level)
203 struct mlx5_flow_table_attr ft_attr = {};
204 struct mlx5_flow_namespace *ns;
205 struct mlx5_flow_table *fdb;
208 if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
209 ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
210 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
212 sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
213 mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
214 mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
216 return ERR_PTR(-ENOSPC);
217 ft_attr.max_fte = sz;
219 /* We use tc_slow_fdb(esw) as the table's next_ft till
220 * ignore_flow_level is allowed on FT creation and not just for FTEs.
221 * Instead caller should add an explicit miss rule if needed.
223 ft_attr.next_ft = tc_slow_fdb(esw);
225 /* The root table(chain 0, prio 1, level 0) is required to be
226 * connected to the previous prio (FDB_BYPASS_PATH if exists).
227 * We always create it, as a managed table, in order to align with
230 if (!fdb_ignore_flow_level_supported(esw) ||
231 (chain == 0 && prio == 1 && level == 0)) {
232 ft_attr.level = level;
233 ft_attr.prio = prio - 1;
234 ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
236 ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
237 ft_attr.prio = FDB_TC_OFFLOAD;
238 /* Firmware doesn't allow us to create another level 0 table,
239 * so we create all unmanaged tables as level 1.
241 * To connect them, we use explicit miss rules with
242 * ignore_flow_level. Caller is responsible to create
243 * these rules (if needed).
246 ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
249 ft_attr.autogroup.num_reserved_entries = 2;
250 ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
251 fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
254 "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
255 (int)PTR_ERR(fdb), chain, prio, level, sz);
256 mlx5_esw_chains_put_sz_to_pool(esw, sz);
264 mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
265 struct mlx5_flow_table *fdb)
267 mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
268 mlx5_destroy_flow_table(fdb);
272 create_fdb_chain_restore(struct fdb_chain *fdb_chain)
274 char modact[MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)];
275 struct mlx5_eswitch *esw = fdb_chain->esw;
276 struct mlx5_modify_hdr *mod_hdr;
280 if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw))
283 err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
286 if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
287 /* we got the special default flow tag id, so we won't know
288 * if we actually marked the packet with the restore rule
291 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
293 err = mapping_add(esw_chains_mapping(esw),
294 &fdb_chain->chain, &index);
295 mapping_remove(esw_chains_mapping(esw),
296 MLX5_FS_DEFAULT_FLOW_TAG);
301 fdb_chain->id = index;
303 MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
304 MLX5_SET(set_action_in, modact, field,
305 mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
306 MLX5_SET(set_action_in, modact, offset,
307 mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
308 MLX5_SET(set_action_in, modact, length,
309 mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
310 MLX5_SET(set_action_in, modact, data, fdb_chain->id);
311 mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
313 if (IS_ERR(mod_hdr)) {
314 err = PTR_ERR(mod_hdr);
317 fdb_chain->miss_modify_hdr = mod_hdr;
319 fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
320 if (IS_ERR(fdb_chain->restore_rule)) {
321 err = PTR_ERR(fdb_chain->restore_rule);
328 mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
330 /* Datapath can't find this mapping, so we can safely remove it */
331 mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
335 static struct fdb_chain *
336 mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
338 struct fdb_chain *fdb_chain = NULL;
341 fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
343 return ERR_PTR(-ENOMEM);
345 fdb_chain->esw = esw;
346 fdb_chain->chain = chain;
347 INIT_LIST_HEAD(&fdb_chain->prios_list);
349 err = create_fdb_chain_restore(fdb_chain);
353 err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
361 if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
362 mlx5_del_flow_rules(fdb_chain->restore_rule);
363 mlx5_modify_header_dealloc(esw->dev,
364 fdb_chain->miss_modify_hdr);
372 mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
374 struct mlx5_eswitch *esw = fdb_chain->esw;
376 rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
379 if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
380 mlx5_del_flow_rules(fdb_chain->restore_rule);
381 mlx5_modify_header_dealloc(esw->dev,
382 fdb_chain->miss_modify_hdr);
384 mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
390 static struct fdb_chain *
391 mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
393 struct fdb_chain *fdb_chain;
395 fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
398 fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
399 if (IS_ERR(fdb_chain))
408 static struct mlx5_flow_handle *
409 mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
410 struct mlx5_flow_table *fdb,
411 struct mlx5_flow_table *next_fdb)
413 static const struct mlx5_flow_spec spec = {};
414 struct mlx5_eswitch *esw = fdb_chain->esw;
415 struct mlx5_flow_destination dest = {};
416 struct mlx5_flow_act act = {};
418 act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
419 act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
420 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
423 if (fdb_chain->chain != mlx5_esw_chains_get_ft_chain(esw)) {
424 act.modify_hdr = fdb_chain->miss_modify_hdr;
425 act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
428 return mlx5_add_flow_rules(fdb, &spec, &act, &dest, 1);
432 mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
433 struct mlx5_flow_table *next_fdb)
435 struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
436 struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
437 struct fdb_prio *pos;
440 if (fdb_prio->key.level)
443 /* Iterate in reverse order until reaching the level 0 rule of
444 * the previous priority, adding all the miss rules first, so we can
445 * revert them if any of them fails.
448 list_for_each_entry_continue_reverse(pos,
449 &fdb_chain->prios_list,
451 miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
454 if (IS_ERR(miss_rules[n])) {
455 err = PTR_ERR(miss_rules[n]);
464 /* Success, delete old miss rules, and update the pointers. */
467 list_for_each_entry_continue_reverse(pos,
468 &fdb_chain->prios_list,
470 mlx5_del_flow_rules(pos->miss_rule);
472 pos->miss_rule = miss_rules[n];
473 pos->next_fdb = next_fdb;
484 mlx5_del_flow_rules(miss_rules[n]);
490 mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
492 if (--fdb_chain->ref == 0)
493 mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
496 static struct fdb_prio *
497 mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
498 u32 chain, u32 prio, u32 level)
500 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
501 struct mlx5_flow_handle *miss_rule = NULL;
502 struct mlx5_flow_group *miss_group;
503 struct fdb_prio *fdb_prio = NULL;
504 struct mlx5_flow_table *next_fdb;
505 struct fdb_chain *fdb_chain;
506 struct mlx5_flow_table *fdb;
507 struct list_head *pos;
511 fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
512 if (IS_ERR(fdb_chain))
513 return ERR_CAST(fdb_chain);
515 fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
516 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
517 if (!fdb_prio || !flow_group_in) {
522 /* Chain's prio list is sorted by prio and level.
523 * And all levels of some prio point to the next prio's level 0.
524 * Example list (prio, level):
525 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
526 * In hardware, we will we have the following pointers:
527 * (3,0) -> (5,0) -> (7,0) -> Slow path
533 /* Default miss for each chain: */
534 next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
537 list_for_each(pos, &fdb_chain->prios_list) {
538 struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
540 /* exit on first pos that is larger */
541 if (prio < p->key.prio || (prio == p->key.prio &&
542 level < p->key.level)) {
543 /* Get next level 0 table */
544 next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
549 fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
555 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
557 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
559 miss_group = mlx5_create_flow_group(fdb, flow_group_in);
560 if (IS_ERR(miss_group)) {
561 err = PTR_ERR(miss_group);
565 /* Add miss rule to next_fdb */
566 miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
567 if (IS_ERR(miss_rule)) {
568 err = PTR_ERR(miss_rule);
572 fdb_prio->miss_group = miss_group;
573 fdb_prio->miss_rule = miss_rule;
574 fdb_prio->next_fdb = next_fdb;
575 fdb_prio->fdb_chain = fdb_chain;
576 fdb_prio->key.chain = chain;
577 fdb_prio->key.prio = prio;
578 fdb_prio->key.level = level;
581 err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
586 list_add(&fdb_prio->list, pos->prev);
588 /* Table is ready, connect it */
589 err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
593 kvfree(flow_group_in);
597 list_del(&fdb_prio->list);
598 rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
601 mlx5_del_flow_rules(miss_rule);
603 mlx5_destroy_flow_group(miss_group);
605 mlx5_esw_chains_destroy_fdb_table(esw, fdb);
609 kvfree(flow_group_in);
610 mlx5_esw_chains_put_fdb_chain(fdb_chain);
615 mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
616 struct fdb_prio *fdb_prio)
618 struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
620 WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
621 fdb_prio->next_fdb));
623 list_del(&fdb_prio->list);
624 rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
626 mlx5_del_flow_rules(fdb_prio->miss_rule);
627 mlx5_destroy_flow_group(fdb_prio->miss_group);
628 mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
629 mlx5_esw_chains_put_fdb_chain(fdb_chain);
633 struct mlx5_flow_table *
634 mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
637 struct mlx5_flow_table *prev_fts;
638 struct fdb_prio *fdb_prio;
639 struct fdb_prio_key key;
642 if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
643 chain != mlx5_esw_chains_get_ft_chain(esw)) ||
644 prio > mlx5_esw_chains_get_prio_range(esw) ||
645 level > mlx5_esw_chains_get_level_range(esw))
646 return ERR_PTR(-EOPNOTSUPP);
648 /* create earlier levels for correct fs_core lookup when
651 for (l = 0; l < level; l++) {
652 prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
653 if (IS_ERR(prev_fts)) {
654 fdb_prio = ERR_CAST(prev_fts);
663 mutex_lock(&esw_chains_lock(esw));
664 fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
667 fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
669 if (IS_ERR(fdb_prio))
670 goto err_create_prio;
674 mutex_unlock(&esw_chains_lock(esw));
676 return fdb_prio->fdb;
679 mutex_unlock(&esw_chains_lock(esw));
682 mlx5_esw_chains_put_table(esw, chain, prio, l);
683 return ERR_CAST(fdb_prio);
687 mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
690 struct fdb_prio *fdb_prio;
691 struct fdb_prio_key key;
697 mutex_lock(&esw_chains_lock(esw));
698 fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
703 if (--fdb_prio->ref == 0)
704 mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
705 mutex_unlock(&esw_chains_lock(esw));
708 mlx5_esw_chains_put_table(esw, chain, prio, level);
713 mutex_unlock(&esw_chains_lock(esw));
715 "Couldn't find table: (chain: %d prio: %d level: %d)",
/* Return the table all chains miss into (post-chain processing). */
struct mlx5_flow_table *
mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
{
	return tc_end_fdb(esw);
}
726 mlx5_esw_chains_init(struct mlx5_eswitch *esw)
728 struct mlx5_esw_chains_priv *chains_priv;
729 struct mlx5_core_dev *dev = esw->dev;
730 u32 max_flow_counter, fdb_max;
731 struct mapping_ctx *mapping;
734 chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
737 esw_chains_priv(esw) = chains_priv;
739 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
740 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
741 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
744 "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
745 max_flow_counter, esw->params.large_group_num, fdb_max);
747 mlx5_esw_chains_init_sz_pool(esw);
749 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
750 esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
751 esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
752 esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
754 esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
755 esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
756 mlx5_esw_chains_get_chain_range(esw),
757 mlx5_esw_chains_get_prio_range(esw));
760 err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
762 goto init_chains_ht_err;
764 err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
766 goto init_prios_ht_err;
768 mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
770 if (IS_ERR(mapping)) {
771 err = PTR_ERR(mapping);
774 esw_chains_mapping(esw) = mapping;
776 mutex_init(&esw_chains_lock(esw));
781 rhashtable_destroy(&esw_prios_ht(esw));
783 rhashtable_destroy(&esw_chains_ht(esw));
790 mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
792 mutex_destroy(&esw_chains_lock(esw));
793 mapping_destroy(esw_chains_mapping(esw));
794 rhashtable_destroy(&esw_prios_ht(esw));
795 rhashtable_destroy(&esw_chains_ht(esw));
797 kfree(esw_chains_priv(esw));
801 mlx5_esw_chains_open(struct mlx5_eswitch *esw)
803 struct mlx5_flow_table *ft;
806 /* Create tc_end_fdb(esw) which is the always created ft chain */
807 ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
812 tc_end_fdb(esw) = ft;
814 /* Always open the root for fast path */
815 ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
821 /* Open level 1 for split rules now if prios isn't supported */
822 if (!mlx5_esw_chains_prios_supported(esw)) {
823 err = mlx5_esw_vport_tbl_get(esw);
831 mlx5_esw_chains_put_table(esw, 0, 1, 0);
833 mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
/* Release the always-present tables taken by mlx5_esw_chains_open. */
static void
mlx5_esw_chains_close(struct mlx5_eswitch *esw)
{
	if (!mlx5_esw_chains_prios_supported(esw))
		mlx5_esw_vport_tbl_put(esw);
	mlx5_esw_chains_put_table(esw, 0, 1, 0);
	mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
}
/* Public entry point: init private state then open the default tables. */
int
mlx5_esw_chains_create(struct mlx5_eswitch *esw)
{
	int err;

	err = mlx5_esw_chains_init(esw);
	if (err)
		return err;

	err = mlx5_esw_chains_open(esw);
	if (err)
		goto err_open;

	return 0;

err_open:
	mlx5_esw_chains_cleanup(esw);
	return err;
}
/* Public teardown: close default tables then free private state. */
void
mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
{
	mlx5_esw_chains_close(esw);
	mlx5_esw_chains_cleanup(esw);
}
873 int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
878 err = mapping_find(esw_chains_mapping(esw), tag, chain);
880 esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);