1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2020 Mellanox Technologies.
4 #include <linux/mlx5/driver.h>
5 #include <linux/mlx5/mlx5_ifc.h>
6 #include <linux/mlx5/fs.h>
8 #include "lib/fs_chains.h"
9 #include "en/mapping.h"
/* Convenience accessors for struct mlx5_fs_chains members. */
13 #define chains_lock(chains) ((chains)->lock)
14 #define chains_ht(chains) ((chains)->chains_ht)
15 #define prios_ht(chains) ((chains)->prios_ht)
16 #define ft_pool_left(chains) ((chains)->ft_left)
17 #define tc_default_ft(chains) ((chains)->tc_default_ft)
18 #define tc_end_ft(chains) ((chains)->tc_end_ft)
/* Map a flow namespace to the fs_core prio used for chain tables:
 * FDB_TC_OFFLOAD for the eswitch FDB namespace, MLX5E_TC_PRIO otherwise.
 */
19 #define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
20 FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
22 /* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
23 * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
24 * for each flow table pool. We can allocate up to 16M of each pool,
25 * and we keep track of how much we used via get_next_avail_sz_from_pool.
26 * Firmware doesn't report any of this for now.
27 * ESW_POOL is expected to be sorted from large to small and match firmware
 */
30 #define FT_SIZE (16 * 1024 * 1024)
/* Supported flow table sizes, sorted largest first.
 * NOTE(review): only the first initializer element is visible here;
 * the remaining pool sizes continue on lines not shown in this view.
 */
31 static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
/* Requested size for the special netfilter chain's table. */
35 #define FT_TBL_SZ (64 * 1024)
/* Central state for chain/prio flow table management. */
37 struct mlx5_fs_chains {
38 struct mlx5_core_dev *dev;
/* chain id -> struct fs_chain lookup (see chain_params) */
40 struct rhashtable chains_ht;
/* {chain, prio, level} -> struct prio lookup (see prio_params) */
41 struct rhashtable prios_ht;
42 /* Protects above chains_ht and prios_ht */
/* Static tables that chain tables connect to / terminate at. */
45 struct mlx5_flow_table *tc_default_ft;
46 struct mlx5_flow_table *tc_end_ft;
/* chain id <-> restore-metadata mapping context */
47 struct mapping_ctx *chains_mapping;
/* Namespace these chains live in (FDB or NIC). */
49 enum mlx5_flow_namespace_type ns;
/* Remaining table allocations per FT_POOLS size bucket. */
53 int ft_left[ARRAY_SIZE(FT_POOLS)];
/* NOTE(review): fields below belong to struct fs_chain; the struct
 * header line is not visible in this view.
 */
57 struct rhash_head node;
64 struct mlx5_fs_chains *chains;
/* prios of this chain, kept sorted by (prio, level) */
65 struct list_head prios_list;
/* eswitch restore rule, set by create_chain_restore() (FDB only) */
66 struct mlx5_flow_handle *restore_rule;
/* modify header writing the chain restore tag, see create_chain_restore() */
67 struct mlx5_modify_hdr *miss_modify_hdr;
/* NOTE(review): fields below belong to struct prio; the struct header
 * and its key struct are not visible in this view.
 */
77 struct rhash_head node;
78 struct list_head list;
84 struct fs_chain *chain;
85 struct mlx5_flow_table *ft;
/* table this prio's miss rule forwards to */
86 struct mlx5_flow_table *next_ft;
87 struct mlx5_flow_group *miss_group;
88 struct mlx5_flow_handle *miss_rule;
/* chains_ht parameters: keyed by fs_chain.chain (the chain id). */
91 static const struct rhashtable_params chain_params = {
92 .head_offset = offsetof(struct fs_chain, node),
93 .key_offset = offsetof(struct fs_chain, chain),
94 .key_len = sizeof_field(struct fs_chain, chain),
95 .automatic_shrinking = true,
/* prios_ht parameters: keyed by the prio's key field. */
98 static const struct rhashtable_params prio_params = {
99 .head_offset = offsetof(struct prio, node),
100 .key_offset = offsetof(struct prio, key),
101 .key_len = sizeof_field(struct prio, key),
102 .automatic_shrinking = true,
/* True when multiple chains and priorities are supported. */
105 bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
107 return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
/* True when the device supports ignore_flow_level on rules. */
110 bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
112 return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
/* Jumping backwards (to a lower chain) needs both prios and
 * ignore_flow_level support.
 */
115 bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
117 return mlx5_chains_prios_supported(chains) &&
118 mlx5_chains_ignore_flow_level_supported(chains);
/* Largest usable chain id. NOTE(review): the return values of the
 * first two branches fall on lines not visible in this view.
 */
121 u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
123 if (!mlx5_chains_prios_supported(chains))
126 if (mlx5_chains_ignore_flow_level_supported(chains))
129 /* We should get here only for eswitch case */
130 return FDB_TC_MAX_CHAIN;
/* Reserved chain id just past the normal chain range. */
133 u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
135 return mlx5_chains_get_chain_range(chains) + 1;
/* Largest usable priority. NOTE(review): the return value of the
 * first branch falls on a line not visible in this view.
 */
138 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
140 if (mlx5_chains_ignore_flow_level_supported(chains))
143 /* We should get here only for eswitch case */
144 return FDB_TC_MAX_PRIO;
/* Number of levels per priority. NOTE(review): the return value of
 * the first branch falls on a line not visible in this view.
 */
147 static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
149 if (mlx5_chains_ignore_flow_level_supported(chains))
152 /* Same value for FDB and NIC RX tables */
153 return FDB_TC_LEVELS_PER_PRIO;
/* Set the table that terminates the chains pipeline (tc_end_ft). */
157 mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
158 struct mlx5_flow_table *ft)
160 tc_end_ft(chains) = ft;
/* POOL_NEXT_SIZE: request the next available pool size rather than a
 * specific one.
 */
163 #define POOL_NEXT_SIZE 0
/* Reserve one table of a suitable size from the size pools:
 * scan from smallest to largest, take the first bucket with capacity
 * left whose size exceeds desired_size, decrement its budget and
 * return the chosen size.
 */
165 mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
170 for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
171 if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
173 if (desired_size != POOL_NEXT_SIZE)
179 --ft_pool_left(chains)[found_i];
180 return FT_POOLS[found_i];
/* Return a previously reserved table size to its pool bucket;
 * warn once if the size doesn't match any known pool.
 */
187 mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
191 for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
192 if (sz == FT_POOLS[i]) {
193 ++ft_pool_left(chains)[i];
198 WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
/* Initialize per-size budgets: FT_SIZE / pool-size tables for each
 * pool size not exceeding ft_max, zero for larger sizes.
 */
202 mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
206 for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
207 ft_pool_left(chains)[i] =
208 FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
/* Create an auto-grouped flow table for (chain, prio, level).
 * The table size is drawn from the size pools; the table is created
 * managed (fs_core connected) when ignore_flow_level is unsupported
 * or for the root table, unmanaged otherwise.
 */
211 static struct mlx5_flow_table *
212 mlx5_chains_create_table(struct mlx5_fs_chains *chains,
213 u32 chain, u32 prio, u32 level)
215 struct mlx5_flow_table_attr ft_attr = {};
216 struct mlx5_flow_namespace *ns;
217 struct mlx5_flow_table *ft;
220 if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
221 ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
222 MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
/* The special netfilter chain asks for a fixed FT_TBL_SZ table. */
224 sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
225 mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
226 mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
228 return ERR_PTR(-ENOSPC);
229 ft_attr.max_fte = sz;
231 /* We use tc_default_ft(chains) as the table's next_ft till
232 * ignore_flow_level is allowed on FT creation and not just for FTEs.
233 * Instead caller should add an explicit miss rule if needed.
 */
235 ft_attr.next_ft = tc_default_ft(chains);
237 /* The root table(chain 0, prio 1, level 0) is required to be
238 * connected to the previous fs_core managed prio.
239 * We always create it, as a managed table, in order to align with
 */
242 if (!mlx5_chains_ignore_flow_level_supported(chains) ||
243 (chain == 0 && prio == 1 && level == 0)) {
244 ft_attr.level = level;
245 ft_attr.prio = prio - 1;
246 ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
247 mlx5_get_fdb_sub_ns(chains->dev, chain) :
248 mlx5_get_flow_namespace(chains->dev, chains->ns);
250 ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
251 ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
252 /* Firmware doesn't allow us to create another level 0 table,
253 * so we create all unmanaged tables as level 1.
255 * To connect them, we use explicit miss rules with
256 * ignore_flow_level. Caller is responsible to create
257 * these rules (if needed).
 */
260 ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
/* Reserved entries are used for the per-prio miss group, see
 * mlx5_chains_create_prio().
 */
263 ft_attr.autogroup.num_reserved_entries = 2;
264 ft_attr.autogroup.max_num_groups = chains->group_num;
265 ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
267 mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
268 (int)PTR_ERR(ft), chain, prio, level, sz);
/* On failure, give the reserved size back to the pool. */
269 mlx5_chains_put_sz_to_pool(chains, sz);
/* Destroy a chains table and return its size to the pool. */
277 mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
278 struct mlx5_flow_table *ft)
280 mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
281 mlx5_destroy_flow_table(ft);
/* Set up chain-restore for a chain: allocate a mapping (tag) for the
 * chain id, add an eswitch restore rule (FDB only), and build a
 * modify header that writes the tag into the chain register so SW
 * can recover the chain a packet missed from.
 * Skipped for the netfilter chain and when prios are unsupported.
 */
285 create_chain_restore(struct fs_chain *chain)
287 struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
288 char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
289 struct mlx5_fs_chains *chains = chain->chains;
290 enum mlx5e_tc_attr_to_reg chain_to_reg;
291 struct mlx5_modify_hdr *mod_hdr;
295 if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
296 !mlx5_chains_prios_supported(chains))
299 err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
302 if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
303 /* we got the special default flow tag id, so we won't know
304 * if we actually marked the packet with the restore rule
307 * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
 */
/* Grab a second mapping, then release the unusable default-tag one. */
309 err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
310 mapping_remove(chains->chains_mapping, MLX5_FS_DEFAULT_FLOW_TAG);
317 if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
318 chain_to_reg = CHAIN_TO_REG;
319 chain->restore_rule = esw_add_restore_rule(esw, chain->id);
320 if (IS_ERR(chain->restore_rule)) {
321 err = PTR_ERR(chain->restore_rule);
324 } else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
325 /* For NIC RX we don't need a restore rule
326 * since we write the metadata to reg_b
327 * that is passed to SW directly.
 */
329 chain_to_reg = NIC_CHAIN_TO_REG;
/* Build a SET action writing chain->id into the chosen register. */
335 MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
336 MLX5_SET(set_action_in, modact, field,
337 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
338 MLX5_SET(set_action_in, modact, offset,
339 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
340 MLX5_SET(set_action_in, modact, length,
341 mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
342 MLX5_SET(set_action_in, modact, data, chain->id);
343 mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
345 if (IS_ERR(mod_hdr)) {
346 err = PTR_ERR(mod_hdr);
349 chain->miss_modify_hdr = mod_hdr;
/* Error unwind: delete the restore rule if one was created. */
354 if (!IS_ERR_OR_NULL(chain->restore_rule))
355 mlx5_del_flow_rules(chain->restore_rule);
357 /* Datapath can't find this mapping, so we can safely remove it */
358 mapping_remove(chains->chains_mapping, chain->id);
/* Tear down the state built by create_chain_restore(); no-op when
 * restore was never set up (miss_modify_hdr is NULL).
 */
362 static void destroy_chain_restore(struct fs_chain *chain)
364 struct mlx5_fs_chains *chains = chain->chains;
366 if (!chain->miss_modify_hdr)
369 if (chain->restore_rule)
370 mlx5_del_flow_rules(chain->restore_rule);
372 mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
373 mapping_remove(chains->chains_mapping, chain->id);
/* Allocate an fs_chain for the given chain id, set up its restore
 * state and insert it into chains_ht. Error path undoes the restore
 * setup.
 */
376 static struct fs_chain *
377 mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
379 struct fs_chain *chain_s = NULL;
382 chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
384 return ERR_PTR(-ENOMEM);
386 chain_s->chains = chains;
387 chain_s->chain = chain;
388 INIT_LIST_HEAD(&chain_s->prios_list);
390 err = create_chain_restore(chain_s);
394 err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
402 destroy_chain_restore(chain_s);
/* Remove a chain from chains_ht and release its restore state. */
409 mlx5_chains_destroy_chain(struct fs_chain *chain)
411 struct mlx5_fs_chains *chains = chain->chains;
413 rhashtable_remove_fast(&chains_ht(chains), &chain->node,
416 destroy_chain_restore(chain);
/* Look up a chain by id, creating it on first use.
 * NOTE(review): reference counting appears on lines not visible here.
 */
420 static struct fs_chain *
421 mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
423 struct fs_chain *chain_s;
425 chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
428 chain_s = mlx5_chains_create_chain(chains, chain);
/* Add a catch-all (no-append) rule in ft forwarding to next_ft.
 * When missing from a regular chain into the end table, also attach
 * the chain's restore modify header so SW can recover the chain id.
 */
438 static struct mlx5_flow_handle *
439 mlx5_chains_add_miss_rule(struct fs_chain *chain,
440 struct mlx5_flow_table *ft,
441 struct mlx5_flow_table *next_ft)
443 struct mlx5_fs_chains *chains = chain->chains;
444 struct mlx5_flow_destination dest = {};
445 struct mlx5_flow_act act = {};
447 act.flags = FLOW_ACT_NO_APPEND;
448 if (mlx5_chains_ignore_flow_level_supported(chain->chains))
449 act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
451 act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
452 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
455 if (next_ft == tc_end_ft(chains) &&
456 chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
457 mlx5_chains_prios_supported(chains)) {
458 act.modify_hdr = chain->miss_modify_hdr;
459 act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
/* NULL spec: matches everything (a miss rule). */
462 return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
/* Re-point the miss rules of the preceding levels/prios at next_ft.
 * New rules are added first so the whole update can be reverted if
 * any addition fails; on success the old rules are deleted and the
 * prio pointers updated.
 */
466 mlx5_chains_update_prio_prevs(struct prio *prio,
467 struct mlx5_flow_table *next_ft)
469 struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
470 struct fs_chain *chain = prio->chain;
477 /* Iterate in reverse order until reaching the level 0 rule of
478 * the previous priority, adding all the miss rules first, so we can
479 * revert them if any of them fails.
 */
482 list_for_each_entry_continue_reverse(pos,
485 miss_rules[n] = mlx5_chains_add_miss_rule(chain,
488 if (IS_ERR(miss_rules[n])) {
489 err = PTR_ERR(miss_rules[n]);
498 /* Success, delete old miss rules, and update the pointers. */
501 list_for_each_entry_continue_reverse(pos,
504 mlx5_del_flow_rules(pos->miss_rule);
506 pos->miss_rule = miss_rules[n];
507 pos->next_ft = next_ft;
/* Error unwind: delete the new rules added so far. */
518 mlx5_del_flow_rules(miss_rules[n]);
/* Drop a chain reference; destroy the chain on the last put. */
524 mlx5_chains_put_chain(struct fs_chain *chain)
526 if (--chain->ref == 0)
527 mlx5_chains_destroy_chain(chain);
/* Create a (chain, prio, level) entry: take a chain reference, find
 * the insertion point in the chain's sorted prio list, create the
 * flow table, its miss group and miss rule to the next table, insert
 * into prios_ht and the list, then re-point predecessors at the new
 * table. The error path unwinds each step in reverse.
 */
531 mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
532 u32 chain, u32 prio, u32 level)
534 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
535 struct mlx5_flow_handle *miss_rule;
536 struct mlx5_flow_group *miss_group;
537 struct mlx5_flow_table *next_ft;
538 struct mlx5_flow_table *ft;
539 struct fs_chain *chain_s;
540 struct list_head *pos;
545 chain_s = mlx5_chains_get_chain(chains, chain);
547 return ERR_CAST(chain_s);
549 prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
550 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
551 if (!prio_s || !flow_group_in) {
556 /* Chain's prio list is sorted by prio and level.
557 * And all levels of some prio point to the next prio's level 0.
558 * Example list (prio, level):
559 * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
560 * In hardware, we will we have the following pointers:
561 * (3,0) -> (5,0) -> (7,0) -> Slow path
 */
567 /* Default miss for each chain: */
568 next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
569 tc_default_ft(chains) :
571 list_for_each(pos, &chain_s->prios_list) {
572 struct prio *p = list_entry(pos, struct prio, list);
574 /* exit on first pos that is larger */
575 if (prio < p->key.prio || (prio == p->key.prio &&
576 level < p->key.level)) {
577 /* Get next level 0 table */
578 next_ft = p->key.level == 0 ? p->ft : p->next_ft;
583 ft = mlx5_chains_create_table(chains, chain, prio, level);
/* Miss group occupies the table's reserved trailing entries. */
589 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
591 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
593 miss_group = mlx5_create_flow_group(ft, flow_group_in);
594 if (IS_ERR(miss_group)) {
595 err = PTR_ERR(miss_group);
599 /* Add miss rule to next_ft */
600 miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
601 if (IS_ERR(miss_rule)) {
602 err = PTR_ERR(miss_rule);
606 prio_s->miss_group = miss_group;
607 prio_s->miss_rule = miss_rule;
608 prio_s->next_ft = next_ft;
609 prio_s->chain = chain_s;
610 prio_s->key.chain = chain;
611 prio_s->key.prio = prio;
612 prio_s->key.level = level;
615 err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
/* Insert before the first larger element found above. */
620 list_add(&prio_s->list, pos->prev);
622 /* Table is ready, connect it */
623 err = mlx5_chains_update_prio_prevs(prio_s, ft);
627 kvfree(flow_group_in);
/* Error unwind, reverse order of construction: */
631 list_del(&prio_s->list);
632 rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
635 mlx5_del_flow_rules(miss_rule);
637 mlx5_destroy_flow_group(miss_group);
639 mlx5_chains_destroy_table(chains, ft);
643 kvfree(flow_group_in);
644 mlx5_chains_put_chain(chain_s);
/* Destroy a prio entry: re-point predecessors past it, unlink it
 * from list and hashtable, delete its rule/group/table and drop the
 * chain reference.
 */
649 mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
652 struct fs_chain *chain = prio->chain;
654 WARN_ON(mlx5_chains_update_prio_prevs(prio,
657 list_del(&prio->list);
658 rhashtable_remove_fast(&prios_ht(chains), &prio->node,
660 mlx5_del_flow_rules(prio->miss_rule);
661 mlx5_destroy_flow_group(prio->miss_group);
662 mlx5_chains_destroy_table(chains, prio->ft);
663 mlx5_chains_put_chain(chain);
/* Get (or create, under chains_lock) the flow table for
 * (chain, prio, level), validating the requested coordinates against
 * the supported ranges and recursively materializing earlier levels
 * first.
 */
667 struct mlx5_flow_table *
668 mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
671 struct mlx5_flow_table *prev_fts;
676 if ((chain > mlx5_chains_get_chain_range(chains) &&
677 chain != mlx5_chains_get_nf_ft_chain(chains)) ||
678 prio > mlx5_chains_get_prio_range(chains) ||
679 level > mlx5_chains_get_level_range(chains))
680 return ERR_PTR(-EOPNOTSUPP);
682 /* create earlier levels for correct fs_core lookup when
 */
685 for (l = 0; l < level; l++) {
686 prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
687 if (IS_ERR(prev_fts)) {
688 prio_s = ERR_CAST(prev_fts);
697 mutex_lock(&chains_lock(chains));
698 prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
701 prio_s = mlx5_chains_create_prio(chains, chain,
704 goto err_create_prio;
708 mutex_unlock(&chains_lock(chains));
713 mutex_unlock(&chains_lock(chains));
/* Error unwind: release the earlier levels taken above. */
716 mlx5_chains_put_table(chains, chain, prio, l);
717 return ERR_CAST(prio_s);
/* Release a reference on the (chain, prio, level) table taken by
 * mlx5_chains_get_table(); destroys the prio on the last put.
 */
721 mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
731 mutex_lock(&chains_lock(chains));
732 prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
737 if (--prio_s->ref == 0)
738 mlx5_chains_destroy_prio(chains, prio_s);
739 mutex_unlock(&chains_lock(chains));
/* Recursively release the earlier levels as well. */
742 mlx5_chains_put_table(chains, chain, prio, level);
/* Lookup-failure path: unbalanced put. */
747 mutex_unlock(&chains_lock(chains));
749 "Couldn't find table: (chain: %d prio: %d level: %d)",
/* Return the table that terminates the chains pipeline. */
753 struct mlx5_flow_table *
754 mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
756 return tc_end_ft(chains);
/* Create a standalone (global) chains table at the top of the
 * supported chain/prio/level ranges; requires ignore_flow_level.
 */
759 struct mlx5_flow_table *
760 mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
762 u32 chain, prio, level;
765 if (!mlx5_chains_ignore_flow_level_supported(chains)) {
768 mlx5_core_warn(chains->dev,
769 "Couldn't create global flow table, ignore_flow_level not supported.");
773 chain = mlx5_chains_get_chain_range(chains),
774 prio = mlx5_chains_get_prio_range(chains);
775 level = mlx5_chains_get_level_range(chains);
777 return mlx5_chains_create_table(chains, chain, prio, level);
/* Destroy a table created by mlx5_chains_create_global_table(). */
784 mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
785 struct mlx5_flow_table *ft)
787 mlx5_chains_destroy_table(chains, ft);
/* Allocate and initialize the chains context from attr: copy the
 * configuration, initialize the size pools, both hashtables and the
 * lock. Hashtable init failures unwind in reverse.
 */
790 static struct mlx5_fs_chains *
791 mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
793 struct mlx5_fs_chains *chains_priv;
794 u32 max_flow_counter;
797 chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
799 return ERR_PTR(-ENOMEM);
801 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
802 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
805 "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
806 max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
808 chains_priv->dev = dev;
809 chains_priv->flags = attr->flags;
810 chains_priv->ns = attr->ns;
811 chains_priv->group_num = attr->max_grp_num;
812 chains_priv->chains_mapping = attr->mapping;
/* Both default and end tables start as the caller-provided table. */
813 tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
815 mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
816 mlx5_chains_get_chain_range(chains_priv),
817 mlx5_chains_get_prio_range(chains_priv));
819 mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
821 err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
823 goto init_chains_ht_err;
825 err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
827 goto init_prios_ht_err;
829 mutex_init(&chains_lock(chains_priv));
/* Error unwind for prios_ht init failure. */
834 rhashtable_destroy(&chains_ht(chains_priv));
/* Destroy the lock and both hashtables set up in mlx5_chains_init(). */
841 mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
843 mutex_destroy(&chains_lock(chains));
844 rhashtable_destroy(&prios_ht(chains));
845 rhashtable_destroy(&chains_ht(chains));
/* Public constructor: thin wrapper around mlx5_chains_init(). */
850 struct mlx5_fs_chains *
851 mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
853 struct mlx5_fs_chains *chains;
855 chains = mlx5_chains_init(dev, attr);
/* Public destructor: thin wrapper around mlx5_chains_cleanup(). */
861 mlx5_chains_destroy(struct mlx5_fs_chains *chains)
863 mlx5_chains_cleanup(chains);
/* Allocate a restore tag for a chain id via the mapping context;
 * the tag is returned through *chain_mapping.
 */
867 mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
870 struct mapping_ctx *ctx = chains->chains_mapping;
871 struct mlx5_mapped_obj mapped_obj = {};
873 mapped_obj.type = MLX5_MAPPED_OBJ_CHAIN;
874 mapped_obj.chain = chain;
875 return mapping_add(ctx, &mapped_obj, chain_mapping);
/* Release a restore tag obtained from mlx5_chains_get_chain_mapping(). */
879 mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
881 struct mapping_ctx *ctx = chains->chains_mapping;
883 return mapping_remove(ctx, chain_mapping);