/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))
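
/* Illustrative note (added commentary, not from the original source): the
 * macro above counts its variadic arguments with a compound literal, e.g.
 *
 *	INIT_TREE_NODE_ARRAY_SIZE(a, b, c)
 *	== sizeof((struct init_tree_node[]){a, b, c}) /
 *	   sizeof(struct init_tree_node)
 *	== 3
 *
 * which lets ADD_PRIO()/ADD_NS() below derive .ar_size from __VA_ARGS__ at
 * compile time.
 */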
#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)

#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 5
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 1
#define OFFLOADS_NUM_PRIOS 1
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
	}
};
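
/* Hypothetical walk-through of the tree above (added commentary, not in the
 * original source): each ADD_PRIO() entry becomes a fs_prio whose levels are
 * stacked by set_prio_attrs() further down. For example, with
 * KERNEL_MIN_LEVEL = 5 + 1 = 6 and ETHTOOL_NUM_PRIOS = 11,
 * ETHTOOL_MIN_LEVEL = 6 + 11 = 17 and BY_PASS_MIN_LEVEL =
 * 17 + MLX5_BY_PASS_NUM_PRIOS + LEFTOVERS_NUM_PRIOS. The min_ft_level of
 * each entry is compared against the device's max_ft_level capability in
 * init_root_tree_recursive() before the priority is created.
 */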
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = FIELD_SIZEOF(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
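
/* Example (illustrative only, added commentary): with the parameters above,
 * FTEs and flow groups are keyed by their raw match value/mask, so lookups
 * later in this file pass the key itself rather than a hash, e.g.
 *
 *	fte = rhashtable_lookup_fast(&fg->ftes_hash, spec->match_value,
 *				     rhash_fte);
 *
 * key_len/key_offset describe where that key lives inside the struct.
 */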
static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires holding the
 * FTE lock for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node)
{
	if (node) {
		down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_write(&node->lock);
}

static void tree_put_node(struct fs_node *node)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			/* Only the root namespace doesn't have a parent and
			 * we just need to free its node.
			 */
			down_write_ref_node(parent_node);
			list_del_init(&node->list);
			if (node->del_sw_func)
				node->del_sw_func(node);
			up_write_ref_node(parent_node);
		} else {
			kfree(node);
		}
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node);
}
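
/* Usage sketch (added commentary, not original code): every node is
 * protected by the pair "take lock + hold reference", e.g.
 *
 *	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
 *	...mutate the FTE...
 *	up_write_ref_node(&fte->node);
 *	tree_put_node(&fte->node);
 *
 * The nesting classes (grandparent/parent/child) keep lockdep happy when a
 * table, a group and an FTE are all write-locked on the same call path.
 */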
static int tree_remove_node(struct fs_node *node)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool check_last_reserved(const u32 *match_criteria)
{
	char *match_criteria_reserved =
		MLX5_ADDR_OF(fte_match_param, match_criteria, MLX5_FTE_MATCH_PARAM_RESERVED);

	return	!match_criteria_reserved[0] &&
		!memcmp(match_criteria_reserved, match_criteria_reserved + 1,
			MLX5_FLD_SZ_BYTES(fte_match_param,
					  MLX5_FTE_MATCH_PARAM_RESERVED) - 1);
}

static bool check_valid_mask(u8 match_criteria_enable, const u32 *match_criteria)
{
	if (match_criteria_enable & ~(
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)   |
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) |
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)   |
		(1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)))
		return false;

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, outer_headers);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
			return false;
	}

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, misc_parameters);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
			return false;
	}

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, inner_headers);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4) - 1))
			return false;
	}

	if (!(match_criteria_enable &
	      1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2)) {
		char *fg_type_mask = MLX5_ADDR_OF(fte_match_param,
						  match_criteria, misc_parameters_2);

		if (fg_type_mask[0] ||
		    memcmp(fg_type_mask, fg_type_mask + 1,
			   MLX5_ST_SZ_BYTES(fte_match_set_misc2) - 1))
			return false;
	}

	return check_last_reserved(match_criteria);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	if (!check_valid_mask(spec->match_criteria_enable, spec->match_criteria)) {
		pr_warn("mlx5_core: Match criteria given mismatches match_criteria_enable\n");
		return false;
	}

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return check_last_reserved(spec->match_value);
}
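
/* Illustrative example of a spec that passes the checks above (assumed
 * caller-side code, not part of this file): every bit set in match_value
 * must also be set in match_criteria, and the enabled criteria bits must
 * cover the non-zero header masks:
 *
 *	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 *	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 *			 outer_headers.ethertype);
 *	MLX5_SET(fte_match_param, spec->match_value,
 *		 outer_headers.ethertype, ETH_P_IP);
 */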
static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);

	if (node->active) {
		err = root->cmds->destroy_flow_table(dev, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	fs_get_obj(prio, ft->node.parent);
	prio->num_ft--;
	kfree(ft);
}
static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_rule *rule;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask;
	struct mlx5_core_dev *dev = get_dev(node);
	int err;
	bool update_fte = false;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			      BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		update_fte = true;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
		update_fte = true;
	}
out:
	root = find_root(&ft->node);
	if (update_fte && fte->dests_size) {
		err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte);
		if (err)
			mlx5_core_warn(dev,
				       "%s can't del rule fg id=%d fte_index=%d\n",
				       __func__, fg->id, fte->index);
	}
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(dev, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}
static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_simple_remove(&fg->fte_allocator, index);
	return ret;
}
static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				u32 *match_value,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}
	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}
static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->max_fte = max_fte;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int i = 0;
	int err;

	fs_for_each_ft(iter, prio) {
		i++;
		err = root->cmds->modify_flow_table(dev, iter, ft);
		if (err) {
			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
				       iter->id);
			/* The driver is out of sync with the FW */
			if (i > 1)
				WARN_ON(true);
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}
static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
				     modify_mask, fte);
	up_write_ref_node(&fte->node);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	if (list_empty(&prio->node.children)) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	struct mlx5_flow_table *next_ft = NULL;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int log_table_sz;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (ft_attr->level >= fs_prio->num_levels) {
		err = -ENOSPC;
		goto unlock_root;
	}
	/* The level is related to the
	 * priority level range.
	 */
	ft_attr->level += fs_prio->start_level;
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
	next_ft = find_next_chained_ft(fs_prio);
	err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
					    ft->type, ft->level, log_table_sz,
					    next_ft, &ft->id, ft->flags);
	if (err)
		goto free_ft;

	err = connect_flow_table(root->dev, ft, fs_prio);
	if (err)
		goto destroy_ft;
	ft->node.active = true;
	down_write_ref_node(&fs_prio->node);
	tree_add_node(&ft->node, &fs_prio->node);
	list_add_flow_table(ft, fs_prio);
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node);
	mutex_unlock(&root->chain_lock);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root->dev, ft);
free_ft:
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}
struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}

struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
						     int prio, int max_fte,
						     u32 level, u16 vport)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.max_fte = max_fte;
	ft_attr.level   = level;
	ft_attr.prio    = prio;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    int prio,
				    int num_flow_table_entries,
				    int max_num_groups,
				    u32 level,
				    u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	if (max_num_groups > num_flow_table_entries)
		return ERR_PTR(-EINVAL);

	ft_attr.max_fte = num_flow_table_entries;
	ft_attr.prio    = prio;
	ft_attr.level   = level;
	ft_attr.flags   = flags;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return ft;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;

	return ft;
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
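
/* Usage sketch (illustrative only, with made-up sizes; not part of the
 * driver): a consumer typically creates an auto-grouped table and lets the
 * core carve flow groups out of it on demand in alloc_auto_flow_group():
 *
 *	ft = mlx5_create_auto_grouped_flow_table(ns,
 *						 0,     // prio
 *						 1024,  // table entries
 *						 16,    // max groups
 *						 0,     // level
 *						 0);    // flags
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */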
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_group *fg;
	int err;

	if (!check_valid_mask(match_criteria_enable, match_criteria))
		return ERR_PTR(-EINVAL);

	if (ft->autogroup.active)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
	if (err) {
		tree_put_node(&fg->node);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}
/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(get_dev(&ft->node),
					     ft, fg, fte);
	else
		err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
					     modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fte->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	struct mlx5_flow_group *fg;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		/* We save place for flow groups in addition to max types */
		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);

	/*  ft->max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > ft->max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	ft->autogroup.num_groups++;

out:
	return fg;
}
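
/* Worked example of the sizing above (added for clarity, numbers made up):
 * with max_fte = 1024 and required_groups = 16, group_size = 1024 / 17 = 60,
 * so one group's worth of spare capacity is kept beyond the required groups.
 * The loop then slides candidate_index past existing groups (they are sorted
 * by start_index) until a 60-entry hole is found or the table runs out.
 */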
static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index,   fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
		     d1->vport.num == d2->vport.num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP  |
			     MLX5_FLOW_CONTEXT_ACTION_ENCAP |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR  |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))
		return true;

	return false;
}
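
/* Example of the rule above (added commentary): two rules sharing an FTE may
 * both forward (FWD_DEST differs only in destinations, which are merged),
 * and a count-only rule can piggy-back on anything. But, e.g.,
 * action1 = DROP vs. action2 = FWD_DEST | ENCAP xors to a set containing
 * DROP and ENCAP, so the second rule is rejected with -EEXIST by
 * check_conflicting_ftes() below.
 */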
static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if (flow_act->has_flow_tag &&
	    fte->action.flow_tag != flow_act->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->action.flow_tag,
			       flow_act->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    u32 *match_value,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
{
	struct mlx5_flow_rule *dst;
	struct fs_fte *fte;

	fs_get_obj(fte, handle->rule[0]->node.parent);

	fs_for_each_dst(dst, fte) {
		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
			return dst->dest_attr.counter;
	}

	return NULL;
}

static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
{
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
		return !counter;

	if (!counter)
		return false;

	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  u32 action,
			  struct mlx5_flow_table *ft)
{
	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(dest->counter, action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	     (dest->ft->level <= ft->level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

struct match_list_head {
	struct list_head  list;
	struct match_list first;
};

static void free_match_list(struct match_list_head *head)
{
	if (!list_empty(&head->list)) {
		struct match_list *iter, *match_tmp;

		list_del(&head->first.list);
		tree_put_node(&head->first.g->node);
		list_for_each_entry_safe(iter, match_tmp, &head->list,
					 list) {
			tree_put_node(&iter->g->node);
			list_del(&iter->list);
			kfree(iter);
		}
	}
}

static int build_match_list(struct match_list_head *match_head,
			    struct mlx5_flow_table *ft,
			    struct mlx5_flow_spec *spec)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (likely(list_empty(&match_head->list))) {
			if (!tree_get_node(&g->node))
				continue;
			match_head->first.g = g;
			list_add_tail(&match_head->first.list,
				      &match_head->list);
			continue;
		}

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			free_match_list(match_head);
			err = -ENOMEM;
			goto out;
		}
		if (!tree_get_node(&g->node)) {
			kfree(curr_match);
			continue;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
out:
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64  version;
	int err;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

	list_for_each_entry(iter, match_head, list) {
		nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
	}

search_again_locked:
	version = matched_fgs_get_version(match_head);
	/* Try to find a fg that already contains a matching fte */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
						 rhash_fte);
		if (!fte_tmp || !tree_get_node(&fte_tmp->node))
			continue;

		nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
		if (!take_write) {
			list_for_each_entry(iter, match_head, list)
				up_read_ref_node(&iter->g->node);
		} else {
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
		}

		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node);
		tree_put_node(&fte_tmp->node);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

	/* No group with matching fte found. Try to add a new fte to any
	 * matching fg.
	 */

	if (!take_write) {
		list_for_each_entry(iter, match_head, list)
			up_read_ref_node(&iter->g->node);
		list_for_each_entry(iter, match_head, list)
			nested_down_write_ref_node(&iter->g->node,
						   FS_LOCK_PARENT);
		take_write = true;
	}

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version, for case the new FTE with the
	 * same values was added while the fgs weren't locked
	 */
	if (version != matched_fgs_get_version(match_head))
		goto search_again_locked;

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		if (!g->node.active)
			continue;
		err = insert_fte(g, fte);
		if (err) {
			if (err == -ENOSPC)
				continue;
			list_for_each_entry(iter, match_head, list)
				up_write_ref_node(&iter->g->node);
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		list_for_each_entry(iter, match_head, list)
			up_write_ref_node(&iter->g->node);
		rule = add_rule_fg(g, spec->match_value,
				   flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node);
		tree_put_node(&fte->node);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	list_for_each_entry(iter, match_head, list)
		up_write_ref_node(&iter->g->node);
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list_head match_head;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act->action, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		up_write_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	fte = alloc_fte(ft, spec->match_value, flow_act);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		goto err_release_fg;
	}

	err = insert_fte(g, fte);
	if (err) {
		kmem_cache_free(steering->ftes_cache, fte);
		goto err_release_fg;
	}

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node);
	rule = add_rule_fg(g, spec->match_value, flow_act, dest,
			   dest_num, fte);
	up_write_ref_node(&fte->node);
	tree_put_node(&fte->node);
	tree_put_node(&g->node);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node);
	tree_put_node(&g->node);
	return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int dest_num)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_flow_destination gen_dest = {};
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);
	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!fwd_next_prio_supported(ft))
			return ERR_PTR(-EOPNOTSUPP);
		if (dest_num)
			return ERR_PTR(-EINVAL);
		mutex_lock(&root->chain_lock);
		next_ft = find_next_chained_ft(prio);
		if (next_ft) {
			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			gen_dest.ft = next_ft;
			dest = &gen_dest;
			dest_num = 1;
			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		} else {
			mutex_unlock(&root->chain_lock);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);

	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
		if (!IS_ERR_OR_NULL(handle) &&
		    (list_empty(&handle->rule[0]->next_ft))) {
			mutex_lock(&next_ft->lock);
			list_add(&handle->rule[0]->next_ft,
				 &next_ft->fwd_rules);
			mutex_unlock(&next_ft->lock);
			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
		}
		mutex_unlock(&root->chain_lock);
	}
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);
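
/* End-to-end usage sketch (illustrative only; error handling trimmed and
 * identifiers such as "tirn" are caller-provided assumptions):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_namespace *ns =
 *		mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	struct mlx5_flow_handle *rule =
 *		mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	...
 *	mlx5_del_flow_rules(rule);
 */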
void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	int i;

	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node);
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root->dev, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root->dev,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if  (!(list_first_entry(&prio->node.children,
				struct mlx5_flow_table,
				node.list) == ft))
		return 0;

	next_ft = find_next_chained_ft(prio);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		prio = type;
		break;
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_EGRESS:
		if (steering->egress_root_ns)
			return &steering->egress_root_ns->ns;
		else
			return NULL;
	default:
		return NULL;
	}

	root_ns = steering->root_ns;
	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = FS_TYPE_PRIO;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
{
	struct mlx5_flow_namespace	*ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
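
/* Worked example (added for clarity): FS_CAP() above yields a bit offset
 * into the flow_table_nic_cap layout. GET_FLOW_TABLE_CAP() reads the 32-bit
 * big-endian word at (offset / 32) and shifts the wanted bit down: for
 * offset = 35 it loads word 1 and shifts right by 32 - 1 - 3 = 28, i.e. it
 * extracts the 4th most significant bit of that word.
 */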
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}
	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int i;
	struct mlx5_flow_namespace *fs_ns;
	int err;

	fs_get_obj(fs_ns, fs_parent_node);
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       &fs_ns->node,
					       init_node, i);
		if (err)
			return err;
	}
	return 0;
}
static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kvzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, NULL);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio)
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level   = ANCHOR_LEVEL;
	ft_attr.prio    = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}
2400 static void clean_tree(struct fs_node *node)
2403 struct fs_node *iter;
2404 struct fs_node *temp;
2406 tree_get_node(node);
2407 list_for_each_entry_safe(iter, temp, &node->children, list)
2409 tree_put_node(node);
2410 tree_remove_node(node);
2414 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2419 clean_tree(&root_ns->ns.node);
static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
}

static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
}
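/* Top-level teardown: release every root namespace created by mlx5_init_fs(),
 * then the flow-counter statistics and the FTE/FG slab caches.
 */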
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_egress_acls_root_ns(dev);
	cleanup_ingress_acls_root_ns(dev);
	cleanup_root_ns(steering->fdb_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
}
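/* Sniffer RX/TX roots carry a single priority with a single level. */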
static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_tx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	if (IS_ERR(prio)) {
		cleanup_root_ns(steering->sniffer_rx_root_ns);
		return PTR_ERR(prio);
	}
	return 0;
}
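/* The FDB root gets two priorities: prio 0 with two table levels and prio 1
 * with a single level; the eswitch offloads code places its fast-path and
 * slow-path (miss) FDB tables in this layout.
 */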
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 2);
	if (IS_ERR(prio))
		goto out_err;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
	if (IS_ERR(prio))
		goto out_err;

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	return PTR_ERR(prio);
}
static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
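/* Egress/ingress ACL roots are created for every vport; the arrays are sized
 * by MLX5_TOTAL_VPORTS() and unwound on partial failure.
 */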
static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
					       sizeof(*steering->esw_egress_root_ns),
					       GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	return err;
}
static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
						sizeof(*steering->esw_ingress_root_ns),
						GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}

	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	return err;
}
static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	/* create 1 prio*/
	prio = fs_create_prio(&steering->egress_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}
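/* Entry point called at device load: allocate the steering context and its
 * slab caches, then create only the root namespaces whose flow-table
 * capabilities the device actually reports.
 */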
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering)
		return -ENOMEM;
	steering->dev = dev;
	dev->priv.steering = steering;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
			err = init_egress_acls_root_ns(dev);
			if (err)
				goto err;
		}
		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
			err = init_ingress_acls_root_ns(dev);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_IPSEC_DEV(dev)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;
err:
	mlx5_cleanup_fs(dev);
	return err;
}
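/* Attach an underlay QP to the NIC RX root flow table and track it on the
 * root namespace list. Used e.g. by the enhanced IPoIB path, where steering
 * is performed on top of an underlay QP rather than directly on the wire.
 */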
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
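/* Detach a previously added underlay QPN; the QPN must exist on the root
 * namespace list, otherwise -EINVAL is returned.
 */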
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);