2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mutex.h>
34 #include <linux/mlx5/driver.h>
35 #include <linux/mlx5/vport.h>
36 #include <linux/mlx5/eswitch.h>
38 #include "mlx5_core.h"
41 #include "diag/fs_tracepoint.h"
42 #include "accel/ipsec.h"
43 #include "fpga/ipsec.h"
46 #define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
47 sizeof(struct init_tree_node))
49 #define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
50 ...) {.type = FS_TYPE_PRIO,\
51 .min_ft_level = min_level_val,\
52 .num_levels = num_levels_val,\
53 .num_leaf_prios = num_prios_val,\
55 .children = (struct init_tree_node[]) {__VA_ARGS__},\
56 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
59 #define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
60 ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
63 #define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
64 .def_miss_action = def_miss_act,\
65 .children = (struct init_tree_node[]) {__VA_ARGS__},\
66 .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
69 #define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
72 #define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
74 #define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
75 .caps = (long[]) {__VA_ARGS__} }
77 #define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
78 FS_CAP(flow_table_properties_nic_receive.modify_root), \
79 FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
80 FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
82 #define FS_CHAINING_CAPS_EGRESS \
84 FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
85 FS_CAP(flow_table_properties_nic_transmit.modify_root), \
86 FS_CAP(flow_table_properties_nic_transmit \
87 .identified_miss_table_mode), \
88 FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
90 #define FS_CHAINING_CAPS_RDMA_TX \
92 FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
93 FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
94 FS_CAP(flow_table_properties_nic_transmit_rdma \
95 .identified_miss_table_mode), \
96 FS_CAP(flow_table_properties_nic_transmit_rdma \
99 #define LEFTOVERS_NUM_LEVELS 1
100 #define LEFTOVERS_NUM_PRIOS 1
102 #define BY_PASS_PRIO_NUM_LEVELS 1
103 #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
106 #define ETHTOOL_PRIO_NUM_LEVELS 1
107 #define ETHTOOL_NUM_PRIOS 11
108 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
109 /* Vlan, mac, ttc, inner ttc, aRFS */
110 #define KERNEL_NIC_PRIO_NUM_LEVELS 5
111 #define KERNEL_NIC_NUM_PRIOS 1
112 /* One more level for tc */
113 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
115 #define KERNEL_NIC_TC_NUM_PRIOS 1
116 #define KERNEL_NIC_TC_NUM_LEVELS 2
118 #define ANCHOR_NUM_LEVELS 1
119 #define ANCHOR_NUM_PRIOS 1
120 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
122 #define OFFLOADS_MAX_FT 2
123 #define OFFLOADS_NUM_PRIOS 2
124 #define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
126 #define LAG_PRIO_NUM_LEVELS 1
127 #define LAG_NUM_PRIOS 1
128 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
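/* Static template for the NIC RX steering tree. Each node is either a
 * priority (with a minimum flow table level and capability requirements) or a
 * namespace; init_root_tree() instantiates this template at steering init.
 */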
135 static struct init_tree_node {
136 enum fs_node_type type;
137 struct init_tree_node *children;
139 struct node_caps caps;
144 enum mlx5_flow_table_miss_action def_miss_action;
146 .type = FS_TYPE_NAMESPACE,
148 .children = (struct init_tree_node[]){
149 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
150 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
151 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
152 BY_PASS_PRIO_NUM_LEVELS))),
153 ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
154 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
155 ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
156 LAG_PRIO_NUM_LEVELS))),
157 ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
158 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
159 ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
161 ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
162 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
163 ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
164 ETHTOOL_PRIO_NUM_LEVELS))),
165 ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
166 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
167 ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
168 KERNEL_NIC_TC_NUM_LEVELS),
169 ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
170 KERNEL_NIC_PRIO_NUM_LEVELS))),
171 ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
172 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
173 ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
174 LEFTOVERS_NUM_LEVELS))),
175 ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
176 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
177 ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
178 ANCHOR_NUM_LEVELS))),
182 static struct init_tree_node egress_root_fs = {
183 .type = FS_TYPE_NAMESPACE,
185 .children = (struct init_tree_node[]) {
186 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
187 FS_CHAINING_CAPS_EGRESS,
188 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
189 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
190 BY_PASS_PRIO_NUM_LEVELS))),
194 #define RDMA_RX_BYPASS_PRIO 0
195 #define RDMA_RX_KERNEL_PRIO 1
196 static struct init_tree_node rdma_rx_root_fs = {
197 .type = FS_TYPE_NAMESPACE,
199 .children = (struct init_tree_node[]) {
200 [RDMA_RX_BYPASS_PRIO] =
201 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
203 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
204 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
205 BY_PASS_PRIO_NUM_LEVELS))),
206 [RDMA_RX_KERNEL_PRIO] =
207 ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
209 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
210 ADD_MULTIPLE_PRIO(1, 1))),
214 static struct init_tree_node rdma_tx_root_fs = {
215 .type = FS_TYPE_NAMESPACE,
217 .children = (struct init_tree_node[]) {
218 ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
219 FS_CHAINING_CAPS_RDMA_TX,
220 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
221 ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
222 BY_PASS_PRIO_NUM_LEVELS))),
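/* Lock classes for nested locking of fs_node rw_semaphores: a flow table
 * (grandparent), flow group (parent) and FTE (child) may be locked together,
 * always from top to bottom.
 */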
226 enum fs_i_lock_class { FS_LOCK_GRANDPARENT, FS_LOCK_PARENT, FS_LOCK_CHILD };
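/* FTEs are hashed by their match value within a flow group; flow groups are
 * hashed by their match criteria mask within a flow table.
 */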
232 static const struct rhashtable_params rhash_fte = {
233 .key_len = sizeof_field(struct fs_fte, val),
234 .key_offset = offsetof(struct fs_fte, val),
235 .head_offset = offsetof(struct fs_fte, hash),
236 .automatic_shrinking = true,
240 static const struct rhashtable_params rhash_fg = {
241 .key_len = sizeof_field(struct mlx5_flow_group, mask),
242 .key_offset = offsetof(struct mlx5_flow_group, mask),
243 .head_offset = offsetof(struct mlx5_flow_group, hash),
244 .automatic_shrinking = true,
249 static void del_hw_flow_table(struct fs_node *node);
250 static void del_hw_flow_group(struct fs_node *node);
251 static void del_hw_fte(struct fs_node *node);
252 static void del_sw_flow_table(struct fs_node *node);
253 static void del_sw_flow_group(struct fs_node *node);
254 static void del_sw_fte(struct fs_node *node);
255 static void del_sw_prio(struct fs_node *node);
256 static void del_sw_ns(struct fs_node *node);
257 /* Deleting a rule (destination) is a special case that
258 * requires holding the FTE lock for the whole deletion process. */
260 static void del_sw_hw_rule(struct fs_node *node);
261 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
262 struct mlx5_flow_destination *d2);
263 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
264 static struct mlx5_flow_rule *
265 find_flow_rule(struct fs_fte *fte,
266 struct mlx5_flow_destination *dest);
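/* Tree nodes are created with one reference held by their creator. When the
 * last reference is dropped, del_hw_func tears down the FW object and
 * del_sw_func frees the SW state (see tree_put_node()).
 */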
268 static void tree_init_node(struct fs_node *node,
269 void (*del_hw_func)(struct fs_node *),
270 void (*del_sw_func)(struct fs_node *))
272 refcount_set(&node->refcount, 1);
273 INIT_LIST_HEAD(&node->list);
274 INIT_LIST_HEAD(&node->children);
275 init_rwsem(&node->lock);
276 node->del_hw_func = del_hw_func;
277 node->del_sw_func = del_sw_func;
278 node->active = false;
281 static void tree_add_node(struct fs_node *node, struct fs_node *parent)
284 refcount_inc(&parent->refcount);
285 node->parent = parent;
287 /* Parent is the root */
291 node->root = parent->root;
294 static int tree_get_node(struct fs_node *node)
296 return refcount_inc_not_zero(&node->refcount);
299 static void nested_down_read_ref_node(struct fs_node *node,
300 enum fs_i_lock_class class)
303 down_read_nested(&node->lock, class);
304 refcount_inc(&node->refcount);
308 static void nested_down_write_ref_node(struct fs_node *node,
309 enum fs_i_lock_class class)
312 down_write_nested(&node->lock, class);
313 refcount_inc(&node->refcount);
317 static void down_write_ref_node(struct fs_node *node, bool locked)
321 down_write(&node->lock);
322 refcount_inc(&node->refcount);
326 static void up_read_ref_node(struct fs_node *node)
328 refcount_dec(&node->refcount);
329 up_read(&node->lock);
332 static void up_write_ref_node(struct fs_node *node, bool locked)
334 refcount_dec(&node->refcount);
336 up_write(&node->lock);
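/* Drop a reference on @node. On the last reference the HW object is
 * destroyed first, then the node is unlinked from its parent and its SW
 * state is freed, and the parent's reference is dropped in turn.
 */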
339 static void tree_put_node(struct fs_node *node, bool locked)
341 struct fs_node *parent_node = node->parent;
343 if (refcount_dec_and_test(&node->refcount)) {
344 if (node->del_hw_func)
345 node->del_hw_func(node);
347 /* Only the root namespace has no parent; in that case we just
348 * need to free its node. */
350 down_write_ref_node(parent_node, locked);
351 list_del_init(&node->list);
352 if (node->del_sw_func)
353 node->del_sw_func(node);
354 up_write_ref_node(parent_node, locked);
360 if (!node && parent_node)
361 tree_put_node(parent_node, locked);
364 static int tree_remove_node(struct fs_node *node, bool locked)
366 if (refcount_read(&node->refcount) > 1) {
367 refcount_dec(&node->refcount);
370 tree_put_node(node, locked);
374 static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
377 struct fs_prio *iter_prio;
379 fs_for_each_prio(iter_prio, ns) {
380 if (iter_prio->prio == prio)
387 static bool check_valid_spec(const struct mlx5_flow_spec *spec)
391 for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
392 if (spec->match_value[i] & ~spec->match_criteria[i]) {
393 pr_warn("mlx5_core: match_value differs from match_criteria\n");
400 static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
402 struct fs_node *root;
403 struct mlx5_flow_namespace *ns;
407 if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
408 pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
412 ns = container_of(root, struct mlx5_flow_namespace, node);
413 return container_of(ns, struct mlx5_flow_root_namespace, ns);
416 static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
418 struct mlx5_flow_root_namespace *root = find_root(node);
421 return root->dev->priv.steering;
425 static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
427 struct mlx5_flow_root_namespace *root = find_root(node);
434 static void del_sw_ns(struct fs_node *node)
439 static void del_sw_prio(struct fs_node *node)
444 static void del_hw_flow_table(struct fs_node *node)
446 struct mlx5_flow_root_namespace *root;
447 struct mlx5_flow_table *ft;
448 struct mlx5_core_dev *dev;
451 fs_get_obj(ft, node);
452 dev = get_dev(&ft->node);
453 root = find_root(&ft->node);
454 trace_mlx5_fs_del_ft(ft);
457 err = root->cmds->destroy_flow_table(root, ft);
459 mlx5_core_warn(dev, "flow steering can't destroy ft\n");
463 static void del_sw_flow_table(struct fs_node *node)
465 struct mlx5_flow_table *ft;
466 struct fs_prio *prio;
468 fs_get_obj(ft, node);
470 rhltable_destroy(&ft->fgs_hash);
471 fs_get_obj(prio, ft->node.parent);
476 static void modify_fte(struct fs_fte *fte)
478 struct mlx5_flow_root_namespace *root;
479 struct mlx5_flow_table *ft;
480 struct mlx5_flow_group *fg;
481 struct mlx5_core_dev *dev;
484 fs_get_obj(fg, fte->node.parent);
485 fs_get_obj(ft, fg->node.parent);
486 dev = get_dev(&fte->node);
488 root = find_root(&ft->node);
489 err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
492 "%s can't del rule fg id=%d fte_index=%d\n",
493 __func__, fg->id, fte->index);
494 fte->modify_mask = 0;
497 static void del_sw_hw_rule(struct fs_node *node)
499 struct mlx5_flow_rule *rule;
502 fs_get_obj(rule, node);
503 fs_get_obj(fte, rule->node.parent);
504 trace_mlx5_fs_del_rule(rule);
505 if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
506 mutex_lock(&rule->dest_attr.ft->lock);
507 list_del(&rule->next_ft);
508 mutex_unlock(&rule->dest_attr.ft->lock);
511 if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
514 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
515 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
516 fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
520 if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
523 BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
529 static void del_hw_fte(struct fs_node *node)
531 struct mlx5_flow_root_namespace *root;
532 struct mlx5_flow_table *ft;
533 struct mlx5_flow_group *fg;
534 struct mlx5_core_dev *dev;
538 fs_get_obj(fte, node);
539 fs_get_obj(fg, fte->node.parent);
540 fs_get_obj(ft, fg->node.parent);
542 trace_mlx5_fs_del_fte(fte);
543 dev = get_dev(&ft->node);
544 root = find_root(&ft->node);
546 err = root->cmds->delete_fte(root, ft, fte);
549 "flow steering can't delete fte in index %d of flow group id %d\n",
555 static void del_sw_fte(struct fs_node *node)
557 struct mlx5_flow_steering *steering = get_steering(node);
558 struct mlx5_flow_group *fg;
562 fs_get_obj(fte, node);
563 fs_get_obj(fg, fte->node.parent);
565 err = rhashtable_remove_fast(&fg->ftes_hash,
569 ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
570 kmem_cache_free(steering->ftes_cache, fte);
573 static void del_hw_flow_group(struct fs_node *node)
575 struct mlx5_flow_root_namespace *root;
576 struct mlx5_flow_group *fg;
577 struct mlx5_flow_table *ft;
578 struct mlx5_core_dev *dev;
580 fs_get_obj(fg, node);
581 fs_get_obj(ft, fg->node.parent);
582 dev = get_dev(&ft->node);
583 trace_mlx5_fs_del_fg(fg);
585 root = find_root(&ft->node);
586 if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
587 mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
591 static void del_sw_flow_group(struct fs_node *node)
593 struct mlx5_flow_steering *steering = get_steering(node);
594 struct mlx5_flow_group *fg;
595 struct mlx5_flow_table *ft;
598 fs_get_obj(fg, node);
599 fs_get_obj(ft, fg->node.parent);
601 rhashtable_destroy(&fg->ftes_hash);
602 ida_destroy(&fg->fte_allocator);
603 if (ft->autogroup.active &&
604 fg->max_ftes == ft->autogroup.group_size &&
605 fg->start_index < ft->autogroup.max_fte)
606 ft->autogroup.num_groups--;
607 err = rhltable_remove(&ft->fgs_hash,
611 kmem_cache_free(steering->fgs_cache, fg);
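/* Allocate a free index for @fte within @fg's range, hash it by match value
 * and link it as a child of the group node.
 */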
614 static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
619 index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
623 fte->index = index + fg->start_index;
624 ret = rhashtable_insert_fast(&fg->ftes_hash,
630 tree_add_node(&fte->node, &fg->node);
631 list_add_tail(&fte->node.list, &fg->node.children);
635 ida_simple_remove(&fg->fte_allocator, index);
639 static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
640 const struct mlx5_flow_spec *spec,
641 struct mlx5_flow_act *flow_act)
643 struct mlx5_flow_steering *steering = get_steering(&ft->node);
646 fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
648 return ERR_PTR(-ENOMEM);
650 memcpy(fte->val, &spec->match_value, sizeof(fte->val));
651 fte->node.type = FS_TYPE_FLOW_ENTRY;
652 fte->action = *flow_act;
653 fte->flow_context = spec->flow_context;
655 tree_init_node(&fte->node, NULL, del_sw_fte);
660 static void dealloc_flow_group(struct mlx5_flow_steering *steering,
661 struct mlx5_flow_group *fg)
663 rhashtable_destroy(&fg->ftes_hash);
664 kmem_cache_free(steering->fgs_cache, fg);
667 static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
668 u8 match_criteria_enable,
669 const void *match_criteria,
673 struct mlx5_flow_group *fg;
676 fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
678 return ERR_PTR(-ENOMEM);
680 ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
682 kmem_cache_free(steering->fgs_cache, fg);
686 ida_init(&fg->fte_allocator);
687 fg->mask.match_criteria_enable = match_criteria_enable;
688 memcpy(&fg->mask.match_criteria, match_criteria,
689 sizeof(fg->mask.match_criteria));
690 fg->node.type = FS_TYPE_FLOW_GROUP;
691 fg->start_index = start_index;
692 fg->max_ftes = end_index - start_index + 1;
697 static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
698 u8 match_criteria_enable,
699 const void *match_criteria,
702 struct list_head *prev)
704 struct mlx5_flow_steering *steering = get_steering(&ft->node);
705 struct mlx5_flow_group *fg;
708 fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
709 start_index, end_index);
713 /* initialize refcnt, add to parent list */
714 ret = rhltable_insert(&ft->fgs_hash,
718 dealloc_flow_group(steering, fg);
722 tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
723 tree_add_node(&fg->node, &ft->node);
724 /* Add node to group list */
725 list_add(&fg->node.list, prev);
726 atomic_inc(&ft->node.version);
731 static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
732 enum fs_flow_table_type table_type,
733 enum fs_flow_table_op_mod op_mod,
736 struct mlx5_flow_table *ft;
739 ft = kzalloc(sizeof(*ft), GFP_KERNEL);
741 return ERR_PTR(-ENOMEM);
743 ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
750 ft->node.type = FS_TYPE_FLOW_TABLE;
752 ft->type = table_type;
754 ft->max_fte = max_fte;
756 INIT_LIST_HEAD(&ft->fwd_rules);
757 mutex_init(&ft->lock);
762 /* If reverse is false, then we search for the first flow table in the
763 * root sub-tree from start (closest from the right); else we search for
764 * the last flow table in the root sub-tree up to start (closest from the left). */
766 static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
767 struct list_head *start,
770 #define list_advance_entry(pos, reverse) \
771 ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
773 #define list_for_each_advance_continue(pos, head, reverse) \
774 for (pos = list_advance_entry(pos, reverse); \
775 &pos->list != (head); \
776 pos = list_advance_entry(pos, reverse))
778 struct fs_node *iter = list_entry(start, struct fs_node, list);
779 struct mlx5_flow_table *ft = NULL;
781 if (!root || root->type == FS_TYPE_PRIO_CHAINS)
784 list_for_each_advance_continue(iter, &root->children, reverse) {
785 if (iter->type == FS_TYPE_FLOW_TABLE) {
786 fs_get_obj(ft, iter);
789 ft = find_closest_ft_recursive(iter, &iter->children, reverse);
797 /* If reverse is false then return the first flow table in the next priority
798 * of prio in the tree, else return the last flow table in the previous
799 * priority of prio in the tree. */
801 static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
803 struct mlx5_flow_table *ft = NULL;
804 struct fs_node *curr_node;
805 struct fs_node *parent;
807 parent = prio->node.parent;
808 curr_node = &prio->node;
809 while (!ft && parent) {
810 ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
812 parent = curr_node->parent;
817 /* Assuming the whole tree is locked by the chain lock mutex */
818 static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
820 return find_closest_ft(prio, false);
823 /* Assuming the whole tree is locked by the chain lock mutex */
824 static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
826 return find_closest_ft(prio, true);
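/* Update every flow table in @prio in FW so that @ft becomes its next (miss)
 * table.
 */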
829 static int connect_fts_in_prio(struct mlx5_core_dev *dev,
830 struct fs_prio *prio,
831 struct mlx5_flow_table *ft)
833 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
834 struct mlx5_flow_table *iter;
838 fs_for_each_ft(iter, prio) {
840 err = root->cmds->modify_flow_table(root, iter, ft);
842 mlx5_core_warn(dev, "Failed to modify flow table %d\n",
844 /* The driver is out of sync with the FW */
853 /* Connect flow tables from previous priority of prio to ft */
854 static int connect_prev_fts(struct mlx5_core_dev *dev,
855 struct mlx5_flow_table *ft,
856 struct fs_prio *prio)
858 struct mlx5_flow_table *prev_ft;
860 prev_ft = find_prev_chained_ft(prio);
862 struct fs_prio *prev_prio;
864 fs_get_obj(prev_prio, prev_ft->node.parent);
865 return connect_fts_in_prio(dev, prev_prio, ft);
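/* If @ft sits at a lower level than the current root flow table, make it the
 * new FW root: once per underlay QPN, or once with QPN 0 when the underlay
 * list is empty.
 */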
870 static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
873 struct mlx5_flow_root_namespace *root = find_root(&prio->node);
874 struct mlx5_ft_underlay_qp *uqp;
875 int min_level = INT_MAX;
880 min_level = root->root_ft->level;
882 if (ft->level >= min_level)
885 if (list_empty(&root->underlay_qpns)) {
886 /* Don't set any QPN (zero) in case QPN list is empty */
888 err = root->cmds->update_root_ft(root, ft, qpn, false);
890 list_for_each_entry(uqp, &root->underlay_qpns, list) {
892 err = root->cmds->update_root_ft(root, ft,
900 mlx5_core_warn(root->dev,
901 "Update root flow table of id(%u) qpn(%d) failed\n",
909 static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
910 struct mlx5_flow_destination *dest)
912 struct mlx5_flow_root_namespace *root;
913 struct mlx5_flow_table *ft;
914 struct mlx5_flow_group *fg;
916 int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
919 fs_get_obj(fte, rule->node.parent);
920 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
922 down_write_ref_node(&fte->node, false);
923 fs_get_obj(fg, fte->node.parent);
924 fs_get_obj(ft, fg->node.parent);
926 memcpy(&rule->dest_attr, dest, sizeof(*dest));
927 root = find_root(&ft->node);
928 err = root->cmds->update_fte(root, ft, fg,
930 up_write_ref_node(&fte->node, false);
935 int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
936 struct mlx5_flow_destination *new_dest,
937 struct mlx5_flow_destination *old_dest)
942 if (handle->num_rules != 1)
944 return _mlx5_modify_rule_destination(handle->rule[0],
948 for (i = 0; i < handle->num_rules; i++) {
949 if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
950 return _mlx5_modify_rule_destination(handle->rule[i],
957 /* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
958 static int connect_fwd_rules(struct mlx5_core_dev *dev,
959 struct mlx5_flow_table *new_next_ft,
960 struct mlx5_flow_table *old_next_ft)
962 struct mlx5_flow_destination dest = {};
963 struct mlx5_flow_rule *iter;
966 /* new_next_ft and old_next_ft could be NULL only
967 * when we create/destroy the anchor flow table. */
969 if (!new_next_ft || !old_next_ft)
972 dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
973 dest.ft = new_next_ft;
975 mutex_lock(&old_next_ft->lock);
976 list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
977 mutex_unlock(&old_next_ft->lock);
978 list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
979 err = _mlx5_modify_rule_destination(iter, &dest);
981 pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
987 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
988 struct fs_prio *prio)
990 struct mlx5_flow_table *next_ft;
993 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
995 if (list_empty(&prio->node.children)) {
996 err = connect_prev_fts(dev, ft, prio);
1000 next_ft = find_next_chained_ft(prio);
1001 err = connect_fwd_rules(dev, ft, next_ft);
1006 if (MLX5_CAP_FLOWTABLE(dev,
1007 flow_table_properties_nic_receive.modify_root))
1008 err = update_root_ft_create(ft, prio);
1012 static void list_add_flow_table(struct mlx5_flow_table *ft,
1013 struct fs_prio *prio)
1015 struct list_head *prev = &prio->node.children;
1016 struct mlx5_flow_table *iter;
1018 fs_for_each_ft(iter, prio) {
1019 if (iter->level > ft->level)
1021 prev = &iter->node.list;
1023 list_add(&ft->node.list, prev);
1026 static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1027 struct mlx5_flow_table_attr *ft_attr,
1028 enum fs_flow_table_op_mod op_mod,
1031 struct mlx5_flow_root_namespace *root = find_root(&ns->node);
1032 bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
1033 struct mlx5_flow_table *next_ft;
1034 struct fs_prio *fs_prio = NULL;
1035 struct mlx5_flow_table *ft;
1040 pr_err("mlx5: flow steering failed to find root of namespace\n");
1041 return ERR_PTR(-ENODEV);
1044 mutex_lock(&root->chain_lock);
1045 fs_prio = find_prio(ns, ft_attr->prio);
1051 /* The level is related to the
1052 * priority level range. */
1054 if (ft_attr->level >= fs_prio->num_levels) {
1059 ft_attr->level += fs_prio->start_level;
1062 /* The level is related to the
1063 * priority level range. */
1065 ft = alloc_flow_table(ft_attr->level,
1067 ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
1069 op_mod, ft_attr->flags);
1075 tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
1076 log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
1077 next_ft = unmanaged ? ft_attr->next_ft :
1078 find_next_chained_ft(fs_prio);
1079 ft->def_miss_action = ns->def_miss_action;
1080 err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
1085 err = connect_flow_table(root->dev, ft, fs_prio);
1090 ft->node.active = true;
1091 down_write_ref_node(&fs_prio->node, false);
1093 tree_add_node(&ft->node, &fs_prio->node);
1094 list_add_flow_table(ft, fs_prio);
1096 ft->node.root = fs_prio->node.root;
1099 up_write_ref_node(&fs_prio->node, false);
1100 mutex_unlock(&root->chain_lock);
1101 trace_mlx5_fs_add_ft(ft);
1104 root->cmds->destroy_flow_table(root, ft);
1108 mutex_unlock(&root->chain_lock);
1109 return ERR_PTR(err);
1112 struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
1113 struct mlx5_flow_table_attr *ft_attr)
1115 return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
1118 struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
1119 int prio, int max_fte,
1120 u32 level, u16 vport)
1122 struct mlx5_flow_table_attr ft_attr = {};
1124 ft_attr.max_fte = max_fte;
1125 ft_attr.level = level;
1126 ft_attr.prio = prio;
1128 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport);
1131 struct mlx5_flow_table*
1132 mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
1133 int prio, u32 level)
1135 struct mlx5_flow_table_attr ft_attr = {};
1137 ft_attr.level = level;
1138 ft_attr.prio = prio;
1139 return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
1141 EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
1143 struct mlx5_flow_table*
1144 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
1145 struct mlx5_flow_table_attr *ft_attr)
1147 int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
1148 int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
1149 int max_num_groups = ft_attr->autogroup.max_num_groups;
1150 struct mlx5_flow_table *ft;
1152 if (max_num_groups > autogroups_max_fte)
1153 return ERR_PTR(-EINVAL);
1154 if (num_reserved_entries > ft_attr->max_fte)
1155 return ERR_PTR(-EINVAL);
1157 ft = mlx5_create_flow_table(ns, ft_attr);
1161 ft->autogroup.active = true;
1162 ft->autogroup.required_groups = max_num_groups;
1163 ft->autogroup.max_fte = autogroups_max_fte;
1164 /* We reserve space for flow groups in addition to max types */
1165 ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
1169 EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
1171 struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
1174 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1175 void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
1176 fg_in, match_criteria);
1177 u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
1179 match_criteria_enable);
1180 int start_index = MLX5_GET(create_flow_group_in, fg_in,
1182 int end_index = MLX5_GET(create_flow_group_in, fg_in,
1184 struct mlx5_flow_group *fg;
1187 if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
1188 return ERR_PTR(-EPERM);
1190 down_write_ref_node(&ft->node, false);
1191 fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
1192 start_index, end_index,
1193 ft->node.children.prev);
1194 up_write_ref_node(&ft->node, false);
1198 err = root->cmds->create_flow_group(root, ft, fg_in, fg);
1200 tree_put_node(&fg->node, false);
1201 return ERR_PTR(err);
1203 trace_mlx5_fs_add_fg(fg);
1204 fg->node.active = true;
1209 static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
1211 struct mlx5_flow_rule *rule;
1213 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1217 INIT_LIST_HEAD(&rule->next_ft);
1218 rule->node.type = FS_TYPE_FLOW_DEST;
1220 memcpy(&rule->dest_attr, dest, sizeof(*dest));
1225 static struct mlx5_flow_handle *alloc_handle(int num_rules)
1227 struct mlx5_flow_handle *handle;
1229 handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
1233 handle->num_rules = num_rules;
1238 static void destroy_flow_handle(struct fs_fte *fte,
1239 struct mlx5_flow_handle *handle,
1240 struct mlx5_flow_destination *dest,
1244 if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
1246 list_del(&handle->rule[i]->node.list);
1247 kfree(handle->rule[i]);
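/* Build a handle with one rule per destination. Existing rules with an
 * identical destination are reused (their refcount is bumped); for newly
 * allocated rules the relevant modify mask bits (flow counters vs.
 * destination list) are set so the FTE can be updated in FW.
 */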
1253 static struct mlx5_flow_handle *
1254 create_flow_handle(struct fs_fte *fte,
1255 struct mlx5_flow_destination *dest,
1260 struct mlx5_flow_handle *handle;
1261 struct mlx5_flow_rule *rule = NULL;
1262 static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
1263 static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
1267 handle = alloc_handle((dest_num) ? dest_num : 1);
1269 return ERR_PTR(-ENOMEM);
1273 rule = find_flow_rule(fte, dest + i);
1275 refcount_inc(&rule->node.refcount);
1281 rule = alloc_rule(dest + i);
1285 /* Add dest to the dests list - we need flow tables to be at the
1286 * end of the list for forward-to-next-prio rules. */
1288 tree_init_node(&rule->node, NULL, del_sw_hw_rule);
1290 dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
1291 list_add(&rule->node.list, &fte->node.children);
1293 list_add_tail(&rule->node.list, &fte->node.children);
1297 type = dest[i].type ==
1298 MLX5_FLOW_DESTINATION_TYPE_COUNTER;
1299 *modify_mask |= type ? count : dst;
1302 handle->rule[i] = rule;
1303 } while (++i < dest_num);
1308 destroy_flow_handle(fte, handle, dest, i);
1309 return ERR_PTR(-ENOMEM);
1312 /* fte should not be deleted while calling this function */
1313 static struct mlx5_flow_handle *
1314 add_rule_fte(struct fs_fte *fte,
1315 struct mlx5_flow_group *fg,
1316 struct mlx5_flow_destination *dest,
1320 struct mlx5_flow_root_namespace *root;
1321 struct mlx5_flow_handle *handle;
1322 struct mlx5_flow_table *ft;
1323 int modify_mask = 0;
1325 bool new_rule = false;
1327 handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
1329 if (IS_ERR(handle) || !new_rule)
1333 modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
1335 fs_get_obj(ft, fg->node.parent);
1336 root = find_root(&fg->node);
1337 if (!(fte->status & FS_FTE_STATUS_EXISTING))
1338 err = root->cmds->create_fte(root, ft, fg, fte);
1340 err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
1344 fte->node.active = true;
1345 fte->status |= FS_FTE_STATUS_EXISTING;
1346 atomic_inc(&fg->node.version);
1352 destroy_flow_handle(fte, handle, dest, handle->num_rules);
1353 return ERR_PTR(err);
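/* Find a free range of group_size flow indexes in an autogrouped table and
 * insert a new flow group there, keeping the group list sorted by
 * start_index.
 */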
1356 static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
1357 const struct mlx5_flow_spec *spec)
1359 struct list_head *prev = &ft->node.children;
1360 u32 max_fte = ft->autogroup.max_fte;
1361 unsigned int candidate_index = 0;
1362 unsigned int group_size = 0;
1363 struct mlx5_flow_group *fg;
1365 if (!ft->autogroup.active)
1366 return ERR_PTR(-ENOENT);
1368 if (ft->autogroup.num_groups < ft->autogroup.required_groups)
1369 group_size = ft->autogroup.group_size;
1371 /* max_fte == ft->autogroup.max_types */
1372 if (group_size == 0)
1375 /* sorted by start_index */
1376 fs_for_each_fg(fg, ft) {
1377 if (candidate_index + group_size > fg->start_index)
1378 candidate_index = fg->start_index + fg->max_ftes;
1381 prev = &fg->node.list;
1384 if (candidate_index + group_size > max_fte)
1385 return ERR_PTR(-ENOSPC);
1387 fg = alloc_insert_flow_group(ft,
1388 spec->match_criteria_enable,
1389 spec->match_criteria,
1391 candidate_index + group_size - 1,
1396 if (group_size == ft->autogroup.group_size)
1397 ft->autogroup.num_groups++;
1403 static int create_auto_flow_group(struct mlx5_flow_table *ft,
1404 struct mlx5_flow_group *fg)
1406 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1407 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1408 void *match_criteria_addr;
1409 u8 src_esw_owner_mask_on;
1414 in = kvzalloc(inlen, GFP_KERNEL);
1418 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
1419 fg->mask.match_criteria_enable);
1420 MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
1421 MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
1424 misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
1426 src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
1427 source_eswitch_owner_vhca_id);
1428 MLX5_SET(create_flow_group_in, in,
1429 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
1431 match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
1432 in, match_criteria);
1433 memcpy(match_criteria_addr, fg->mask.match_criteria,
1434 sizeof(fg->mask.match_criteria));
1436 err = root->cmds->create_flow_group(root, ft, in, fg);
1438 fg->node.active = true;
1439 trace_mlx5_fs_add_fg(fg);
1446 static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
1447 struct mlx5_flow_destination *d2)
1449 if (d1->type == d2->type) {
1450 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
1451 d1->vport.num == d2->vport.num &&
1452 d1->vport.flags == d2->vport.flags &&
1453 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
1454 (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
1455 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
1456 (d1->vport.pkt_reformat->id ==
1457 d2->vport.pkt_reformat->id) : true)) ||
1458 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1459 d1->ft == d2->ft) ||
1460 (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
1461 d1->tir_num == d2->tir_num) ||
1462 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
1463 d1->ft_num == d2->ft_num))
1470 static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
1471 struct mlx5_flow_destination *dest)
1473 struct mlx5_flow_rule *rule;
1475 list_for_each_entry(rule, &fte->node.children, node.list) {
1476 if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
1482 static bool check_conflicting_actions(u32 action1, u32 action2)
1484 u32 xored_actions = action1 ^ action2;
1486 /* if one rule only wants to count, it's ok */
1487 if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
1488 action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
1491 if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1492 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
1493 MLX5_FLOW_CONTEXT_ACTION_DECAP |
1494 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1495 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
1496 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
1497 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
1498 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
1504 static int check_conflicting_ftes(struct fs_fte *fte,
1505 const struct mlx5_flow_context *flow_context,
1506 const struct mlx5_flow_act *flow_act)
1508 if (check_conflicting_actions(flow_act->action, fte->action.action)) {
1509 mlx5_core_warn(get_dev(&fte->node),
1510 "Found two FTEs with conflicting actions\n");
1514 if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
1515 fte->flow_context.flow_tag != flow_context->flow_tag) {
1516 mlx5_core_warn(get_dev(&fte->node),
1517 "FTE flow tag %u already exists with different flow tag %u\n",
1518 fte->flow_context.flow_tag,
1519 flow_context->flow_tag);
1526 static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
1527 const struct mlx5_flow_spec *spec,
1528 struct mlx5_flow_act *flow_act,
1529 struct mlx5_flow_destination *dest,
1533 struct mlx5_flow_handle *handle;
1538 ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
1540 return ERR_PTR(ret);
1542 old_action = fte->action.action;
1543 fte->action.action |= flow_act->action;
1544 handle = add_rule_fte(fte, fg, dest, dest_num,
1545 old_action != flow_act->action);
1546 if (IS_ERR(handle)) {
1547 fte->action.action = old_action;
1550 trace_mlx5_fs_set_fte(fte, false);
1552 for (i = 0; i < handle->num_rules; i++) {
1553 if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
1554 tree_add_node(&handle->rule[i]->node, &fte->node);
1555 trace_mlx5_fs_add_rule(handle->rule[i]);
1561 static bool counter_is_valid(u32 action)
1563 return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
1564 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
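/* Validate a rule destination against the action and the source table:
 * counters are only valid together with drop/forward actions, and forwarding
 * to a flow table at the same or a lower level is rejected unless
 * FLOW_ACT_IGNORE_FLOW_LEVEL is set.
 */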
1567 static bool dest_is_valid(struct mlx5_flow_destination *dest,
1568 struct mlx5_flow_act *flow_act,
1569 struct mlx5_flow_table *ft)
1571 bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
1572 u32 action = flow_act->action;
1574 if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
1575 return counter_is_valid(action);
1577 if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
1581 if (ft->type != FS_FT_FDB)
1584 if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
1585 dest->ft->type != FS_FT_FDB)
1589 if (!dest || ((dest->type ==
1590 MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
1591 (dest->ft->level <= ft->level && !ignore_level)))
1597 struct list_head list;
1598 struct mlx5_flow_group *g;
1601 static void free_match_list(struct match_list *head, bool ft_locked)
1603 struct match_list *iter, *match_tmp;
1605 list_for_each_entry_safe(iter, match_tmp, &head->list,
1607 tree_put_node(&iter->g->node, ft_locked);
1608 list_del(&iter->list);
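/* Collect, under RCU, all flow groups in @ft whose match criteria equal the
 * spec's criteria, taking a reference on each; the caller must release the
 * list with free_match_list().
 */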
1613 static int build_match_list(struct match_list *match_head,
1614 struct mlx5_flow_table *ft,
1615 const struct mlx5_flow_spec *spec,
1618 struct rhlist_head *tmp, *list;
1619 struct mlx5_flow_group *g;
1623 INIT_LIST_HEAD(&match_head->list);
1624 /* Collect all fgs which have a matching match_criteria */
1625 list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
1626 /* RCU is atomic, we can't execute FW commands here */
1627 rhl_for_each_entry_rcu(g, tmp, list, hash) {
1628 struct match_list *curr_match;
1630 if (unlikely(!tree_get_node(&g->node)))
1633 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
1635 free_match_list(match_head, ft_locked);
1640 list_add_tail(&curr_match->list, &match_head->list);
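/* Sum the version counters of all matched groups; used to detect concurrent
 * FTE insertions between the unlocked search and the locked retry.
 */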
1647 static u64 matched_fgs_get_version(struct list_head *match_head)
1649 struct match_list *iter;
1652 list_for_each_entry(iter, match_head, list)
1653 version += (u64)atomic_read(&iter->g->node.version);
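/* Look up an FTE by match value inside @g and return it with its node locked
 * for write (taking the group lock for read or write as requested by
 * @take_write); returns NULL if no active FTE is found.
 */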
1657 static struct fs_fte *
1658 lookup_fte_locked(struct mlx5_flow_group *g,
1659 const u32 *match_value,
1662 struct fs_fte *fte_tmp;
1665 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1667 nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
1668 fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
1670 if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
1674 if (!fte_tmp->node.active) {
1675 tree_put_node(&fte_tmp->node, false);
1680 nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
1683 up_write_ref_node(&g->node, false);
1685 up_read_ref_node(&g->node);
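/* Try to attach the new rule to an FTE with an identical match value in one
 * of the matching groups; if none exists, insert a new FTE into the first
 * matching group that has room. Returns -EAGAIN if the table version changed
 * underneath us and the caller should retry.
 */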
1689 static struct mlx5_flow_handle *
1690 try_add_to_existing_fg(struct mlx5_flow_table *ft,
1691 struct list_head *match_head,
1692 const struct mlx5_flow_spec *spec,
1693 struct mlx5_flow_act *flow_act,
1694 struct mlx5_flow_destination *dest,
1698 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1699 struct mlx5_flow_group *g;
1700 struct mlx5_flow_handle *rule;
1701 struct match_list *iter;
1702 bool take_write = false;
1707 fte = alloc_fte(ft, spec, flow_act);
1709 return ERR_PTR(-ENOMEM);
1711 search_again_locked:
1712 if (flow_act->flags & FLOW_ACT_NO_APPEND)
1714 version = matched_fgs_get_version(match_head);
1715 /* Try to find an fte with identical match value and attempt to update its action. */
1718 list_for_each_entry(iter, match_head, list) {
1719 struct fs_fte *fte_tmp;
1722 fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
1725 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
1726 up_write_ref_node(&fte_tmp->node, false);
1727 tree_put_node(&fte_tmp->node, false);
1728 kmem_cache_free(steering->ftes_cache, fte);
1733 /* No group with a matching fte was found, or we skipped the search.
1734 * Try to add a new fte to any matching fg. */
1737 /* Check the ft version, in case a new flow group
1738 * was added while the fgs weren't locked. */
1740 if (atomic_read(&ft->node.version) != ft_version) {
1741 rule = ERR_PTR(-EAGAIN);
1745 /* Check the fgs version. If the version has changed it could be that an
1746 * FTE with the same match value was added while the fgs weren't locked. */
1749 if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
1750 version != matched_fgs_get_version(match_head)) {
1752 goto search_again_locked;
1755 list_for_each_entry(iter, match_head, list) {
1758 if (!g->node.active)
1761 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1763 err = insert_fte(g, fte);
1765 up_write_ref_node(&g->node, false);
1768 kmem_cache_free(steering->ftes_cache, fte);
1769 return ERR_PTR(err);
1772 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1773 up_write_ref_node(&g->node, false);
1774 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1775 up_write_ref_node(&fte->node, false);
1776 tree_put_node(&fte->node, false);
1779 rule = ERR_PTR(-ENOENT);
1781 kmem_cache_free(steering->ftes_cache, fte);
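/* Add rules for the given spec and destinations to @ft: first try, under the
 * table read lock, to reuse an existing matching flow group/FTE; if that
 * fails with -ENOENT or -EAGAIN, retake the table lock for write and create
 * a new autogrouped flow group and FTE.
 */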
1785 static struct mlx5_flow_handle *
1786 _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1787 const struct mlx5_flow_spec *spec,
1788 struct mlx5_flow_act *flow_act,
1789 struct mlx5_flow_destination *dest,
1793 struct mlx5_flow_steering *steering = get_steering(&ft->node);
1794 struct mlx5_flow_handle *rule;
1795 struct match_list match_head;
1796 struct mlx5_flow_group *g;
1797 bool take_write = false;
1803 if (!check_valid_spec(spec))
1804 return ERR_PTR(-EINVAL);
1806 for (i = 0; i < dest_num; i++) {
1807 if (!dest_is_valid(&dest[i], flow_act, ft))
1808 return ERR_PTR(-EINVAL);
1810 nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1811 search_again_locked:
1812 version = atomic_read(&ft->node.version);
1814 /* Collect all fgs which have a matching match_criteria */
1815 err = build_match_list(&match_head, ft, spec, take_write);
1818 up_write_ref_node(&ft->node, false);
1820 up_read_ref_node(&ft->node);
1821 return ERR_PTR(err);
1825 up_read_ref_node(&ft->node);
1827 rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
1829 free_match_list(&match_head, take_write);
1830 if (!IS_ERR(rule) ||
1831 (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
1833 up_write_ref_node(&ft->node, false);
1838 nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
1842 if (PTR_ERR(rule) == -EAGAIN ||
1843 version != atomic_read(&ft->node.version))
1844 goto search_again_locked;
1846 g = alloc_auto_flow_group(ft, spec);
1849 up_write_ref_node(&ft->node, false);
1853 fte = alloc_fte(ft, spec, flow_act);
1855 up_write_ref_node(&ft->node, false);
1860 nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
1861 up_write_ref_node(&ft->node, false);
1863 err = create_auto_flow_group(ft, g);
1865 goto err_release_fg;
1867 err = insert_fte(g, fte);
1869 goto err_release_fg;
1871 nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
1872 up_write_ref_node(&g->node, false);
1873 rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
1874 up_write_ref_node(&fte->node, false);
1875 tree_put_node(&fte->node, false);
1876 tree_put_node(&g->node, false);
1880 up_write_ref_node(&g->node, false);
1881 kmem_cache_free(steering->ftes_cache, fte);
1883 tree_put_node(&g->node, false);
1884 return ERR_PTR(err);
1887 static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
1889 return ((ft->type == FS_FT_NIC_RX) &&
1890 (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
1893 struct mlx5_flow_handle *
1894 mlx5_add_flow_rules(struct mlx5_flow_table *ft,
1895 const struct mlx5_flow_spec *spec,
1896 struct mlx5_flow_act *flow_act,
1897 struct mlx5_flow_destination *dest,
1900 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1901 static const struct mlx5_flow_spec zero_spec = {};
1902 struct mlx5_flow_destination gen_dest = {};
1903 struct mlx5_flow_table *next_ft = NULL;
1904 struct mlx5_flow_handle *handle = NULL;
1905 u32 sw_action = flow_act->action;
1906 struct fs_prio *prio;
1911 fs_get_obj(prio, ft->node.parent);
1912 if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1913 if (!fwd_next_prio_supported(ft))
1914 return ERR_PTR(-EOPNOTSUPP);
1916 return ERR_PTR(-EINVAL);
1917 mutex_lock(&root->chain_lock);
1918 next_ft = find_next_chained_ft(prio);
1920 gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1921 gen_dest.ft = next_ft;
1924 flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1926 mutex_unlock(&root->chain_lock);
1927 return ERR_PTR(-EOPNOTSUPP);
1931 handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
1933 if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
1934 if (!IS_ERR_OR_NULL(handle) &&
1935 (list_empty(&handle->rule[0]->next_ft))) {
1936 mutex_lock(&next_ft->lock);
1937 list_add(&handle->rule[0]->next_ft,
1938 &next_ft->fwd_rules);
1939 mutex_unlock(&next_ft->lock);
1940 handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
1942 mutex_unlock(&root->chain_lock);
1946 EXPORT_SYMBOL(mlx5_add_flow_rules);
1948 void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
1953 /* In order to consolidate the HW changes we lock the FTE against other
1954 * changes and increase its refcount, so that the "del" functions of the
1955 * FTE are not invoked; we handle them here instead.
1956 * The removal of the rules is done under the locked FTE.
1957 * After removing all the handle's rules, if there are remaining
1958 * rules, it means we just need to modify the FTE in FW, and
1959 * unlock/decrease the refcount we increased before.
1960 * Otherwise, it means the FTE should be deleted. First delete the
1961 * FTE in FW. Then, unlock the FTE, and proceed with tree_put_node of
1962 * the FTE, which will handle the last decrease of the refcount, as
1963 * well as the required handling of its parent. */
1965 fs_get_obj(fte, handle->rule[0]->node.parent);
1966 down_write_ref_node(&fte->node, false);
1967 for (i = handle->num_rules - 1; i >= 0; i--)
1968 tree_remove_node(&handle->rule[i]->node, true);
1969 if (fte->modify_mask && fte->dests_size) {
1971 up_write_ref_node(&fte->node, false);
1973 del_hw_fte(&fte->node);
1974 up_write(&fte->node.lock);
1975 tree_put_node(&fte->node, false);
1979 EXPORT_SYMBOL(mlx5_del_flow_rules);
1981 /* Assuming prio->node.children (flow tables) is sorted by level */
1982 static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
1984 struct fs_prio *prio;
1986 fs_get_obj(prio, ft->node.parent);
1988 if (!list_is_last(&ft->node.list, &prio->node.children))
1989 return list_next_entry(ft, node.list);
1990 return find_next_chained_ft(prio);
1993 static int update_root_ft_destroy(struct mlx5_flow_table *ft)
1995 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
1996 struct mlx5_ft_underlay_qp *uqp;
1997 struct mlx5_flow_table *new_root_ft = NULL;
2001 if (root->root_ft != ft)
2004 new_root_ft = find_next_ft(ft);
2006 root->root_ft = NULL;
2010 if (list_empty(&root->underlay_qpns)) {
2011 /* Don't set any QPN (zero) in case QPN list is empty */
2013 err = root->cmds->update_root_ft(root, new_root_ft,
2016 list_for_each_entry(uqp, &root->underlay_qpns, list) {
2018 err = root->cmds->update_root_ft(root,
2027 mlx5_core_warn(root->dev,
2028 "Update root flow table of id(%u) qpn(%d) failed\n",
2031 root->root_ft = new_root_ft;
2036 /* Connect the flow tables from the previous priority to
2037 * the next flow table. */
2039 static int disconnect_flow_table(struct mlx5_flow_table *ft)
2041 struct mlx5_core_dev *dev = get_dev(&ft->node);
2042 struct mlx5_flow_table *next_ft;
2043 struct fs_prio *prio;
2046 err = update_root_ft_destroy(ft);
2050 fs_get_obj(prio, ft->node.parent);
2051 if (!(list_first_entry(&prio->node.children,
2052 struct mlx5_flow_table,
2056 next_ft = find_next_chained_ft(prio);
2057 err = connect_fwd_rules(dev, next_ft, ft);
2061 err = connect_prev_fts(dev, next_ft, prio);
2063 mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
2068 int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
2070 struct mlx5_flow_root_namespace *root = find_root(&ft->node);
2073 mutex_lock(&root->chain_lock);
2074 if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
2075 err = disconnect_flow_table(ft);
2077 mutex_unlock(&root->chain_lock);
2080 if (tree_remove_node(&ft->node, false))
2081 mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
2083 mutex_unlock(&root->chain_lock);
2087 EXPORT_SYMBOL(mlx5_destroy_flow_table);
2089 void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
2091 if (tree_remove_node(&fg->node, false))
2092 mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
2096 struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
2099 struct mlx5_flow_steering *steering = dev->priv.steering;
2101 if (!steering || !steering->fdb_sub_ns)
2104 return steering->fdb_sub_ns[n];
2106 EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
2108 struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
2109 enum mlx5_flow_namespace_type type)
2111 struct mlx5_flow_steering *steering = dev->priv.steering;
2112 struct mlx5_flow_root_namespace *root_ns;
2114 struct fs_prio *fs_prio;
2115 struct mlx5_flow_namespace *ns;
2121 case MLX5_FLOW_NAMESPACE_FDB:
2122 if (steering->fdb_root_ns)
2123 return &steering->fdb_root_ns->ns;
2125 case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
2126 if (steering->sniffer_rx_root_ns)
2127 return &steering->sniffer_rx_root_ns->ns;
2129 case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
2130 if (steering->sniffer_tx_root_ns)
2131 return &steering->sniffer_tx_root_ns->ns;
2137 if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
2138 root_ns = steering->egress_root_ns;
2139 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
2140 root_ns = steering->rdma_rx_root_ns;
2141 prio = RDMA_RX_BYPASS_PRIO;
2142 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
2143 root_ns = steering->rdma_rx_root_ns;
2144 prio = RDMA_RX_KERNEL_PRIO;
2145 } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
2146 root_ns = steering->rdma_tx_root_ns;
2147 } else { /* Must be NIC RX */
2148 root_ns = steering->root_ns;
2155 fs_prio = find_prio(&root_ns->ns, prio);
2159 ns = list_first_entry(&fs_prio->node.children,
2165 EXPORT_SYMBOL(mlx5_get_flow_namespace);
2167 struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
2168 enum mlx5_flow_namespace_type type,
2171 struct mlx5_flow_steering *steering = dev->priv.steering;
2173 if (!steering || vport >= mlx5_eswitch_get_total_vports(dev))
2177 case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
2178 if (steering->esw_egress_root_ns &&
2179 steering->esw_egress_root_ns[vport])
2180 return &steering->esw_egress_root_ns[vport]->ns;
2183 case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
2184 if (steering->esw_ingress_root_ns &&
2185 steering->esw_ingress_root_ns[vport])
2186 return &steering->esw_ingress_root_ns[vport]->ns;
2194 static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
2197 enum fs_node_type type)
2199 struct fs_prio *fs_prio;
2201 fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
2203 return ERR_PTR(-ENOMEM);
2205 fs_prio->node.type = type;
2206 tree_init_node(&fs_prio->node, NULL, del_sw_prio);
2207 tree_add_node(&fs_prio->node, &ns->node);
2208 fs_prio->num_levels = num_levels;
2209 fs_prio->prio = prio;
2210 list_add_tail(&fs_prio->node.list, &ns->node.children);
2215 static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
2219 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
2222 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
2223 unsigned int prio, int num_levels)
2225 return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
2228 static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
2231 ns->node.type = FS_TYPE_NAMESPACE;
2236 static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
2239 struct mlx5_flow_namespace *ns;
2241 ns = kzalloc(sizeof(*ns), GFP_KERNEL);
2243 return ERR_PTR(-ENOMEM);
2245 fs_init_namespace(ns);
2246 ns->def_miss_action = def_miss_act;
2247 tree_init_node(&ns->node, NULL, del_sw_ns);
2248 tree_add_node(&ns->node, &prio->node);
2249 list_add_tail(&ns->node.list, &prio->node.children);
2254 static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
2255 struct init_tree_node *prio_metadata)
2257 struct fs_prio *fs_prio;
2260 for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
2261 fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
2262 if (IS_ERR(fs_prio))
2263 return PTR_ERR(fs_prio);
2268 #define FLOW_TABLE_BIT_SZ 1
2269 #define GET_FLOW_TABLE_CAP(dev, offset) \
2270 ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
2272 (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
2273 static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
2277 for (i = 0; i < caps->arr_sz; i++) {
2278 if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
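/* Recursively instantiate the static init_tree_node template: prios whose
 * minimum flow table level or required caps are not supported by the device
 * are skipped, and leaf prios are expanded via create_leaf_prios().
 */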
2284 static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
2285 struct init_tree_node *init_node,
2286 struct fs_node *fs_parent_node,
2287 struct init_tree_node *init_parent_node,
2290 int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
2291 flow_table_properties_nic_receive.
2293 struct mlx5_flow_namespace *fs_ns;
2294 struct fs_prio *fs_prio;
2295 struct fs_node *base;
2299 if (init_node->type == FS_TYPE_PRIO) {
2300 if ((init_node->min_ft_level > max_ft_level) ||
2301 !has_required_caps(steering->dev, &init_node->caps))
2304 fs_get_obj(fs_ns, fs_parent_node);
2305 if (init_node->num_leaf_prios)
2306 return create_leaf_prios(fs_ns, prio, init_node);
2307 fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
2308 if (IS_ERR(fs_prio))
2309 return PTR_ERR(fs_prio);
2310 base = &fs_prio->node;
2311 } else if (init_node->type == FS_TYPE_NAMESPACE) {
2312 fs_get_obj(fs_prio, fs_parent_node);
2313 fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
2315 return PTR_ERR(fs_ns);
2316 base = &fs_ns->node;
2321 for (i = 0; i < init_node->ar_size; i++) {
2322 err = init_root_tree_recursive(steering, &init_node->children[i],
2323 base, init_node, prio);
2326 if (init_node->children[i].type == FS_TYPE_PRIO &&
2327 init_node->children[i].num_leaf_prios) {
2328 prio += init_node->children[i].num_leaf_prios;
2335 static int init_root_tree(struct mlx5_flow_steering *steering,
2336 struct init_tree_node *init_node,
2337 struct fs_node *fs_parent_node)
2340 struct mlx5_flow_namespace *fs_ns;
2343 fs_get_obj(fs_ns, fs_parent_node);
2344 for (i = 0; i < init_node->ar_size; i++) {
2345 err = init_root_tree_recursive(steering, &init_node->children[i],
2354 static struct mlx5_flow_root_namespace
2355 *create_root_ns(struct mlx5_flow_steering *steering,
2356 enum fs_flow_table_type table_type)
2358 const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
2359 struct mlx5_flow_root_namespace *root_ns;
2360 struct mlx5_flow_namespace *ns;
2362 if (mlx5_accel_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
2363 (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
2364 cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);
2366 /* Create the root namespace */
2367 root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
2371 root_ns->dev = steering->dev;
2372 root_ns->table_type = table_type;
2373 root_ns->cmds = cmds;
2375 INIT_LIST_HEAD(&root_ns->underlay_qpns);
2378 fs_init_namespace(ns);
2379 mutex_init(&root_ns->chain_lock);
2380 tree_init_node(&ns->node, NULL, NULL);
2381 tree_add_node(&ns->node, NULL);
2386 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
2388 static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
2390 struct fs_prio *prio;
2392 fs_for_each_prio(prio, ns) {
2393 /* This updates prio start_level and num_levels */
2394 set_prio_attrs_in_prio(prio, acc_level);
2395 acc_level += prio->num_levels;
2400 static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
2402 struct mlx5_flow_namespace *ns;
2403 int acc_level_ns = acc_level;
2405 prio->start_level = acc_level;
2406 fs_for_each_ns(ns, prio) {
2407 /* This updates start_level and num_levels of ns's priority descendants */
2408 acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
2410 /* If this is a prio with chains, we can jump from one chain
2411 * (namespace) to another, so we accumulate the levels
2413 if (prio->node.type == FS_TYPE_PRIO_CHAINS)
2414 acc_level = acc_level_ns;
2417 if (!prio->num_levels)
2418 prio->num_levels = acc_level_ns - prio->start_level;
2419 WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
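/* Worked example (illustrative, not in the original source), assuming a prio
 * whose two child namespaces each contain one sub-prio of 2 levels:
 *
 * - regular FS_TYPE_PRIO: both namespaces start at prio->start_level (they
 *   overlap), so acc_level_ns ends at start_level + 2 and an unset
 *   num_levels becomes 2;
 * - FS_TYPE_PRIO_CHAINS: acc_level is advanced after each namespace, so the
 *   second namespace starts where the first ended and an unset num_levels
 *   accumulates to 4.
 */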
2422 static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
2424 struct mlx5_flow_namespace *ns = &root_ns->ns;
2425 struct fs_prio *prio;
2426 int start_level = 0;
2428 fs_for_each_prio(prio, ns) {
2429 set_prio_attrs_in_prio(prio, start_level);
2430 start_level += prio->num_levels;
2434 #define ANCHOR_PRIO 0
2435 #define ANCHOR_SIZE 1
2436 #define ANCHOR_LEVEL 0
2437 static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
2439 struct mlx5_flow_namespace *ns = NULL;
2440 struct mlx5_flow_table_attr ft_attr = {};
2441 struct mlx5_flow_table *ft;
2443 ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
2447 ft_attr.max_fte = ANCHOR_SIZE;
2448 ft_attr.level = ANCHOR_LEVEL;
2449 ft_attr.prio = ANCHOR_PRIO;
2451 ft = mlx5_create_flow_table(ns, &ft_attr);
2453 mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
2459 static int init_root_ns(struct mlx5_flow_steering *steering)
2463 steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
2464 if (!steering->root_ns)
2467 err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
2471 set_prio_attrs(steering->root_ns);
2472 err = create_anchor_flow_table(steering);
2479 cleanup_root_ns(steering->root_ns);
2480 steering->root_ns = NULL;
2484 static void clean_tree(struct fs_node *node)
2487 struct fs_node *iter;
2488 struct fs_node *temp;
2490 tree_get_node(node);
2491 list_for_each_entry_safe(iter, temp, &node->children, list)
2493 tree_put_node(node, false);
2494 tree_remove_node(node, false);
2498 static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
2503 clean_tree(&root_ns->ns.node);
2506 static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
2508 struct mlx5_flow_steering *steering = dev->priv.steering;
2511 if (!steering->esw_egress_root_ns)
2514 for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
2515 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2517 kfree(steering->esw_egress_root_ns);
2518 steering->esw_egress_root_ns = NULL;
2521 static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2523 struct mlx5_flow_steering *steering = dev->priv.steering;
2526 if (!steering->esw_ingress_root_ns)
2529 for (i = 0; i < mlx5_eswitch_get_total_vports(dev); i++)
2530 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2532 kfree(steering->esw_ingress_root_ns);
2533 steering->esw_ingress_root_ns = NULL;
2536 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
2538 struct mlx5_flow_steering *steering = dev->priv.steering;
2540 cleanup_root_ns(steering->root_ns);
2541 cleanup_egress_acls_root_ns(dev);
2542 cleanup_ingress_acls_root_ns(dev);
2543 cleanup_root_ns(steering->fdb_root_ns);
2544 steering->fdb_root_ns = NULL;
2545 kfree(steering->fdb_sub_ns);
2546 steering->fdb_sub_ns = NULL;
2547 cleanup_root_ns(steering->sniffer_rx_root_ns);
2548 cleanup_root_ns(steering->sniffer_tx_root_ns);
2549 cleanup_root_ns(steering->rdma_rx_root_ns);
2550 cleanup_root_ns(steering->rdma_tx_root_ns);
2551 cleanup_root_ns(steering->egress_root_ns);
2552 mlx5_cleanup_fc_stats(dev);
2553 kmem_cache_destroy(steering->ftes_cache);
2554 kmem_cache_destroy(steering->fgs_cache);
2558 static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
2560 struct fs_prio *prio;
2562 steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
2563 if (!steering->sniffer_tx_root_ns)
2566 /* Create single prio */
2567 prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
2568 return PTR_ERR_OR_ZERO(prio);
2571 static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
2573 struct fs_prio *prio;
2575 steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
2576 if (!steering->sniffer_rx_root_ns)
2579 /* Create single prio */
2580 prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
2581 return PTR_ERR_OR_ZERO(prio);
2584 static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
2588 steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
2589 if (!steering->rdma_rx_root_ns)
2592 err = init_root_tree(steering, &rdma_rx_root_fs,
2593 &steering->rdma_rx_root_ns->ns.node);
2597 set_prio_attrs(steering->rdma_rx_root_ns);
2602 cleanup_root_ns(steering->rdma_rx_root_ns);
2603 steering->rdma_rx_root_ns = NULL;
2607 static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
2611 steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
2612 if (!steering->rdma_tx_root_ns)
2615 err = init_root_tree(steering, &rdma_tx_root_fs,
2616 &steering->rdma_tx_root_ns->ns.node);
2620 set_prio_attrs(steering->rdma_tx_root_ns);
2625 cleanup_root_ns(steering->rdma_tx_root_ns);
2626 steering->rdma_tx_root_ns = NULL;
2630 /* FT and tc chains are stored in the same array so we can re-use the
2631 * mlx5_get_fdb_sub_ns() and tc API for FT chains.
2632 * When creating a new ns for each chain, store it in the first available slot.
2633 * tc chains are assumed to be created and stored first, and only then the FT chain.
2635 static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2636 struct mlx5_flow_namespace *ns)
2640 while (steering->fdb_sub_ns[chain])
2643 steering->fdb_sub_ns[chain] = ns;
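/* Illustrative sketch (not in the original source): once the chains are
 * stored, a consumer can resolve the namespace backing a given chain with
 * the exported accessor, e.g.:
 *
 *	struct mlx5_flow_namespace *ns;
 *
 *	ns = mlx5_get_fdb_sub_ns(dev, chain);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *
 * 'dev' and 'chain' (0 .. FDB_NUM_CHAINS - 1) are assumed caller-side names;
 * the error handling shown is a suggestion, not taken from this file.
 */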
2646 static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
2647 struct fs_prio *maj_prio)
2649 struct mlx5_flow_namespace *ns;
2650 struct fs_prio *min_prio;
2653 ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
2657 for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
2658 min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
2659 if (IS_ERR(min_prio))
2660 return PTR_ERR(min_prio);
2663 store_fdb_sub_ns_prio_chain(steering, ns);
2668 static int create_fdb_chains(struct mlx5_flow_steering *steering,
2672 struct fs_prio *maj_prio;
2677 levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
2678 maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
2681 if (IS_ERR(maj_prio))
2682 return PTR_ERR(maj_prio);
2684 for (chain = 0; chain < chains; chain++) {
2685 err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
2693 static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
2697 steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
2698 sizeof(*steering->fdb_sub_ns),
2700 if (!steering->fdb_sub_ns)
2703 err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
2707 err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
2714 static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
2716 struct fs_prio *maj_prio;
2719 steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
2720 if (!steering->fdb_root_ns)
2723 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
2725 if (IS_ERR(maj_prio)) {
2726 err = PTR_ERR(maj_prio);
2729 err = create_fdb_fast_path(steering);
2733 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
2734 if (IS_ERR(maj_prio)) {
2735 err = PTR_ERR(maj_prio);
2739 /* We put this priority last, knowing that nothing will get here
2740 * unless explicitly forwarded to. This is possible because the
2741 * slow path tables have catch-all rules and nothing gets passed
2742 * beyond them.
2744 maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
2745 if (IS_ERR(maj_prio)) {
2746 err = PTR_ERR(maj_prio);
2750 set_prio_attrs(steering->fdb_root_ns);
2754 cleanup_root_ns(steering->fdb_root_ns);
2755 kfree(steering->fdb_sub_ns);
2756 steering->fdb_sub_ns = NULL;
2757 steering->fdb_root_ns = NULL;
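/* Summary (added note, derived from the code above): on success the FDB root
 * namespace ends up ordered as FDB_BYPASS_PATH, then the fast-path chains
 * (FDB_TC_OFFLOAD followed by FDB_FT_OFFLOAD), then FDB_SLOW_PATH and
 * finally FDB_PER_VPORT, which is only reached when traffic is explicitly
 * forwarded to it.
 */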
2761 static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2763 struct fs_prio *prio;
2765 steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
2766 if (!steering->esw_egress_root_ns[vport])
2770 prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
2771 return PTR_ERR_OR_ZERO(prio);
2774 static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
2776 struct fs_prio *prio;
2778 steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
2779 if (!steering->esw_ingress_root_ns[vport])
2783 prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
2784 return PTR_ERR_OR_ZERO(prio);
2787 static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
2789 struct mlx5_flow_steering *steering = dev->priv.steering;
2790 int total_vports = mlx5_eswitch_get_total_vports(dev);
2794 steering->esw_egress_root_ns =
2795 kcalloc(total_vports,
2796 sizeof(*steering->esw_egress_root_ns),
2798 if (!steering->esw_egress_root_ns)
2801 for (i = 0; i < total_vports; i++) {
2802 err = init_egress_acl_root_ns(steering, i);
2804 goto cleanup_root_ns;
2810 for (i--; i >= 0; i--)
2811 cleanup_root_ns(steering->esw_egress_root_ns[i]);
2812 kfree(steering->esw_egress_root_ns);
2813 steering->esw_egress_root_ns = NULL;
2817 static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
2819 struct mlx5_flow_steering *steering = dev->priv.steering;
2820 int total_vports = mlx5_eswitch_get_total_vports(dev);
2824 steering->esw_ingress_root_ns =
2825 kcalloc(total_vports,
2826 sizeof(*steering->esw_ingress_root_ns),
2828 if (!steering->esw_ingress_root_ns)
2831 for (i = 0; i < total_vports; i++) {
2832 err = init_ingress_acl_root_ns(steering, i);
2834 goto cleanup_root_ns;
2840 for (i--; i >= 0; i--)
2841 cleanup_root_ns(steering->esw_ingress_root_ns[i]);
2842 kfree(steering->esw_ingress_root_ns);
2843 steering->esw_ingress_root_ns = NULL;
2847 static int init_egress_root_ns(struct mlx5_flow_steering *steering)
2851 steering->egress_root_ns = create_root_ns(steering,
2853 if (!steering->egress_root_ns)
2856 err = init_root_tree(steering, &egress_root_fs,
2857 &steering->egress_root_ns->ns.node);
2860 set_prio_attrs(steering->egress_root_ns);
2863 cleanup_root_ns(steering->egress_root_ns);
2864 steering->egress_root_ns = NULL;
2868 int mlx5_init_fs(struct mlx5_core_dev *dev)
2870 struct mlx5_flow_steering *steering;
2873 err = mlx5_init_fc_stats(dev);
2877 steering = kzalloc(sizeof(*steering), GFP_KERNEL);
2880 steering->dev = dev;
2881 dev->priv.steering = steering;
2883 steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
2884 sizeof(struct mlx5_flow_group), 0,
2886 steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
2888 if (!steering->ftes_cache || !steering->fgs_cache) {
2893 if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
2894 (MLX5_CAP_GEN(dev, nic_flow_table))) ||
2895 ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
2896 MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
2897 MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
2898 err = init_root_ns(steering);
2903 if (MLX5_ESWITCH_MANAGER(dev)) {
2904 if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
2905 err = init_fdb_root_ns(steering);
2909 if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
2910 err = init_egress_acls_root_ns(dev);
2914 if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
2915 err = init_ingress_acls_root_ns(dev);
2921 if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
2922 err = init_sniffer_rx_root_ns(steering);
2927 if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
2928 err = init_sniffer_tx_root_ns(steering);
2933 if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
2934 MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
2935 err = init_rdma_rx_root_ns(steering);
2940 if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
2941 err = init_rdma_tx_root_ns(steering);
2946 if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
2947 err = init_egress_root_ns(steering);
2954 mlx5_cleanup_fs(dev);
2958 int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2960 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
2961 struct mlx5_ft_underlay_qp *new_uqp;
2964 new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
2968 mutex_lock(&root->chain_lock);
2970 if (!root->root_ft) {
2972 goto update_ft_fail;
2975 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
2978 mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
2980 goto update_ft_fail;
2983 new_uqp->qpn = underlay_qpn;
2984 list_add_tail(&new_uqp->list, &root->underlay_qpns);
2986 mutex_unlock(&root->chain_lock);
2991 mutex_unlock(&root->chain_lock);
2995 EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
2997 int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
2999 struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
3000 struct mlx5_ft_underlay_qp *uqp;
3004 mutex_lock(&root->chain_lock);
3005 list_for_each_entry(uqp, &root->underlay_qpns, list) {
3006 if (uqp->qpn == underlay_qpn) {
3013 mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
3019 err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
3022 mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
3025 list_del(&uqp->list);
3026 mutex_unlock(&root->chain_lock);
3032 mutex_unlock(&root->chain_lock);
3035 EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
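/* Illustrative usage (not in the original source): an IPoIB-style consumer
 * pairs the two exported helpers around the lifetime of its underlay QP,
 * e.g.:
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(mdev, qpn);
 *	if (err)
 *		return err;
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(mdev, qpn);
 *
 * 'mdev' and 'qpn' are assumed caller-side names.
 */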
3037 static struct mlx5_flow_root_namespace
3038 *get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
3040 struct mlx5_flow_namespace *ns;
3042 if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
3043 ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
3044 ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
3046 ns = mlx5_get_flow_namespace(dev, ns_type);
3050 return find_root(&ns->node);
3053 struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
3054 u8 ns_type, u8 num_actions,
3055 void *modify_actions)
3057 struct mlx5_flow_root_namespace *root;
3058 struct mlx5_modify_hdr *modify_hdr;
3061 root = get_root_namespace(dev, ns_type);
3063 return ERR_PTR(-EOPNOTSUPP);
3065 modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
3067 return ERR_PTR(-ENOMEM);
3069 modify_hdr->ns_type = ns_type;
3070 err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
3071 modify_actions, modify_hdr);
3074 return ERR_PTR(err);
3079 EXPORT_SYMBOL(mlx5_modify_header_alloc);
3081 void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
3082 struct mlx5_modify_hdr *modify_hdr)
3084 struct mlx5_flow_root_namespace *root;
3086 root = get_root_namespace(dev, modify_hdr->ns_type);
3089 root->cmds->modify_header_dealloc(root, modify_hdr);
3092 EXPORT_SYMBOL(mlx5_modify_header_dealloc);
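/* Illustrative usage (not in the original source): callers allocate a modify
 * header from a namespace type and release it with the matching dealloc,
 * e.g.:
 *
 *	struct mlx5_modify_hdr *hdr;
 *
 *	hdr = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				       num_actions, actions);
 *	if (IS_ERR(hdr))
 *		return PTR_ERR(hdr);
 *	...
 *	mlx5_modify_header_dealloc(dev, hdr);
 *
 * 'num_actions' and 'actions' (a packed array of set/add/copy actions built
 * by the caller) are assumed names.
 */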
3094 struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
3097 void *reformat_data,
3098 enum mlx5_flow_namespace_type ns_type)
3100 struct mlx5_pkt_reformat *pkt_reformat;
3101 struct mlx5_flow_root_namespace *root;
3104 root = get_root_namespace(dev, ns_type);
3106 return ERR_PTR(-EOPNOTSUPP);
3108 pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
3110 return ERR_PTR(-ENOMEM);
3112 pkt_reformat->ns_type = ns_type;
3113 pkt_reformat->reformat_type = reformat_type;
3114 err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
3115 reformat_data, ns_type,
3118 kfree(pkt_reformat);
3119 return ERR_PTR(err);
3122 return pkt_reformat;
3124 EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
3126 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
3127 struct mlx5_pkt_reformat *pkt_reformat)
3129 struct mlx5_flow_root_namespace *root;
3131 root = get_root_namespace(dev, pkt_reformat->ns_type);
3134 root->cmds->packet_reformat_dealloc(root, pkt_reformat);
3135 kfree(pkt_reformat);
3137 EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
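/* Illustrative usage (not in the original source): a packet reformat context
 * is allocated for a namespace type and freed with the matching dealloc,
 * e.g.:
 *
 *	struct mlx5_pkt_reformat *reformat;
 *
 *	reformat = mlx5_packet_reformat_alloc(dev, MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
 *					      encap_len, encap_hdr,
 *					      MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(reformat))
 *		return PTR_ERR(reformat);
 *	...
 *	mlx5_packet_reformat_dealloc(dev, reformat);
 *
 * 'encap_len' and 'encap_hdr' are assumed caller-side names.
 */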
3139 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
3140 struct mlx5_flow_root_namespace *peer_ns)
3142 if (peer_ns && ns->mode != peer_ns->mode) {
3143 mlx5_core_err(ns->dev,
3144 "Can't peer namespace of different steering mode\n");
3148 return ns->cmds->set_peer(ns, peer_ns);
3151 /* This function should be called only at the init stage of the namespace.
3152 * It is not safe to call this function while steering operations
3153 * are being executed in the namespace.
3155 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
3156 enum mlx5_flow_steering_mode mode)
3158 struct mlx5_flow_root_namespace *root;
3159 const struct mlx5_flow_cmds *cmds;
3162 root = find_root(&ns->node);
3163 if (&root->ns != ns)
3164 /* Can't set cmds on a non-root namespace */
3167 if (root->table_type != FS_FT_FDB)
3170 if (root->mode == mode)
3173 if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
3174 cmds = mlx5_fs_cmd_get_dr_cmds();
3176 cmds = mlx5_fs_cmd_get_fw_cmds();
3180 err = cmds->create_ns(root);
3182 mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
3187 root->cmds->destroy_ns(root);