/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS                                                \
	FS_REQUIRED_CAPS(                                                      \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en),     \
		FS_CAP(flow_table_properties_nic_transmit.modify_root),        \
		FS_CAP(flow_table_properties_nic_transmit                      \
			       .identified_miss_table_mode),                   \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX                                                \
	FS_REQUIRED_CAPS(                                                       \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root),    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .identified_miss_table_mode),                    \
		FS_CAP(flow_table_properties_nic_transmit_rdma                  \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS  1
#define KERNEL_NIC_TC_NUM_LEVELS 2

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS  1
#define KERNEL_TX_IPSEC_NUM_LEVELS 1
#define KERNEL_TX_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
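
/* node_caps lists the HCA capabilities a priority depends on; the
 * root_fs tree below describes the default NIC RX namespaces, their
 * priorities and the number of levels each priority spans, and is
 * instantiated by init_root_tree().
 */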
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 7,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
#ifdef CONFIG_MLX5_IPSEC
	.ar_size = 2,
#else
	.ar_size = 1,
#endif
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
#ifdef CONFIG_MLX5_IPSEC
		ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
#endif
	}
};

enum {
	RDMA_RX_COUNTERS_PRIO,
	RDMA_RX_BYPASS_PRIO,
	RDMA_RX_KERNEL_PRIO,
};

#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)

static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

enum {
	RDMA_TX_COUNTERS_PRIO,
	RDMA_TX_BYPASS_PRIO,
};

#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 2,
	.children = (struct init_tree_node[]) {
		[RDMA_TX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_TX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};
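
/* Lockdep subclasses for the nested node rwsems: a flow table
 * (grandparent), a flow group (parent) and an FTE (child) may be
 * held at the same time, each taken at its own nesting level.
 */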
enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Deleting a rule (destination) is a special case that requires
 * keeping the FTE locked for the whole deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);
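
/* Nodes start with a single reference. del_hw_func destroys the object
 * in firmware and del_sw_func frees the software state once the last
 * reference is dropped (see tree_put_node()).
 */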
static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}
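
/* Drop a reference; on the last put, destroy the node in HW and SW and
 * then drop the reference it held on its parent, which may cascade up
 * the tree.
 */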
static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	root = node->root;

	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT &&
	    --fte->dests_size) {
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
	    --fte->dests_size) {
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = false;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}
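
/* Reserve an index in the group's FTE range and publish the FTE in the
 * group's hash table. Called with the flow group write-locked.
 */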
static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_free(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return ft;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming the whole tree is locked by the chain lock mutex */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming the whole tree is locked by the chain lock mutex */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}
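
/* Make ft the new root table of its namespace if its level is lower
 * than the current root's, updating firmware once per underlay QPN.
 */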
static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}

/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}
static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	/* The level is related to the
	 * priority level range.
	 */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	ft_attr.max_fte = 1;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
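
/* An auto-grouped table splits its usable FTE range into
 * max_num_groups + 1 group-size chunks; the extra chunk leaves room
 * for groups created after all required groups already exist.
 */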
#define MAX_FLOW_GROUP_SIZE BIT(24)

struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;
	int autogroups_max_fte;

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	autogroups_max_fte = ft->max_fte - num_reserved_entries;
	if (max_num_groups > autogroups_max_fte)
		goto err_validate;
	if (num_reserved_entries > ft->max_fte)
		goto err_validate;

	/* Align the number of groups according to the largest group size */
	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;

err_validate:
	mlx5_destroy_flow_table(ft);
	return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);

static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to the dests list - flow tables must be kept at
		 * the end of the list so that forward-to-next-prio rules work.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}
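
/* Pick a free index range for a new autogroup: walk the groups (kept
 * sorted by start_index) and take the first gap that fits group_size
 * entries. Called with the flow table write-locked.
 */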
static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
		     d1->sampler_id == d2->sampler_id))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions(u32 action1, u32 action2)
{
	u32 xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}
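
/* Collect all flow groups whose match criteria equal the spec's into
 * match_head. Runs under RCU; each group is reference-counted so it
 * remains usable after the RCU read section ends.
 */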
static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;
	int err = 0;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			rcu_read_unlock();
			free_match_list(match_head, ft_locked);
			return -ENOMEM;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
	rcu_read_unlock();
	return err;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}
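
/* Look up an FTE with the given match value in group g. On success the
 * FTE is returned referenced and write-locked; the group lock itself
 * is dropped before returning.
 */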
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version = 0;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with identical match value and attempt update its
	 * action.
	 */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		/* No error check needed here, because insert_fte() is not called */
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version. If version have changed it could be that an
	 * FTE with the same match value was added while the fgs weren't
	 * locked.
	 */
	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
	    version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		if (!g->node.active) {
			up_write_ref_node(&g->node, false);
			continue;
		}

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		if (IS_ERR(rule))
			tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}
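
/* Core of rule addition: first try to attach to an existing matching
 * FTE under a read lock; if that fails or races with a table change,
 * retake the table write lock and create a new autogroup and FTE.
 */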
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	int i;

	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes and increase its refcount so that its "del" functions are
	 * not invoked; they are handled here instead.
	 * The rules are removed under the locked FTE.
	 * After removing all the handle's rules, if rules remain, the FTE
	 * only needs to be modified in FW, and the lock/refcount taken above
	 * is released.
	 * Otherwise the FTE should be deleted: first delete it in FW, then
	 * unlock the FTE and let tree_put_node() perform the last refcount
	 * decrease and the required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else if (list_empty(&fte->node.children)) {
		del_hw_fte(&fte->node);
		/* Avoid double call to del_hw_fte */
		fte->node.del_hw_func = NULL;
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
	} else {
		up_write_ref_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow tables from the previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);

struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);

static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		return true;
	default:
		return false;
	}
}
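
/* Map a namespace type to the root namespace and priority that hold it
 * and return the first namespace under that priority.
 */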
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		if (steering->port_sel_root_ns)
			return &steering->port_sel_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		root_ns = steering->fdb_root_ns;
		prio = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
		root_ns = steering->egress_root_ns;
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		root_ns = steering->rdma_tx_root_ns;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_COUNTERS_PRIO;
		break;
	default: /* Must be NIC RX */
		WARN_ON(!is_nic_rx_ns(type));
		root_ns = steering->root_ns;
		prio = type;
		break;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (vport >= steering->esw_egress_acl_vports)
			return NULL;
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (vport >= steering->esw_ingress_acl_vports)
			return NULL;
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}
static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

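/* Capabilities in the init tree are stored as bit offsets into the raw
 * flow table capability area (see FS_CAP()); GET_FLOW_TABLE_CAP() extracts
 * one such bit so has_required_caps() can test them all.
 */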
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}

	return true;
}

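/* Materialize the static init_tree_node description as priorities and
 * namespaces under @fs_parent_node, skipping any priority whose minimum
 * flow table level or required capabilities the device cannot satisfy.
 */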
static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int err;
	int i;

	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       fs_parent_node,
					       init_node, i);
		if (err)
			return err;
	}

	return 0;
}

static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE &&
	    (table_type == FS_FT_NIC_RX || table_type == FS_FT_NIC_TX))
		cmds = mlx5_fs_cmd_get_default_ipsec_fpga_cmds(table_type);

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

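/* The set_prio_attrs*() helpers walk the tree and assign every priority its
 * start_level and num_levels, accumulating levels so each flow table in the
 * tree ends up with a distinct level range.
 */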
static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, we can jump from one chain
		 * (namespace) to another, so we accumulate the levels
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}

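/* The anchor is a one-FTE table placed in the lowest-precedence priority of
 * the NIC RX tree, so the tables before it always have a valid table to
 * forward their misses to.
 */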
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);
	mlx5_cleanup_fc_stats(dev);
	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	mlx5_ft_pool_destroy(dev);
	kfree(steering);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
	if (!steering->port_sel_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
			      PORT_SEL_NUM_LEVELS);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can reuse
 * mlx5_get_fdb_sub_ns() and the tc API for FT chains.
 * When creating a new namespace for each chain, store it in the first
 * available slot. Assume tc chains are created and stored first and only
 * then the FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

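/* FDB_BYPASS_PATH holds MLX5_BY_PASS_NUM_REGULAR_PRIOS single-level
 * priorities matched ahead of the tc fast path, e.g. for RDMA flow
 * steering bypass users.
 */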
static int create_fdb_bypass(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *prio;
	int i;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
	if (IS_ERR(prio))
		return PTR_ERR(prio);

	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
		prio = fs_create_prio(ns, i, 1);
		if (IS_ERR(prio))
			return PTR_ERR(prio);
	}
	return 0;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	err = create_fdb_bypass(steering);
	if (err)
		goto out_err;
	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* We put this priority last, knowing that nothing will get here
	 * unless explicitly forwarded to. This is possible because the
	 * slow path tables have catch all rules and nothing gets passed
	 * beyond them.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_root_ns(steering->fdb_root_ns);
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
	steering->fdb_root_ns = NULL;
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_egress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < steering->esw_egress_acl_vports; i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_ingress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;

cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

int mlx5_init_fs(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		return err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
		err = init_port_sel_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
		err = init_rdma_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (mlx5_fpga_ipsec_device_caps(steering->dev) & MLX5_ACCEL_IPSEC_CAP_DEVICE ||
	    MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;

err:
	mlx5_cleanup_fs(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

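/* Resolve a namespace type to its root namespace, which carries the command
 * interface (root->cmds) used by the allocation helpers below; vport ACL
 * types are resolved through vport 0.
 */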
static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = params->type;
	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);

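/* Match definers describe a custom match layout (format @format_id plus
 * @match_mask); the resulting hardware object id can be retrieved via
 * mlx5_get_match_definer_id().
 */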
int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
	return definer->id;
}

struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
			  u32 *match_mask)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_definer *definer;
	int id;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return ERR_PTR(-ENOMEM);

	definer->ns_type = ns_type;
	id = root->cmds->create_match_definer(root, format_id, match_mask);
	if (id < 0) {
		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
		kfree(definer);
		return ERR_PTR(id);
	}
	definer->id = id;
	return definer;
}

void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
				struct mlx5_flow_definer *definer)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, definer->ns_type);
	if (WARN_ON(!root))
		return;

	root->cmds->destroy_match_definer(root, definer->id);
	kfree(definer);
}

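/* Peering ties two root namespaces together at the steering-command level
 * (e.g. the FDBs of both devices in a LAG); both sides must use the same
 * steering mode.
 */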
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespaces of different steering modes\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns);
}

/* This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	/* Can't set cmds to a non root namespace */
	if (&root->ns != ns)
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);
	root->cmds = cmds;
	root->mode = mode;

	return 0;
}