/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include <net/devlink.h>

#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"

#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
					 sizeof(struct init_tree_node))

#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
		 ...) {.type = FS_TYPE_PRIO,\
	.min_ft_level = min_level_val,\
	.num_levels = num_levels_val,\
	.num_leaf_prios = num_prios_val,\
	.caps = caps_val,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
		 __VA_ARGS__)

#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE,	\
	.def_miss_action = def_miss_act,\
	.children = (struct init_tree_node[]) {__VA_ARGS__},\
	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}

#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
				   sizeof(long))

#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))

#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
			       .caps = (long[]) {__VA_ARGS__} }

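/* Illustrative expansion (added commentary, not upstream): for example,
 * ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, child...) produces the
 * initializer
 *
 *	{ .type = FS_TYPE_PRIO, .min_ft_level = BY_PASS_MIN_LEVEL,
 *	  .num_levels = 0, .num_leaf_prios = 0, .caps = FS_CHAINING_CAPS,
 *	  .children = (struct init_tree_node[]){ child... },
 *	  .ar_size = <number of children> }
 *
 * which is how the static namespace trees below are built.
 */
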
#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
					  FS_CAP(flow_table_properties_nic_receive.modify_root), \
					  FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
					  FS_CAP(flow_table_properties_nic_receive.flow_table_modify))

#define FS_CHAINING_CAPS_EGRESS \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit \
			       .identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))

#define FS_CHAINING_CAPS_RDMA_TX \
	FS_REQUIRED_CAPS( \
		FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
		FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
		FS_CAP(flow_table_properties_nic_transmit_rdma \
			       .identified_miss_table_mode), \
		FS_CAP(flow_table_properties_nic_transmit_rdma \
			       .flow_table_modify))

#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1

#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1

#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
			   LEFTOVERS_NUM_PRIOS)

#define KERNEL_RX_MACSEC_NUM_PRIOS 1
#define KERNEL_RX_MACSEC_NUM_LEVELS 2
#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)

#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
 */
#define KERNEL_NIC_PRIO_NUM_LEVELS 9
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

#define KERNEL_NIC_TC_NUM_PRIOS 1
#define KERNEL_NIC_TC_NUM_LEVELS 3

#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)

#define OFFLOADS_MAX_FT 2
#define OFFLOADS_NUM_PRIOS 2
#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)

#define LAG_PRIO_NUM_LEVELS 1
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)

#define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 3
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)

#define KERNEL_TX_MACSEC_NUM_PRIOS 1
#define KERNEL_TX_MACSEC_NUM_LEVELS 2
#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)

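/* Added commentary - a sketch of the level arithmetic above, assuming the
 * values as defined in this file: KERNEL_MIN_LEVEL = 9 + 1 = 10,
 * ETHTOOL_MIN_LEVEL = 10 + 11 = 21 and BY_PASS_MIN_LEVEL =
 * 21 + MLX5_BY_PASS_NUM_PRIOS + LEFTOVERS_NUM_PRIOS, so each priority's flow
 * tables sit at levels strictly below those of the priorities chained after
 * it.
 */
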
struct node_caps {
	size_t	arr_sz;
	long	*caps;
};

static struct init_tree_node {
	enum fs_node_type	type;
	struct init_tree_node *children;
	int ar_size;
	struct node_caps caps;
	int min_ft_level;
	int num_leaf_prios;
	int prio;
	int num_levels;
	enum mlx5_flow_table_miss_action def_miss_action;
} root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 8,
	.children = (struct init_tree_node[]){
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
						  KERNEL_RX_MACSEC_NUM_LEVELS))),
		ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
						  LAG_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
						  OFFLOADS_MAX_FT))),
		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
						  ETHTOOL_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
						  KERNEL_NIC_TC_NUM_LEVELS),
				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
						  KERNEL_NIC_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
						  LEFTOVERS_NUM_LEVELS))),
		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
						  ANCHOR_NUM_LEVELS))),
	}
};

static struct init_tree_node egress_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
						  KERNEL_TX_IPSEC_NUM_LEVELS))),
		ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_EGRESS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
						  KERNEL_TX_MACSEC_NUM_LEVELS))),
	}
};

enum {
	RDMA_RX_IPSEC_PRIO,
	RDMA_RX_COUNTERS_PRIO,
	RDMA_RX_BYPASS_PRIO,
	RDMA_RX_KERNEL_PRIO,
};

#define RDMA_RX_IPSEC_NUM_PRIOS 1
#define RDMA_RX_IPSEC_NUM_LEVELS 2
#define RDMA_RX_IPSEC_MIN_LEVEL  (RDMA_RX_IPSEC_NUM_LEVELS)

#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)

static struct init_tree_node rdma_rx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 4,
	.children = (struct init_tree_node[]) {
		[RDMA_RX_IPSEC_PRIO] =
		ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
						  RDMA_RX_IPSEC_NUM_LEVELS))),
		[RDMA_RX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
						  RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_RX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
						  BY_PASS_PRIO_NUM_LEVELS))),
		[RDMA_RX_KERNEL_PRIO] =
		ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
				ADD_MULTIPLE_PRIO(1, 1))),
	}
};

enum {
	RDMA_TX_BYPASS_PRIO,
	RDMA_TX_COUNTERS_PRIO,
	RDMA_TX_IPSEC_PRIO,
};

#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)

#define RDMA_TX_IPSEC_NUM_PRIOS 1
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL  (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)

static struct init_tree_node rdma_tx_root_fs = {
	.type = FS_TYPE_NAMESPACE,
	.ar_size = 3,
	.children = (struct init_tree_node[]) {
		[RDMA_TX_COUNTERS_PRIO] =
		ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
						  RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
		[RDMA_TX_IPSEC_PRIO] =
		ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
						  RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),

		[RDMA_TX_BYPASS_PRIO] =
		ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
			 FS_CHAINING_CAPS_RDMA_TX,
			 ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
				ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
						  BY_PASS_PRIO_NUM_LEVELS))),
	}
};

enum fs_i_lock_class {
	FS_LOCK_GRANDPARENT,
	FS_LOCK_PARENT,
	FS_LOCK_CHILD
};

static const struct rhashtable_params rhash_fte = {
	.key_len = sizeof_field(struct fs_fte, val),
	.key_offset = offsetof(struct fs_fte, val),
	.head_offset = offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

static const struct rhashtable_params rhash_fg = {
	.key_len = sizeof_field(struct mlx5_flow_group, mask),
	.key_offset = offsetof(struct mlx5_flow_group, mask),
	.head_offset = offsetof(struct mlx5_flow_group, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};

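/* Added commentary: both hash tables key on the full match blob, so
 * rhltable_lookup(&ft->fgs_hash, spec, rhash_fg) in build_match_list() and
 * rhashtable_lookup_fast(&g->ftes_hash, match_value, rhash_fte) in
 * lookup_fte_locked() find candidate groups and existing FTEs without
 * walking the tree.
 */
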
static void del_hw_flow_table(struct fs_node *node);
static void del_hw_flow_group(struct fs_node *node);
static void del_hw_fte(struct fs_node *node);
static void del_sw_flow_table(struct fs_node *node);
static void del_sw_flow_group(struct fs_node *node);
static void del_sw_fte(struct fs_node *node);
static void del_sw_prio(struct fs_node *node);
static void del_sw_ns(struct fs_node *node);
/* Delete rule (destination) is special case that
 * requires to lock the FTE for all the deletion process.
 */
static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2);
static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
	       struct mlx5_flow_destination *dest);

static void tree_init_node(struct fs_node *node,
			   void (*del_hw_func)(struct fs_node *),
			   void (*del_sw_func)(struct fs_node *))
{
	refcount_set(&node->refcount, 1);
	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->children);
	init_rwsem(&node->lock);
	node->del_hw_func = del_hw_func;
	node->del_sw_func = del_sw_func;
	node->active = false;
}

static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
	if (parent)
		refcount_inc(&parent->refcount);
	node->parent = parent;

	/* Parent is the root */
	if (!parent)
		node->root = node;
	else
		node->root = parent->root;
}

static int tree_get_node(struct fs_node *node)
{
	return refcount_inc_not_zero(&node->refcount);
}

static void nested_down_read_ref_node(struct fs_node *node,
				      enum fs_i_lock_class class)
{
	if (node) {
		down_read_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void nested_down_write_ref_node(struct fs_node *node,
				       enum fs_i_lock_class class)
{
	if (node) {
		down_write_nested(&node->lock, class);
		refcount_inc(&node->refcount);
	}
}

static void down_write_ref_node(struct fs_node *node, bool locked)
{
	if (node) {
		if (!locked)
			down_write(&node->lock);
		refcount_inc(&node->refcount);
	}
}

static void up_read_ref_node(struct fs_node *node)
{
	refcount_dec(&node->refcount);
	up_read(&node->lock);
}

static void up_write_ref_node(struct fs_node *node, bool locked)
{
	refcount_dec(&node->refcount);
	if (!locked)
		up_write(&node->lock);
}

static void tree_put_node(struct fs_node *node, bool locked)
{
	struct fs_node *parent_node = node->parent;

	if (refcount_dec_and_test(&node->refcount)) {
		if (node->del_hw_func)
			node->del_hw_func(node);
		if (parent_node) {
			down_write_ref_node(parent_node, locked);
			list_del_init(&node->list);
		}
		node->del_sw_func(node);
		if (parent_node)
			up_write_ref_node(parent_node, locked);
		node = NULL;
	}
	if (!node && parent_node)
		tree_put_node(parent_node, locked);
}

static int tree_remove_node(struct fs_node *node, bool locked)
{
	if (refcount_read(&node->refcount) > 1) {
		refcount_dec(&node->refcount);
		return -EEXIST;
	}
	tree_put_node(node, locked);
	return 0;
}

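/* Added commentary - the reference-counting contract of the helpers above,
 * sketched for a leaf node:
 *
 *	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);	refcount = 1
 *	tree_add_node(&fte->node, &fg->node);			parent gains a ref
 *	...
 *	tree_put_node(&fte->node, false);	1 -> 0: del_hw_fte() then
 *						del_sw_fte(), parent ref dropped
 */
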
static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
				 unsigned int prio)
{
	struct fs_prio *iter_prio;

	fs_for_each_prio(iter_prio, ns) {
		if (iter_prio->prio == prio)
			return iter_prio;
	}

	return NULL;
}

static bool is_fwd_next_action(u32 action)
{
	return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
}

static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
{
	return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
		type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
		type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
		type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
		type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
		type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
}

static bool check_valid_spec(const struct mlx5_flow_spec *spec)
{
	int i;

	for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
		if (spec->match_value[i] & ~spec->match_criteria[i]) {
			pr_warn("mlx5_core: match_value differs from match_criteria\n");
			return false;
		}

	return true;
}

struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
{
	struct fs_node *root;
	struct mlx5_flow_namespace *ns;

	while (node->parent)
		node = node->parent;
	root = node;
	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
		pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
		return NULL;
	}

	ns = container_of(root, struct mlx5_flow_namespace, node);
	return container_of(ns, struct mlx5_flow_root_namespace, ns);
}

static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev->priv.steering;
	return NULL;
}

static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root = find_root(node);

	if (root)
		return root->dev;
	return NULL;
}

static void del_sw_ns(struct fs_node *node)
{
	kfree(node);
}

static void del_sw_prio(struct fs_node *node)
{
	kfree(node);
}

static void del_hw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(ft, node);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	trace_mlx5_fs_del_ft(ft);

	if (node->active) {
		err = root->cmds->destroy_flow_table(root, ft);
		if (err)
			mlx5_core_warn(dev, "flow steering can't destroy ft\n");
	}
}

static void del_sw_flow_table(struct fs_node *node)
{
	struct mlx5_flow_table *ft;
	struct fs_prio *prio;

	fs_get_obj(ft, node);

	rhltable_destroy(&ft->fgs_hash);
	if (ft->node.parent) {
		fs_get_obj(prio, ft->node.parent);
		prio->num_ft--;
	}
	kfree(ft);
}

static void modify_fte(struct fs_fte *fte)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	int err;

	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&fte->node);

	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
	if (err)
		mlx5_core_warn(dev,
			       "%s can't del rule fg id=%d fte_index=%d\n",
			       __func__, fg->id, fte->index);
	fte->modify_mask = 0;
}

static void del_sw_hw_rule(struct fs_node *node)
{
	struct mlx5_flow_rule *rule;
	struct fs_fte *fte;

	fs_get_obj(rule, node);
	fs_get_obj(fte, rule->node.parent);
	trace_mlx5_fs_del_rule(rule);
	if (is_fwd_next_action(rule->sw_action)) {
		mutex_lock(&rule->dest_attr.ft->lock);
		list_del(&rule->next_ft);
		mutex_unlock(&rule->dest_attr.ft->lock);
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
		--fte->dests_size;
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
		goto out;
	}

	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
		--fte->dests_size;
		fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
		fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
		goto out;
	}

	if (is_fwd_dest_type(rule->dest_attr.type)) {
		--fte->dests_size;
		--fte->fwd_dests;

		if (!fte->fwd_dests)
			fte->action.action &=
				~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		fte->modify_mask |=
			BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	}
out:
	kfree(rule);
}

static void del_hw_fte(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_core_dev *dev;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	trace_mlx5_fs_del_fte(fte);
	WARN_ON(fte->dests_size);
	dev = get_dev(&ft->node);
	root = find_root(&ft->node);
	if (node->active) {
		err = root->cmds->delete_fte(root, ft, fte);
		if (err)
			mlx5_core_warn(dev,
				       "flow steering can't delete fte in index %d of flow group id %d\n",
				       fte->index, fg->id);
		node->active = false;
	}
}

static void del_sw_fte(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int err;

	fs_get_obj(fte, node);
	fs_get_obj(fg, fte->node.parent);

	err = rhashtable_remove_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	WARN_ON(err);
	ida_free(&fg->fte_allocator, fte->index - fg->start_index);
	kmem_cache_free(steering->ftes_cache, fte);
}

static void del_hw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	struct mlx5_core_dev *dev;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);
	dev = get_dev(&ft->node);
	trace_mlx5_fs_del_fg(fg);

	root = find_root(&ft->node);
	if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
			       fg->id, ft->id);
}

static void del_sw_flow_group(struct fs_node *node)
{
	struct mlx5_flow_steering *steering = get_steering(node);
	struct mlx5_flow_group *fg;
	struct mlx5_flow_table *ft;
	int err;

	fs_get_obj(fg, node);
	fs_get_obj(ft, fg->node.parent);

	rhashtable_destroy(&fg->ftes_hash);
	ida_destroy(&fg->fte_allocator);
	if (ft->autogroup.active &&
	    fg->max_ftes == ft->autogroup.group_size &&
	    fg->start_index < ft->autogroup.max_fte)
		ft->autogroup.num_groups--;
	err = rhltable_remove(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	WARN_ON(err);
	kmem_cache_free(steering->fgs_cache, fg);
}

static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
{
	int index;
	int ret;

	index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes - 1, GFP_KERNEL);
	if (index < 0)
		return index;

	fte->index = index + fg->start_index;
	ret = rhashtable_insert_fast(&fg->ftes_hash,
				     &fte->hash,
				     rhash_fte);
	if (ret)
		goto err_ida_remove;

	tree_add_node(&fte->node, &fg->node);
	list_add_tail(&fte->node.list, &fg->node.children);
	return 0;

err_ida_remove:
	ida_free(&fg->fte_allocator, index);
	return ret;
}

static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
				const struct mlx5_flow_spec *spec,
				struct mlx5_flow_act *flow_act)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct fs_fte *fte;

	fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
	if (!fte)
		return ERR_PTR(-ENOMEM);

	memcpy(fte->val, &spec->match_value, sizeof(fte->val));
	fte->node.type = FS_TYPE_FLOW_ENTRY;
	fte->action = *flow_act;
	fte->flow_context = spec->flow_context;

	tree_init_node(&fte->node, del_hw_fte, del_sw_fte);

	return fte;
}

static void dealloc_flow_group(struct mlx5_flow_steering *steering,
			       struct mlx5_flow_group *fg)
{
	rhashtable_destroy(&fg->ftes_hash);
	kmem_cache_free(steering->fgs_cache, fg);
}

static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
						u8 match_criteria_enable,
						const void *match_criteria,
						int start_index,
						int end_index)
{
	struct mlx5_flow_group *fg;
	int ret;

	fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
	if (!fg)
		return ERR_PTR(-ENOMEM);

	ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
	if (ret) {
		kmem_cache_free(steering->fgs_cache, fg);
		return ERR_PTR(ret);
	}

	ida_init(&fg->fte_allocator);
	fg->mask.match_criteria_enable = match_criteria_enable;
	memcpy(&fg->mask.match_criteria, match_criteria,
	       sizeof(fg->mask.match_criteria));
	fg->node.type = FS_TYPE_FLOW_GROUP;
	fg->start_index = start_index;
	fg->max_ftes = end_index - start_index + 1;

	return fg;
}

static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
						       u8 match_criteria_enable,
						       const void *match_criteria,
						       int start_index,
						       int end_index,
						       struct list_head *prev)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *fg;
	int ret;

	fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
			      start_index, end_index);
	if (IS_ERR(fg))
		return fg;

	/* initialize refcnt, add to parent list */
	ret = rhltable_insert(&ft->fgs_hash,
			      &fg->hash,
			      rhash_fg);
	if (ret) {
		dealloc_flow_group(steering, fg);
		return ERR_PTR(ret);
	}

	tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
	tree_add_node(&fg->node, &ft->node);
	/* Add node to group list */
	list_add(&fg->node.list, prev);
	atomic_inc(&ft->node.version);

	return fg;
}

static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
						enum fs_flow_table_type table_type,
						enum fs_flow_table_op_mod op_mod,
						u32 flags)
{
	struct mlx5_flow_table *ft;
	int ret;

	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
	if (!ft)
		return ERR_PTR(-ENOMEM);

	ret = rhltable_init(&ft->fgs_hash, &rhash_fg);
	if (ret) {
		kfree(ft);
		return ERR_PTR(ret);
	}

	ft->level = level;
	ft->node.type = FS_TYPE_FLOW_TABLE;
	ft->op_mod = op_mod;
	ft->type = table_type;
	ft->vport = vport;
	ft->flags = flags;
	INIT_LIST_HEAD(&ft->fwd_rules);
	mutex_init(&ft->lock);

	return ft;
}

/* If reverse is false, then we search for the first flow table in the
 * root sub-tree from start(closest from right), else we search for the
 * last flow table in the root sub-tree till start(closest from left).
 */
static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
							 struct list_head *start,
							 bool reverse)
{
#define list_advance_entry(pos, reverse)		\
	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))

#define list_for_each_advance_continue(pos, head, reverse)	\
	for (pos = list_advance_entry(pos, reverse);		\
	     &pos->list != (head);				\
	     pos = list_advance_entry(pos, reverse))

	struct fs_node *iter = list_entry(start, struct fs_node, list);
	struct mlx5_flow_table *ft = NULL;

	if (!root || root->type == FS_TYPE_PRIO_CHAINS)
		return NULL;

	list_for_each_advance_continue(iter, &root->children, reverse) {
		if (iter->type == FS_TYPE_FLOW_TABLE) {
			fs_get_obj(ft, iter);
			return ft;
		}
		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
		if (ft)
			return ft;
	}

	return NULL;
}

/* If reverse is false then return the first flow table in next priority of
 * prio in the tree, else return the last flow table in the previous priority
 * of prio in the tree.
 */
static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
{
	struct mlx5_flow_table *ft = NULL;
	struct fs_node *curr_node;
	struct fs_node *parent;

	parent = prio->node.parent;
	curr_node = &prio->node;
	while (!ft && parent) {
		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
		curr_node = parent;
		parent = curr_node->parent;
	}
	return ft;
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, false);
}

/* Assuming all the tree is locked by mutex chain lock */
static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
{
	return find_closest_ft(prio, true);
}

static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
						struct mlx5_flow_act *flow_act)
{
	struct fs_prio *prio;
	bool next_ns;

	next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
	fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);

	return find_next_chained_ft(prio);
}

static int connect_fts_in_prio(struct mlx5_core_dev *dev,
			       struct fs_prio *prio,
			       struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_flow_table *iter;
	int err;

	fs_for_each_ft(iter, prio) {
		err = root->cmds->modify_flow_table(root, iter, ft);
		if (err) {
			mlx5_core_err(dev,
				      "Failed to modify flow table id %d, type %d, err %d\n",
				      iter->id, iter->type, err);
			/* The driver is out of sync with the FW */
			return err;
		}
	}
	return 0;
}

/* Connect flow tables from previous priority of prio to ft */
static int connect_prev_fts(struct mlx5_core_dev *dev,
			    struct mlx5_flow_table *ft,
			    struct fs_prio *prio)
{
	struct mlx5_flow_table *prev_ft;

	prev_ft = find_prev_chained_ft(prio);
	if (prev_ft) {
		struct fs_prio *prev_prio;

		fs_get_obj(prev_prio, prev_ft->node.parent);
		return connect_fts_in_prio(dev, prev_prio, ft);
	}
	return 0;
}

static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
				 *prio)
{
	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
	struct mlx5_ft_underlay_qp *uqp;
	int min_level = INT_MAX;
	int err = 0;
	u32 qpn;

	if (root->root_ft)
		min_level = root->root_ft->level;

	if (ft->level >= min_level)
		return 0;

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, ft, qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root, ft,
							 qpn, false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = ft;

	return err;
}

static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct fs_fte *fte;
	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int err = 0;

	fs_get_obj(fte, rule->node.parent);
	if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return -EINVAL;
	down_write_ref_node(&fte->node, false);
	fs_get_obj(fg, fte->node.parent);
	fs_get_obj(ft, fg->node.parent);

	memcpy(&rule->dest_attr, dest, sizeof(*dest));
	root = find_root(&ft->node);
	err = root->cmds->update_fte(root, ft, fg,
				     modify_mask, fte);
	up_write_ref_node(&fte->node, false);

	return err;
}

int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
				 struct mlx5_flow_destination *new_dest,
				 struct mlx5_flow_destination *old_dest)
{
	int i;

	if (!old_dest) {
		if (handle->num_rules != 1)
			return -EINVAL;
		return _mlx5_modify_rule_destination(handle->rule[0],
						     new_dest);
	}

	for (i = 0; i < handle->num_rules; i++) {
		if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
			return _mlx5_modify_rule_destination(handle->rule[i],
							     new_dest);
	}

	return -EINVAL;
}
EXPORT_SYMBOL(mlx5_modify_rule_destination);

/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
static int connect_fwd_rules(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *new_next_ft,
			     struct mlx5_flow_table *old_next_ft)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_rule *iter;
	int err = 0;

	/* new_next_ft and old_next_ft could be NULL only
	 * when we create/destroy the anchor flow table.
	 */
	if (!new_next_ft || !old_next_ft)
		return 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = new_next_ft;

	mutex_lock(&old_next_ft->lock);
	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
	mutex_unlock(&old_next_ft->lock);
	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
		if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
		    iter->ft->ns == new_next_ft->ns)
			continue;

		err = _mlx5_modify_rule_destination(iter, &dest);
		if (err)
			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
			       new_next_ft->id);
	}
	return 0;
}

static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
			      struct fs_prio *prio)
{
	struct mlx5_flow_table *next_ft, *first_ft;
	int err = 0;

	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */

	first_ft = list_first_entry_or_null(&prio->node.children,
					    struct mlx5_flow_table, node.list);
	if (!first_ft || first_ft->level > ft->level) {
		err = connect_prev_fts(dev, ft, prio);
		if (err)
			return err;

		next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
		err = connect_fwd_rules(dev, ft, next_ft);
		if (err)
			return err;
	}

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.modify_root))
		err = update_root_ft_create(ft, prio);
	return err;
}

static void list_add_flow_table(struct mlx5_flow_table *ft,
				struct fs_prio *prio)
{
	struct list_head *prev = &prio->node.children;
	struct mlx5_flow_table *iter;

	fs_for_each_ft(iter, prio) {
		if (iter->level > ft->level)
			break;
		prev = &iter->node.list;
	}
	list_add(&ft->node.list, prev);
}

static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
							struct mlx5_flow_table_attr *ft_attr,
							enum fs_flow_table_op_mod op_mod,
							u16 vport)
{
	struct mlx5_flow_root_namespace *root = find_root(&ns->node);
	bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
	struct mlx5_flow_table *next_ft;
	struct fs_prio *fs_prio = NULL;
	struct mlx5_flow_table *ft;
	int err;

	if (!root) {
		pr_err("mlx5: flow steering failed to find root of namespace\n");
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&root->chain_lock);
	fs_prio = find_prio(ns, ft_attr->prio);
	if (!fs_prio) {
		err = -EINVAL;
		goto unlock_root;
	}
	if (!unmanaged) {
		/* The level is related to the
		 * priority level range.
		 */
		if (ft_attr->level >= fs_prio->num_levels) {
			err = -ENOSPC;
			goto unlock_root;
		}

		ft_attr->level += fs_prio->start_level;
	}

	/* The level is related to the
	 * priority level range.
	 */
	ft = alloc_flow_table(ft_attr->level,
			      vport,
			      root->table_type,
			      op_mod, ft_attr->flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto unlock_root;
	}

	tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
	next_ft = unmanaged ? ft_attr->next_ft :
			      find_next_chained_ft(fs_prio);
	ft->def_miss_action = ns->def_miss_action;
	ft->ns = ns;
	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
	if (err)
		goto free_ft;

	if (!unmanaged) {
		err = connect_flow_table(root->dev, ft, fs_prio);
		if (err)
			goto destroy_ft;
	}

	ft->node.active = true;
	down_write_ref_node(&fs_prio->node, false);
	if (!unmanaged) {
		tree_add_node(&ft->node, &fs_prio->node);
		list_add_flow_table(ft, fs_prio);
	} else {
		ft->node.root = fs_prio->node.root;
	}
	fs_prio->num_ft++;
	up_write_ref_node(&fs_prio->node, false);
	mutex_unlock(&root->chain_lock);
	trace_mlx5_fs_add_ft(ft);
	return ft;
destroy_ft:
	root->cmds->destroy_flow_table(root, ft);
free_ft:
	rhltable_destroy(&ft->fgs_hash);
	kfree(ft);
unlock_root:
	mutex_unlock(&root->chain_lock);
	return ERR_PTR(err);
}

struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
					       struct mlx5_flow_table_attr *ft_attr)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
}
EXPORT_SYMBOL(mlx5_create_flow_table);

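/* Illustrative usage (added commentary; values hypothetical, error handling
 * trimmed):
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_table *ft;
 *
 *	ft_attr.prio = 0;
 *	ft_attr.level = 0;
 *	ft_attr.max_fte = 1024;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 */
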
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
{
	return ft->id;
}
EXPORT_SYMBOL(mlx5_flow_table_id);

struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
{
	return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
}

struct mlx5_flow_table*
mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
				 int prio, u32 level)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.level = level;
	ft_attr.prio  = prio;
	ft_attr.max_fte = 1;

	return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);

#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
				    struct mlx5_flow_table_attr *ft_attr)
{
	int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
	int max_num_groups = ft_attr->autogroup.max_num_groups;
	struct mlx5_flow_table *ft;
	int autogroups_max_fte;

	ft = mlx5_create_flow_table(ns, ft_attr);
	if (IS_ERR(ft))
		return ft;

	autogroups_max_fte = ft->max_fte - num_reserved_entries;
	if (max_num_groups > autogroups_max_fte)
		goto err_validate;
	if (num_reserved_entries > ft->max_fte)
		goto err_validate;

	/* Align the number of groups according to the largest group size */
	if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
		max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;

	ft->autogroup.active = true;
	ft->autogroup.required_groups = max_num_groups;
	ft->autogroup.max_fte = autogroups_max_fte;
	/* We save place for flow groups in addition to max types */
	ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);

	return ft;

err_validate:
	mlx5_destroy_flow_table(ft);
	return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);

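/* Worked example of the sizing above (added commentary, hypothetical values):
 * with max_fte = 1024, autogroup.num_reserved_entries = 24 and
 * autogroup.max_num_groups = 9, autogroups_max_fte = 1000 and
 * group_size = 1000 / (9 + 1) = 100, i.e. one spare group's worth of entries
 * is kept beyond the requested groups.
 */
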
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
					       u32 *fg_in)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    fg_in, match_criteria);
	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
					    fg_in,
					    match_criteria_enable);
	int start_index = MLX5_GET(create_flow_group_in, fg_in,
				   start_flow_index);
	int end_index = MLX5_GET(create_flow_group_in, fg_in,
				 end_flow_index);
	struct mlx5_flow_group *fg;
	int err;

	if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
		return ERR_PTR(-EPERM);

	down_write_ref_node(&ft->node, false);
	fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
				     start_index, end_index,
				     ft->node.children.prev);
	up_write_ref_node(&ft->node, false);
	if (IS_ERR(fg))
		return fg;

	err = root->cmds->create_flow_group(root, ft, fg_in, fg);
	if (err) {
		tree_put_node(&fg->node, false);
		return ERR_PTR(err);
	}
	trace_mlx5_fs_add_fg(fg);
	fg->node.active = true;

	return fg;
}
EXPORT_SYMBOL(mlx5_create_flow_group);

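/* Illustrative caller sketch (added commentary): fg_in is a
 * create_flow_group_in mailbox, built with the same MLX5_SET() fields this
 * file uses in create_auto_flow_group() below:
 *
 *	u32 *in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
 *
 *	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
 *	MLX5_SET(create_flow_group_in, in, end_flow_index, 15);
 *	fg = mlx5_create_flow_group(ft, in);
 *	kvfree(in);
 */
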
static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return NULL;

	INIT_LIST_HEAD(&rule->next_ft);
	rule->node.type = FS_TYPE_FLOW_DEST;
	if (dest)
		memcpy(&rule->dest_attr, dest, sizeof(*dest));
	else
		rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;

	return rule;
}

static struct mlx5_flow_handle *alloc_handle(int num_rules)
{
	struct mlx5_flow_handle *handle;

	handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
	if (!handle)
		return NULL;

	handle->num_rules = num_rules;

	return handle;
}

static void destroy_flow_handle(struct fs_fte *fte,
				struct mlx5_flow_handle *handle,
				struct mlx5_flow_destination *dest,
				int i)
{
	for (; --i >= 0;) {
		if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
			fte->dests_size--;
			list_del(&handle->rule[i]->node.list);
			kfree(handle->rule[i]);
		}
	}
	kfree(handle);
}

static struct mlx5_flow_handle *
create_flow_handle(struct fs_fte *fte,
		   struct mlx5_flow_destination *dest,
		   int dest_num,
		   int *modify_mask,
		   bool *new_rule)
{
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_rule *rule = NULL;
	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
	int type;
	int i = 0;

	handle = alloc_handle((dest_num) ? dest_num : 1);
	if (!handle)
		return ERR_PTR(-ENOMEM);

	do {
		if (dest) {
			rule = find_flow_rule(fte, dest + i);
			if (rule) {
				refcount_inc(&rule->node.refcount);
				goto rule_found;
			}
		}

		*new_rule = true;
		rule = alloc_rule(dest + i);
		if (!rule)
			goto free_rules;

		/* Add dest to dests list- we need flow tables to be in the
		 * end of the list for forward to next prio rules.
		 */
		tree_init_node(&rule->node, NULL, del_sw_hw_rule);
		if (dest &&
		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
			list_add(&rule->node.list, &fte->node.children);
		else
			list_add_tail(&rule->node.list, &fte->node.children);
		if (dest) {
			fte->dests_size++;

			if (is_fwd_dest_type(dest[i].type))
				fte->fwd_dests++;

			type = dest[i].type ==
				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
			*modify_mask |= type ? count : dst;
		}
rule_found:
		handle->rule[i] = rule;
	} while (++i < dest_num);

	return handle;

free_rules:
	destroy_flow_handle(fte, handle, dest, i);
	return ERR_PTR(-ENOMEM);
}

/* fte should not be deleted while calling this function */
static struct mlx5_flow_handle *
add_rule_fte(struct fs_fte *fte,
	     struct mlx5_flow_group *fg,
	     struct mlx5_flow_destination *dest,
	     int dest_num,
	     bool update_action)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_table *ft;
	int modify_mask = 0;
	int err;
	bool new_rule = false;

	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
				    &new_rule);
	if (IS_ERR(handle) || !new_rule)
		goto out;

	if (update_action)
		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);

	fs_get_obj(ft, fg->node.parent);
	root = find_root(&fg->node);
	if (!(fte->status & FS_FTE_STATUS_EXISTING))
		err = root->cmds->create_fte(root, ft, fg, fte);
	else
		err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
	if (err)
		goto free_handle;

	fte->node.active = true;
	fte->status |= FS_FTE_STATUS_EXISTING;
	atomic_inc(&fg->node.version);

out:
	return handle;

free_handle:
	destroy_flow_handle(fte, handle, dest, handle->num_rules);
	return ERR_PTR(err);
}

static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
						     const struct mlx5_flow_spec *spec)
{
	struct list_head *prev = &ft->node.children;
	u32 max_fte = ft->autogroup.max_fte;
	unsigned int candidate_index = 0;
	unsigned int group_size = 0;
	struct mlx5_flow_group *fg;

	if (!ft->autogroup.active)
		return ERR_PTR(-ENOENT);

	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
		group_size = ft->autogroup.group_size;

	/* max_fte == ft->autogroup.max_types */
	if (group_size == 0)
		group_size = 1;

	/* sorted by start_index */
	fs_for_each_fg(fg, ft) {
		if (candidate_index + group_size > fg->start_index)
			candidate_index = fg->start_index + fg->max_ftes;
		else
			break;
		prev = &fg->node.list;
	}

	if (candidate_index + group_size > max_fte)
		return ERR_PTR(-ENOSPC);

	fg = alloc_insert_flow_group(ft,
				     spec->match_criteria_enable,
				     spec->match_criteria,
				     candidate_index,
				     candidate_index + group_size - 1,
				     prev);
	if (IS_ERR(fg))
		goto out;

	if (group_size == ft->autogroup.group_size)
		ft->autogroup.num_groups++;

out:
	return fg;
}

static int create_auto_flow_group(struct mlx5_flow_table *ft,
				  struct mlx5_flow_group *fg)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *match_criteria_addr;
	u8 src_esw_owner_mask_on;
	void *misc;
	int err;
	u32 *in;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
		 fg->mask.match_criteria_enable);
	MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
	MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
		 fg->max_ftes - 1);

	misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
			    misc_parameters);
	src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
					   source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, in,
		 source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);

	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
					   in, match_criteria);
	memcpy(match_criteria_addr, fg->mask.match_criteria,
	       sizeof(fg->mask.match_criteria));

	err = root->cmds->create_flow_group(root, ft, in, fg);
	if (!err) {
		fg->node.active = true;
		trace_mlx5_fs_add_fg(fg);
	}

	kvfree(in);
	return err;
}

static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
				struct mlx5_flow_destination *d2)
{
	if (d1->type == d2->type) {
		if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
		      d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
		     d1->vport.num == d2->vport.num &&
		     d1->vport.flags == d2->vport.flags &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
		      (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
		      (d1->vport.pkt_reformat->id ==
		       d2->vport.pkt_reformat->id) : true)) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		     d1->ft == d2->ft) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
		     d1->tir_num == d2->tir_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
		     d1->ft_num == d2->ft_num) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
		     d1->sampler_id == d2->sampler_id) ||
		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
		     d1->range.field == d2->range.field &&
		     d1->range.hit_ft == d2->range.hit_ft &&
		     d1->range.miss_ft == d2->range.miss_ft &&
		     d1->range.min == d2->range.min &&
		     d1->range.max == d2->range.max))
			return true;
	}

	return false;
}

static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
					     struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_rule *rule;

	list_for_each_entry(rule, &fte->node.children, node.list) {
		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
			return rule;
	}
	return NULL;
}

static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
					   const struct mlx5_fs_vlan *vlan1)
{
	return vlan0->ethtype != vlan1->ethtype ||
	       vlan0->vid != vlan1->vid ||
	       vlan0->prio != vlan1->prio;
}

static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
				      const struct mlx5_flow_act *act2)
{
	u32 action1 = act1->action;
	u32 action2 = act2->action;
	u32 xored_actions;

	xored_actions = action1 ^ action2;

	/* if one rule only wants to count, it's ok */
	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
	    action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
		return false;

	if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			     MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
			     MLX5_FLOW_CONTEXT_ACTION_DECAP |
			     MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
			     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
		return true;

	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
	    act1->pkt_reformat != act2->pkt_reformat)
		return true;

	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    act1->modify_hdr != act2->modify_hdr)
		return true;

	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
		return true;

	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
		return true;

	return false;
}

static int check_conflicting_ftes(struct fs_fte *fte,
				  const struct mlx5_flow_context *flow_context,
				  const struct mlx5_flow_act *flow_act)
{
	if (check_conflicting_actions(flow_act, &fte->action)) {
		mlx5_core_warn(get_dev(&fte->node),
			       "Found two FTEs with conflicting actions\n");
		return -EEXIST;
	}

	if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
	    fte->flow_context.flow_tag != flow_context->flow_tag) {
		mlx5_core_warn(get_dev(&fte->node),
			       "FTE flow tag %u already exists with different flow tag %u\n",
			       fte->flow_context.flow_tag,
			       flow_context->flow_tag);
		return -EEXIST;
	}

	return 0;
}

static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
					    const struct mlx5_flow_spec *spec,
					    struct mlx5_flow_act *flow_act,
					    struct mlx5_flow_destination *dest,
					    int dest_num,
					    struct fs_fte *fte)
{
	struct mlx5_flow_handle *handle;
	int old_action;
	int i;
	int ret;

	ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
	if (ret)
		return ERR_PTR(ret);

	old_action = fte->action.action;
	fte->action.action |= flow_act->action;
	handle = add_rule_fte(fte, fg, dest, dest_num,
			      old_action != flow_act->action);
	if (IS_ERR(handle)) {
		fte->action.action = old_action;
		return handle;
	}
	trace_mlx5_fs_set_fte(fte, false);

	for (i = 0; i < handle->num_rules; i++) {
		if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
			tree_add_node(&handle->rule[i]->node, &fte->node);
			trace_mlx5_fs_add_rule(handle->rule[i]);
		}
	}
	return handle;
}

static bool counter_is_valid(u32 action)
{
	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
			  MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
}

static bool dest_is_valid(struct mlx5_flow_destination *dest,
			  struct mlx5_flow_act *flow_act,
			  struct mlx5_flow_table *ft)
{
	bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
	u32 action = flow_act->action;

	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
		return counter_is_valid(action);

	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
		return true;

	if (ignore_level) {
		if (ft->type != FS_FT_FDB &&
		    ft->type != FS_FT_NIC_RX &&
		    ft->type != FS_FT_NIC_TX)
			return false;

		if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
		    ft->type != dest->ft->type)
			return false;
	}

	if (!dest || ((dest->type ==
	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
	    (dest->ft->level <= ft->level && !ignore_level)))
		return false;
	return true;
}

struct match_list {
	struct list_head	list;
	struct mlx5_flow_group *g;
};

static void free_match_list(struct match_list *head, bool ft_locked)
{
	struct match_list *iter, *match_tmp;

	list_for_each_entry_safe(iter, match_tmp, &head->list,
				 list) {
		tree_put_node(&iter->g->node, ft_locked);
		list_del(&iter->list);
		kfree(iter);
	}
}

static int build_match_list(struct match_list *match_head,
			    struct mlx5_flow_table *ft,
			    const struct mlx5_flow_spec *spec,
			    struct mlx5_flow_group *fg,
			    bool ft_locked)
{
	struct rhlist_head *tmp, *list;
	struct mlx5_flow_group *g;

	rcu_read_lock();
	INIT_LIST_HEAD(&match_head->list);
	/* Collect all fgs which has a matching match_criteria */
	list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
	/* RCU is atomic, we can't execute FW commands here */
	rhl_for_each_entry_rcu(g, tmp, list, hash) {
		struct match_list *curr_match;

		if (fg && fg != g)
			continue;

		if (unlikely(!tree_get_node(&g->node)))
			continue;

		curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
		if (!curr_match) {
			rcu_read_unlock();
			free_match_list(match_head, ft_locked);
			return -ENOMEM;
		}
		curr_match->g = g;
		list_add_tail(&curr_match->list, &match_head->list);
	}
	rcu_read_unlock();
	return 0;
}

static u64 matched_fgs_get_version(struct list_head *match_head)
{
	struct match_list *iter;
	u64 version = 0;

	list_for_each_entry(iter, match_head, list)
		version += (u64)atomic_read(&iter->g->node.version);
	return version;
}

static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
		  const u32 *match_value,
		  bool take_write)
{
	struct fs_fte *fte_tmp;

	if (take_write)
		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	else
		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
					 rhash_fte);
	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
		fte_tmp = NULL;
		goto out;
	}
	if (!fte_tmp->node.active) {
		tree_put_node(&fte_tmp->node, false);
		fte_tmp = NULL;
		goto out;
	}

	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
	if (take_write)
		up_write_ref_node(&g->node, false);
	else
		up_read_ref_node(&g->node);
	return fte_tmp;
}

static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
		       struct list_head *match_head,
		       const struct mlx5_flow_spec *spec,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_destination *dest,
		       int dest_num,
		       int ft_version)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_group *g;
	struct mlx5_flow_handle *rule;
	struct match_list *iter;
	bool take_write = false;
	struct fs_fte *fte;
	u64 version = 0;
	int err;

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte))
		return ERR_PTR(-ENOMEM);

search_again_locked:
	if (flow_act->flags & FLOW_ACT_NO_APPEND)
		goto skip_search;
	version = matched_fgs_get_version(match_head);
	/* Try to find an fte with identical match value and attempt update its
	 * action.
	 */
	list_for_each_entry(iter, match_head, list) {
		struct fs_fte *fte_tmp;

		g = iter->g;
		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
		/* No error check needed here, because insert_fte() is not called */
		up_write_ref_node(&fte_tmp->node, false);
		tree_put_node(&fte_tmp->node, false);
		kmem_cache_free(steering->ftes_cache, fte);
		return rule;
	}

skip_search:
	/* No group with matching fte found, or we skipped the search.
	 * Try to add a new fte to any matching fg.
	 */

	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
	if (atomic_read(&ft->node.version) != ft_version) {
		rule = ERR_PTR(-EAGAIN);
		goto out;
	}

	/* Check the fgs version. If version have changed it could be that an
	 * FTE with the same match value was added while the fgs weren't
	 * locked.
	 */
	if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
	    version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}

	list_for_each_entry(iter, match_head, list) {
		g = iter->g;

		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);

		if (!g->node.active) {
			up_write_ref_node(&g->node, false);
			continue;
		}

		err = insert_fte(g, fte);
		if (err) {
			up_write_ref_node(&g->node, false);
			if (err == -ENOSPC)
				continue;
			kmem_cache_free(steering->ftes_cache, fte);
			return ERR_PTR(err);
		}

		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
		up_write_ref_node(&g->node, false);
		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
		up_write_ref_node(&fte->node, false);
		if (IS_ERR(rule))
			tree_put_node(&fte->node, false);
		return rule;
	}
	rule = ERR_PTR(-ENOENT);
out:
	kmem_cache_free(steering->ftes_cache, fte);
	return rule;
}

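/* Added commentary: the search/skip_search flow above is optimistic
 * concurrency - group versions are sampled before the unlocked lookup and
 * re-checked under skip_search; -EAGAIN makes _mlx5_add_flow_rules() retry
 * against a newer table version instead of taking the table write lock up
 * front.
 */
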
static struct mlx5_flow_handle *
_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		     const struct mlx5_flow_spec *spec,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_flow_destination *dest,
		     int dest_num)
{
	struct mlx5_flow_steering *steering = get_steering(&ft->node);
	struct mlx5_flow_handle *rule;
	struct match_list match_head;
	struct mlx5_flow_group *g;
	bool take_write = false;
	struct fs_fte *fte;
	int version;
	int err;
	int i;

	if (!check_valid_spec(spec))
		return ERR_PTR(-EINVAL);

	if (flow_act->fg && ft->autogroup.active)
		return ERR_PTR(-EINVAL);

	if (dest && dest_num <= 0)
		return ERR_PTR(-EINVAL);

	for (i = 0; i < dest_num; i++) {
		if (!dest_is_valid(&dest[i], flow_act, ft))
			return ERR_PTR(-EINVAL);
	}
	nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
search_again_locked:
	version = atomic_read(&ft->node.version);

	/* Collect all fgs which has a matching match_criteria */
	err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
	if (err) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		else
			up_read_ref_node(&ft->node);
		return ERR_PTR(err);
	}

	if (!take_write)
		up_read_ref_node(&ft->node);

	rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
				      dest_num, version);
	free_match_list(&match_head, take_write);
	if (!IS_ERR(rule) ||
	    (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
		if (take_write)
			up_write_ref_node(&ft->node, false);
		return rule;
	}

	if (!take_write) {
		nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
		take_write = true;
	}

	if (PTR_ERR(rule) == -EAGAIN ||
	    version != atomic_read(&ft->node.version))
		goto search_again_locked;

	g = alloc_auto_flow_group(ft, spec);
	if (IS_ERR(g)) {
		rule = ERR_CAST(g);
		up_write_ref_node(&ft->node, false);
		return rule;
	}

	fte = alloc_fte(ft, spec, flow_act);
	if (IS_ERR(fte)) {
		up_write_ref_node(&ft->node, false);
		err = PTR_ERR(fte);
		goto err_alloc_fte;
	}

	nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
	up_write_ref_node(&ft->node, false);

	err = create_auto_flow_group(ft, g);
	if (err)
		goto err_release_fg;

	err = insert_fte(g, fte);
	if (err)
		goto err_release_fg;

	nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
	up_write_ref_node(&g->node, false);
	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
	up_write_ref_node(&fte->node, false);
	if (IS_ERR(rule))
		tree_put_node(&fte->node, false);
	tree_put_node(&g->node, false);
	return rule;

err_release_fg:
	up_write_ref_node(&g->node, false);
	kmem_cache_free(steering->ftes_cache, fte);
err_alloc_fte:
	tree_put_node(&g->node, false);
	return ERR_PTR(err);
}

static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
{
	return ((ft->type == FS_FT_NIC_RX) &&
		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
}

struct mlx5_flow_handle *
mlx5_add_flow_rules(struct mlx5_flow_table *ft,
		    const struct mlx5_flow_spec *spec,
		    struct mlx5_flow_act *flow_act,
		    struct mlx5_flow_destination *dest,
		    int num_dest)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	static const struct mlx5_flow_spec zero_spec = {};
	struct mlx5_flow_destination *gen_dest = NULL;
	struct mlx5_flow_table *next_ft = NULL;
	struct mlx5_flow_handle *handle = NULL;
	u32 sw_action = flow_act->action;
	int i;

	if (!spec)
		spec = &zero_spec;

	if (!is_fwd_next_action(sw_action))
		return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);

	if (!fwd_next_prio_supported(ft))
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&root->chain_lock);
	next_ft = find_next_fwd_ft(ft, flow_act);
	if (!next_ft) {
		handle = ERR_PTR(-EOPNOTSUPP);
		goto unlock;
	}

	gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
			   GFP_KERNEL);
	if (!gen_dest) {
		handle = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	for (i = 0; i < num_dest; i++)
		gen_dest[i] = dest[i];
	gen_dest[i].type =
		MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	gen_dest[i].ft = next_ft;
	dest = gen_dest;
	num_dest++;
	flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
			      MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
	if (IS_ERR(handle))
		goto unlock;

	if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
		mutex_lock(&next_ft->lock);
		list_add(&handle->rule[num_dest - 1]->next_ft,
			 &next_ft->fwd_rules);
		mutex_unlock(&next_ft->lock);
		handle->rule[num_dest - 1]->sw_action = sw_action;
		handle->rule[num_dest - 1]->ft = ft;
	}
unlock:
	mutex_unlock(&root->chain_lock);
	kfree(gen_dest);
	return handle;
}
EXPORT_SYMBOL(mlx5_add_flow_rules);

void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
{
	struct fs_fte *fte;
	int i;

	/* In order to consolidate the HW changes we lock the FTE for other
	 * changes, and increase its refcount, in order not to perform the
	 * "del" functions of the FTE. Will handle them here.
	 * The removal of the rules is done under locked FTE.
	 * After removing all the handle's rules, if there are remaining
	 * rules, it means we just need to modify the FTE in FW, and
	 * unlock/decrease the refcount we increased before.
	 * Otherwise, it means the FTE should be deleted. First delete the
	 * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of
	 * the FTE, which will handle the last decrease of the refcount, as
	 * well as required handling of its parent.
	 */
	fs_get_obj(fte, handle->rule[0]->node.parent);
	down_write_ref_node(&fte->node, false);
	for (i = handle->num_rules - 1; i >= 0; i--)
		tree_remove_node(&handle->rule[i]->node, true);
	if (list_empty(&fte->node.children)) {
		fte->node.del_hw_func(&fte->node);
		/* Avoid double call to del_hw_fte */
		fte->node.del_hw_func = NULL;
		up_write_ref_node(&fte->node, false);
		tree_put_node(&fte->node, false);
	} else if (fte->dests_size) {
		if (fte->modify_mask)
			modify_fte(fte);
		up_write_ref_node(&fte->node, false);
	} else {
		up_write_ref_node(&fte->node, false);
	}
	kfree(handle);
}
EXPORT_SYMBOL(mlx5_del_flow_rules);

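/* Illustrative add/del pairing (added commentary; tirn is a hypothetical TIR
 * number, error handling trimmed):
 *
 *	struct mlx5_flow_act flow_act = {
 *		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 *	};
 *	struct mlx5_flow_destination dest = {
 *		.type = MLX5_FLOW_DESTINATION_TYPE_TIR,
 *		.tir_num = tirn,
 *	};
 *	struct mlx5_flow_handle *handle;
 *
 *	handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	mlx5_del_flow_rules(handle);
 */
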
/* Assuming prio->node.children(flow tables) is sorted by level */
static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
{
	struct fs_prio *prio;

	fs_get_obj(prio, ft->node.parent);

	if (!list_is_last(&ft->node.list, &prio->node.children))
		return list_next_entry(ft, node.list);
	return find_next_chained_ft(prio);
}

static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	struct mlx5_ft_underlay_qp *uqp;
	struct mlx5_flow_table *new_root_ft = NULL;
	int err = 0;
	u32 qpn;

	if (root->root_ft != ft)
		return 0;

	new_root_ft = find_next_ft(ft);
	if (!new_root_ft) {
		root->root_ft = NULL;
		return 0;
	}

	if (list_empty(&root->underlay_qpns)) {
		/* Don't set any QPN (zero) in case QPN list is empty */
		qpn = 0;
		err = root->cmds->update_root_ft(root, new_root_ft,
						 qpn, false);
	} else {
		list_for_each_entry(uqp, &root->underlay_qpns, list) {
			qpn = uqp->qpn;
			err = root->cmds->update_root_ft(root,
							 new_root_ft, qpn,
							 false);
			if (err)
				break;
		}
	}

	if (err)
		mlx5_core_warn(root->dev,
			       "Update root flow table of id(%u) qpn(%d) failed\n",
			       ft->id, qpn);
	else
		root->root_ft = new_root_ft;

	return 0;
}

/* Connect flow table from previous priority to
 * the next flow table.
 */
static int disconnect_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_core_dev *dev = get_dev(&ft->node);
	struct mlx5_flow_table *next_ft;
	struct fs_prio *prio;
	int err = 0;

	err = update_root_ft_destroy(ft);
	if (err)
		return err;

	fs_get_obj(prio, ft->node.parent);
	if (!(list_first_entry(&prio->node.children,
			       struct mlx5_flow_table,
			       node.list) == ft))
		return 0;

	next_ft = find_next_ft(ft);
	err = connect_fwd_rules(dev, next_ft, ft);
	if (err)
		return err;

	err = connect_prev_fts(dev, next_ft, prio);
	if (err)
		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
			       ft->id);
	return err;
}

int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
{
	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
	int err = 0;

	mutex_lock(&root->chain_lock);
	if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
		err = disconnect_flow_table(ft);
	if (err) {
		mutex_unlock(&root->chain_lock);
		return err;
	}
	if (tree_remove_node(&ft->node, false))
		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
			       ft->id);
	mutex_unlock(&root->chain_lock);

	return err;
}
EXPORT_SYMBOL(mlx5_destroy_flow_table);

void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
{
	if (tree_remove_node(&fg->node, false))
		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
			       fg->id);
}
EXPORT_SYMBOL(mlx5_destroy_flow_group);


struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
						int n)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering || !steering->fdb_sub_ns)
		return NULL;

	return steering->fdb_sub_ns[n];
}
EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
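
/* Usage sketch (illustrative only): tc chain N of the FDB fast path maps to
 * slot N of fdb_sub_ns, so an eswitch consumer can do
 *
 *	struct mlx5_flow_namespace *ns = mlx5_get_fdb_sub_ns(dev, chain);
 *
 * where "chain" is assumed by the caller to be < FDB_NUM_CHAINS; no range
 * check is done here beyond the NULL checks above.
 */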

static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
{
	switch (type) {
	case MLX5_FLOW_NAMESPACE_BYPASS:
	case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
	case MLX5_FLOW_NAMESPACE_LAG:
	case MLX5_FLOW_NAMESPACE_OFFLOADS:
	case MLX5_FLOW_NAMESPACE_ETHTOOL:
	case MLX5_FLOW_NAMESPACE_KERNEL:
	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
	case MLX5_FLOW_NAMESPACE_ANCHOR:
		return true;
	default:
		return false;
	}
}

struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
						    enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	struct mlx5_flow_root_namespace *root_ns;
	int prio = 0;
	struct fs_prio *fs_prio;
	struct mlx5_flow_namespace *ns;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_FDB:
		if (steering->fdb_root_ns)
			return &steering->fdb_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		if (steering->port_sel_root_ns)
			return &steering->port_sel_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
		if (steering->sniffer_rx_root_ns)
			return &steering->sniffer_rx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
		if (steering->sniffer_tx_root_ns)
			return &steering->sniffer_tx_root_ns->ns;
		return NULL;
	case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
		root_ns = steering->fdb_root_ns;
		prio = FDB_BYPASS_PATH;
		break;
	case MLX5_FLOW_NAMESPACE_EGRESS:
	case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
	case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
		root_ns = steering->egress_root_ns;
		prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_BYPASS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_KERNEL_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX:
		root_ns = steering->rdma_tx_root_ns;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_COUNTERS_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
		root_ns = steering->rdma_rx_root_ns;
		prio = RDMA_RX_IPSEC_PRIO;
		break;
	case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
		root_ns = steering->rdma_tx_root_ns;
		prio = RDMA_TX_IPSEC_PRIO;
		break;
	default: /* Must be NIC RX */
		WARN_ON(!is_nic_rx_ns(type));
		root_ns = steering->root_ns;
		prio = type;
		break;
	}

	if (!root_ns)
		return NULL;

	fs_prio = find_prio(&root_ns->ns, prio);
	if (!fs_prio)
		return NULL;

	ns = list_first_entry(&fs_prio->node.children,
			      typeof(*ns),
			      node.list);

	return ns;
}
EXPORT_SYMBOL(mlx5_get_flow_namespace);
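
/* Usage sketch (illustrative only): a typical NIC RX consumer resolves a
 * namespace and then creates a table in it. The attribute values here are
 * arbitrary examples, not recommendations.
 *
 *	struct mlx5_flow_table_attr ft_attr = {};
 *	struct mlx5_flow_namespace *ns;
 *	struct mlx5_flow_table *ft;
 *
 *	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
 *	if (!ns)
 *		return -EOPNOTSUPP;
 *	ft_attr.max_fte = 2;
 *	ft = mlx5_create_flow_table(ns, &ft_attr);
 */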

struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
							      enum mlx5_flow_namespace_type type,
							      int vport)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	if (!steering)
		return NULL;

	switch (type) {
	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
		if (vport >= steering->esw_egress_acl_vports)
			return NULL;
		if (steering->esw_egress_root_ns &&
		    steering->esw_egress_root_ns[vport])
			return &steering->esw_egress_root_ns[vport]->ns;
		else
			return NULL;
	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
		if (vport >= steering->esw_ingress_acl_vports)
			return NULL;
		if (steering->esw_ingress_root_ns &&
		    steering->esw_ingress_root_ns[vport])
			return &steering->esw_ingress_root_ns[vport]->ns;
		else
			return NULL;
	default:
		return NULL;
	}
}
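
/* Usage sketch (illustrative only): ACL namespaces are per-vport, e.g. the
 * eswitch fetches the ingress ACL namespace of vport 0 with
 *
 *	ns = mlx5_get_flow_vport_acl_namespace(dev,
 *					       MLX5_FLOW_NAMESPACE_ESW_INGRESS,
 *					       0);
 *
 * A NULL return means the vport is out of range or the ACL root namespaces
 * were never initialized.
 */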

static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
				       unsigned int prio,
				       int num_levels,
				       enum fs_node_type type)
{
	struct fs_prio *fs_prio;

	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
	if (!fs_prio)
		return ERR_PTR(-ENOMEM);

	fs_prio->node.type = type;
	tree_init_node(&fs_prio->node, NULL, del_sw_prio);
	tree_add_node(&fs_prio->node, &ns->node);
	fs_prio->num_levels = num_levels;
	fs_prio->prio = prio;
	list_add_tail(&fs_prio->node.list, &ns->node.children);

	return fs_prio;
}

static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
					      unsigned int prio,
					      int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
}

static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
				      unsigned int prio, int num_levels)
{
	return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
}

static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
						     *ns)
{
	ns->node.type = FS_TYPE_NAMESPACE;

	return ns;
}

static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
							int def_miss_act)
{
	struct mlx5_flow_namespace *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return ERR_PTR(-ENOMEM);

	fs_init_namespace(ns);
	ns->def_miss_action = def_miss_act;
	tree_init_node(&ns->node, NULL, del_sw_ns);
	tree_add_node(&ns->node, &prio->node);
	list_add_tail(&ns->node.list, &prio->node.children);

	return ns;
}

static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
			     struct init_tree_node *prio_metadata)
{
	struct fs_prio *fs_prio;
	int i;

	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
	}
	return 0;
}

#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
	((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) +	\
			offset / 32)) >>					\
	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
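
/* Worked example (illustrative only): for a capability at bit offset 35,
 * offset / 32 selects the second __be32 word, and that word is shifted
 * right by 32 - 1 - (35 & 0x1f) = 28, so the word's bit 3 (big-endian bit
 * 35 overall) lands in the LSB before being masked with FLOW_TABLE_BIT_SZ.
 */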

static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
{
	int i;

	for (i = 0; i < caps->arr_sz; i++) {
		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
			return false;
	}

	return true;
}

static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
				    struct init_tree_node *init_node,
				    struct fs_node *fs_parent_node,
				    struct init_tree_node *init_parent_node,
				    int prio)
{
	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
					      flow_table_properties_nic_receive.
					      max_ft_level);
	struct mlx5_flow_namespace *fs_ns;
	struct fs_prio *fs_prio;
	struct fs_node *base;
	int i;
	int err;

	if (init_node->type == FS_TYPE_PRIO) {
		if ((init_node->min_ft_level > max_ft_level) ||
		    !has_required_caps(steering->dev, &init_node->caps))
			return 0;

		fs_get_obj(fs_ns, fs_parent_node);
		if (init_node->num_leaf_prios)
			return create_leaf_prios(fs_ns, prio, init_node);
		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
		if (IS_ERR(fs_prio))
			return PTR_ERR(fs_prio);
		base = &fs_prio->node;
	} else if (init_node->type == FS_TYPE_NAMESPACE) {
		fs_get_obj(fs_prio, fs_parent_node);
		fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
		if (IS_ERR(fs_ns))
			return PTR_ERR(fs_ns);
		base = &fs_ns->node;
	} else {
		return -EINVAL;
	}
	prio = 0;
	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       base, init_node, prio);
		if (err)
			return err;
		if (init_node->children[i].type == FS_TYPE_PRIO &&
		    init_node->children[i].num_leaf_prios) {
			prio += init_node->children[i].num_leaf_prios;
		}
	}

	return 0;
}

static int init_root_tree(struct mlx5_flow_steering *steering,
			  struct init_tree_node *init_node,
			  struct fs_node *fs_parent_node)
{
	int err;
	int i;

	for (i = 0; i < init_node->ar_size; i++) {
		err = init_root_tree_recursive(steering, &init_node->children[i],
					       fs_parent_node,
					       init_node, i);
		if (err)
			return err;
	}

	return 0;
}

static void del_sw_root_ns(struct fs_node *node)
{
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	fs_get_obj(ns, node);
	root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
	mutex_destroy(&root_ns->chain_lock);
	kfree(node);
}

static struct mlx5_flow_root_namespace
*create_root_ns(struct mlx5_flow_steering *steering,
		enum fs_flow_table_type table_type)
{
	const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
	struct mlx5_flow_root_namespace *root_ns;
	struct mlx5_flow_namespace *ns;

	/* Create the root namespace */
	root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
	if (!root_ns)
		return NULL;

	root_ns->dev = steering->dev;
	root_ns->table_type = table_type;
	root_ns->cmds = cmds;

	INIT_LIST_HEAD(&root_ns->underlay_qpns);

	ns = &root_ns->ns;
	fs_init_namespace(ns);
	mutex_init(&root_ns->chain_lock);
	tree_init_node(&ns->node, NULL, del_sw_root_ns);
	tree_add_node(&ns->node, NULL);

	return root_ns;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);

static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
{
	struct fs_prio *prio;

	fs_for_each_prio(prio, ns) {
		/* This updates prio start_level and num_levels */
		set_prio_attrs_in_prio(prio, acc_level);
		acc_level += prio->num_levels;
	}
	return acc_level;
}

static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
{
	struct mlx5_flow_namespace *ns;
	int acc_level_ns = acc_level;

	prio->start_level = acc_level;
	fs_for_each_ns(ns, prio) {
		/* This updates start_level and num_levels of ns's priority descendants */
		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);

		/* If this is a prio with chains, we can jump from one chain
		 * (namespace) to another, so we accumulate the levels
		 */
		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
			acc_level = acc_level_ns;
	}

	if (!prio->num_levels)
		prio->num_levels = acc_level_ns - prio->start_level;
	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}

static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
{
	struct mlx5_flow_namespace *ns = &root_ns->ns;
	struct fs_prio *prio;
	int start_level = 0;

	fs_for_each_prio(prio, ns) {
		set_prio_attrs_in_prio(prio, start_level);
		start_level += prio->num_levels;
	}
}
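
/* Worked example (illustrative only): with two prios of 2 and 3 levels, the
 * first prio gets start_level 0 and the second start_level 2, so the root
 * namespace spans levels 0..4. For an FS_TYPE_PRIO_CHAINS prio the
 * per-namespace levels accumulate instead of overlapping, which is why
 * acc_level is advanced inside the loop above only in that case.
 */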

#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
	if (WARN_ON(!ns))
		return -EINVAL;

	ft_attr.max_fte = ANCHOR_SIZE;
	ft_attr.level = ANCHOR_LEVEL;
	ft_attr.prio = ANCHOR_PRIO;

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
		return PTR_ERR(ft);
	}
	return 0;
}

static int init_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
	if (!steering->root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->root_ns);
	err = create_anchor_flow_table(steering);
	if (err)
		goto out_err;

	return 0;

out_err:
	cleanup_root_ns(steering->root_ns);
	steering->root_ns = NULL;
	return err;
}

static void clean_tree(struct fs_node *node)
{
	if (node) {
		struct fs_node *iter;
		struct fs_node *temp;

		tree_get_node(node);
		list_for_each_entry_safe(iter, temp, &node->children, list)
			clean_tree(iter);
		tree_put_node(node, false);
		tree_remove_node(node, false);
	}
}

static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
	if (!root_ns)
		return;

	clean_tree(&root_ns->ns.node);
}

static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
	if (!steering->sniffer_tx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
	if (!steering->sniffer_rx_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

#define PORT_SEL_NUM_LEVELS 3
static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *prio;

	steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
	if (!steering->port_sel_root_ns)
		return -ENOMEM;

	/* Create single prio */
	prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
			      PORT_SEL_NUM_LEVELS);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
	if (!steering->rdma_rx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_rx_root_fs,
			     &steering->rdma_rx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_rx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_rx_root_ns);
	steering->rdma_rx_root_ns = NULL;
	return err;
}

static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
	if (!steering->rdma_tx_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &rdma_tx_root_fs,
			     &steering->rdma_tx_root_ns->ns.node);
	if (err)
		goto out_err;

	set_prio_attrs(steering->rdma_tx_root_ns);

	return 0;

out_err:
	cleanup_root_ns(steering->rdma_tx_root_ns);
	steering->rdma_tx_root_ns = NULL;
	return err;
}

/* FT and tc chains are stored in the same array so we can re-use the
 * mlx5_get_fdb_sub_ns() and tc api for FT chains.
 * When creating a new ns for each chain store it in the first available slot.
 * Assume tc chains are created and stored first and only then the FT chain.
 */
static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct mlx5_flow_namespace *ns)
{
	int chain = 0;

	while (steering->fdb_sub_ns[chain])
		++chain;

	steering->fdb_sub_ns[chain] = ns;
}

static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
					struct fs_prio *maj_prio)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *min_prio;
	int prio;

	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
		if (IS_ERR(min_prio))
			return PTR_ERR(min_prio);
	}

	store_fdb_sub_ns_prio_chain(steering, ns);

	return 0;
}

static int create_fdb_chains(struct mlx5_flow_steering *steering,
			     int fs_prio,
			     int chains)
{
	struct fs_prio *maj_prio;
	int levels;
	int chain;
	int err;

	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
					  fs_prio,
					  levels);
	if (IS_ERR(maj_prio))
		return PTR_ERR(maj_prio);

	for (chain = 0; chain < chains; chain++) {
		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
		if (err)
			return err;
	}

	return 0;
}

static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
{
	int err;

	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
				       sizeof(*steering->fdb_sub_ns),
				       GFP_KERNEL);
	if (!steering->fdb_sub_ns)
		return -ENOMEM;

	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
	if (err)
		return err;

	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
	if (err)
		return err;

	return 0;
}

static int create_fdb_bypass(struct mlx5_flow_steering *steering)
{
	struct mlx5_flow_namespace *ns;
	struct fs_prio *prio;
	int i;

	prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
	if (IS_ERR(prio))
		return PTR_ERR(prio);

	ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
	if (IS_ERR(ns))
		return PTR_ERR(ns);

	for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
		prio = fs_create_prio(ns, i, 1);
		if (IS_ERR(prio))
			return PTR_ERR(prio);
	}
	return 0;
}

static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	cleanup_root_ns(steering->fdb_root_ns);
	steering->fdb_root_ns = NULL;
	kfree(steering->fdb_sub_ns);
	steering->fdb_sub_ns = NULL;
}

static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
	struct fs_prio *maj_prio;
	int err;

	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
	if (!steering->fdb_root_ns)
		return -ENOMEM;

	err = create_fdb_bypass(steering);
	if (err)
		goto out_err;

	err = create_fdb_fast_path(steering);
	if (err)
		goto out_err;

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 4);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	/* We put this priority last, knowing that nothing will get here
	 * unless explicitly forwarded to. This is possible because the
	 * slow path tables have catch all rules and nothing gets passed
	 * those tables.
	 */
	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
	if (IS_ERR(maj_prio)) {
		err = PTR_ERR(maj_prio);
		goto out_err;
	}

	set_prio_attrs(steering->fdb_root_ns);
	return 0;

out_err:
	cleanup_fdb_root_ns(steering);
	return err;
}

static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
	if (!steering->esw_egress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
{
	struct fs_prio *prio;

	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
	if (!steering->esw_ingress_root_ns[vport])
		return -ENOMEM;

	/* create 1 prio */
	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
	return PTR_ERR_OR_ZERO(prio);
}

int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_egress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_egress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_egress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_egress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_egress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);
	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
	return err;
}

void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_egress_root_ns)
		return;

	for (i = 0; i < steering->esw_egress_acl_vports; i++)
		cleanup_root_ns(steering->esw_egress_root_ns[i]);

	kfree(steering->esw_egress_root_ns);
	steering->esw_egress_root_ns = NULL;
}

int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;
	int i;

	steering->esw_ingress_root_ns =
			kcalloc(total_vports,
				sizeof(*steering->esw_ingress_root_ns),
				GFP_KERNEL);
	if (!steering->esw_ingress_root_ns)
		return -ENOMEM;

	for (i = 0; i < total_vports; i++) {
		err = init_ingress_acl_root_ns(steering, i);
		if (err)
			goto cleanup_root_ns;
	}
	steering->esw_ingress_acl_vports = total_vports;
	return 0;

cleanup_root_ns:
	for (i--; i >= 0; i--)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
	return err;
}

void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int i;

	if (!steering->esw_ingress_root_ns)
		return;

	for (i = 0; i < steering->esw_ingress_acl_vports; i++)
		cleanup_root_ns(steering->esw_ingress_root_ns[i]);

	kfree(steering->esw_ingress_root_ns);
	steering->esw_ingress_root_ns = NULL;
}

u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(dev, type);
	if (!ns)
		return 0;

	root = find_root(&ns->node);
	if (!root)
		return 0;

	return root->cmds->get_capabilities(root, root->table_type);
}

static int init_egress_root_ns(struct mlx5_flow_steering *steering)
{
	int err;

	steering->egress_root_ns = create_root_ns(steering,
						  FS_FT_NIC_TX);
	if (!steering->egress_root_ns)
		return -ENOMEM;

	err = init_root_tree(steering, &egress_root_fs,
			     &steering->egress_root_ns->ns.node);
	if (err)
		goto cleanup;
	set_prio_attrs(steering->egress_root_ns);
	return 0;

cleanup:
	cleanup_root_ns(steering->egress_root_ns);
	steering->egress_root_ns = NULL;
	return err;
}

static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	char *value = val.vstr;
	int err = 0;

	if (!strcmp(value, "dmfs")) {
		return 0;
	} else if (!strcmp(value, "smfs")) {
		u8 eswitch_mode;
		bool smfs_cap;

		eswitch_mode = mlx5_eswitch_mode(dev);
		smfs_cap = mlx5_fs_dr_is_supported(dev);

		if (!smfs_cap) {
			err = -EOPNOTSUPP;
			NL_SET_ERR_MSG_MOD(extack,
					   "Software managed steering is not supported by current device");
		}

		else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Software managed steering is not supported when eswitch offloads enabled.");
			err = -EOPNOTSUPP;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack,
				   "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
		err = -EINVAL;
	}

	return err;
}

static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	enum mlx5_flow_steering_mode mode;

	if (!strcmp(ctx->val.vstr, "smfs"))
		mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		mode = MLX5_FLOW_STEERING_MODE_DMFS;
	dev->priv.steering->mode = mode;

	return 0;
}

static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
		strcpy(ctx->val.vstr, "smfs");
	else
		strcpy(ctx->val.vstr, "dmfs");
	return 0;
}

static const struct devlink_param mlx5_fs_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
			     "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     mlx5_fs_mode_get, mlx5_fs_mode_set,
			     mlx5_fs_mode_validate),
};
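
/* Usage sketch (illustrative only): the parameter registered above is driven
 * from userspace via the devlink CLI; the PCI address is an example.
 *
 *	$ devlink dev param set pci/0000:06:00.0 \
 *		name flow_steering_mode value "smfs" cmode runtime
 *	$ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
 */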

void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	cleanup_root_ns(steering->root_ns);
	cleanup_fdb_root_ns(steering);
	cleanup_root_ns(steering->port_sel_root_ns);
	cleanup_root_ns(steering->sniffer_rx_root_ns);
	cleanup_root_ns(steering->sniffer_tx_root_ns);
	cleanup_root_ns(steering->rdma_rx_root_ns);
	cleanup_root_ns(steering->rdma_tx_root_ns);
	cleanup_root_ns(steering->egress_root_ns);

	devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
			       ARRAY_SIZE(mlx5_fs_params));
}

int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;
	int err;

	err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
				   ARRAY_SIZE(mlx5_fs_params));
	if (err)
		return err;

	if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
	      (MLX5_CAP_GEN(dev, nic_flow_table))) ||
	     ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
	      MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
		err = init_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_ESWITCH_MANAGER(dev)) {
		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
			err = init_fdb_root_ns(steering);
			if (err)
				goto err;
		}
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
		err = init_sniffer_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
		err = init_sniffer_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
		err = init_port_sel_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
	    MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
		err = init_rdma_rx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
		err = init_rdma_tx_root_ns(steering);
		if (err)
			goto err;
	}

	if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
		err = init_egress_root_ns(steering);
		if (err)
			goto err;
	}

	return 0;

err:
	mlx5_fs_core_cleanup(dev);
	return err;
}

void mlx5_fs_core_free(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering = dev->priv.steering;

	kmem_cache_destroy(steering->ftes_cache);
	kmem_cache_destroy(steering->fgs_cache);
	kfree(steering);
	mlx5_ft_pool_destroy(dev);
	mlx5_cleanup_fc_stats(dev);
}

int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_flow_steering *steering;
	int err = 0;

	err = mlx5_init_fc_stats(dev);
	if (err)
		return err;

	err = mlx5_ft_pool_init(dev);
	if (err)
		goto err;

	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
	if (!steering) {
		err = -ENOMEM;
		goto err;
	}

	steering->dev = dev;
	dev->priv.steering = steering;

	if (mlx5_fs_dr_is_supported(dev))
		steering->mode = MLX5_FLOW_STEERING_MODE_SMFS;
	else
		steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;

	steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
						sizeof(struct mlx5_flow_group), 0,
						0, NULL);
	steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
						 0, NULL);
	if (!steering->ftes_cache || !steering->fgs_cache) {
		err = -ENOMEM;
		goto err;
	}

	return 0;

err:
	mlx5_fs_core_free(dev);
	return err;
}

int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *new_uqp;
	int err = 0;

	new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
	if (!new_uqp)
		return -ENOMEM;

	mutex_lock(&root->chain_lock);

	if (!root->root_ft) {
		err = -EINVAL;
		goto update_ft_fail;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 false);
	if (err) {
		mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
			       underlay_qpn, err);
		goto update_ft_fail;
	}

	new_uqp->qpn = underlay_qpn;
	list_add_tail(&new_uqp->list, &root->underlay_qpns);

	mutex_unlock(&root->chain_lock);

	return 0;

update_ft_fail:
	mutex_unlock(&root->chain_lock);
	kfree(new_uqp);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
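
/* Usage sketch (illustrative only): an IPoIB-style consumer registers its
 * underlay QP after the root table exists and removes it symmetrically on
 * teardown; "qpn" is a hypothetical queue pair number owned by the caller.
 *
 *	err = mlx5_fs_add_rx_underlay_qpn(dev, qpn);
 *	...
 *	mlx5_fs_remove_rx_underlay_qpn(dev, qpn);
 */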

int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
	struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
	struct mlx5_ft_underlay_qp *uqp;
	bool found = false;
	int err = 0;

	mutex_lock(&root->chain_lock);
	list_for_each_entry(uqp, &root->underlay_qpns, list) {
		if (uqp->qpn == underlay_qpn) {
			found = true;
			break;
		}
	}

	if (!found) {
		mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
			       underlay_qpn);
		err = -EINVAL;
		goto out;
	}

	err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
					 true);
	if (err)
		mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
			       underlay_qpn, err);

	list_del(&uqp->list);
	mutex_unlock(&root->chain_lock);
	kfree(uqp);

	return 0;

out:
	mutex_unlock(&root->chain_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);

static struct mlx5_flow_root_namespace
*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_flow_namespace *ns;

	if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
	    ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
		ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
	else
		ns = mlx5_get_flow_namespace(dev, ns_type);
	if (!ns)
		return NULL;

	return find_root(&ns->node);
}

struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
						 u8 ns_type, u8 num_actions,
						 void *modify_actions)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_modify_hdr *modify_hdr;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
	if (!modify_hdr)
		return ERR_PTR(-ENOMEM);

	modify_hdr->ns_type = ns_type;
	err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
					      modify_actions, modify_hdr);
	if (err) {
		kfree(modify_hdr);
		return ERR_PTR(err);
	}

	return modify_hdr;
}
EXPORT_SYMBOL(mlx5_modify_header_alloc);

void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
				struct mlx5_modify_hdr *modify_hdr)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, modify_hdr->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->modify_header_dealloc(root, modify_hdr);
	kfree(modify_hdr);
}
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
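
/* Usage sketch (illustrative only; the field choice is arbitrary and some
 * fields may need offset/length setup depending on the target): callers
 * build packed set_action_in words and pass them in; the returned object is
 * later attached to a rule via flow_act.modify_hdr.
 *
 *	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 *	struct mlx5_modify_hdr *mh;
 *
 *	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
 *	MLX5_SET(set_action_in, action, field,
 *		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
 *	MLX5_SET(set_action_in, action, data, 64);
 *	mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
 *				      1, action);
 *	if (IS_ERR(mh))
 *		return PTR_ERR(mh);
 */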

struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
						     struct mlx5_pkt_reformat_params *params,
						     enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5_pkt_reformat *pkt_reformat;
	struct mlx5_flow_root_namespace *root;
	int err;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
	if (!pkt_reformat)
		return ERR_PTR(-ENOMEM);

	pkt_reformat->ns_type = ns_type;
	pkt_reformat->reformat_type = params->type;
	err = root->cmds->packet_reformat_alloc(root, params, ns_type,
						pkt_reformat);
	if (err) {
		kfree(pkt_reformat);
		return ERR_PTR(err);
	}

	return pkt_reformat;
}
EXPORT_SYMBOL(mlx5_packet_reformat_alloc);

void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
				  struct mlx5_pkt_reformat *pkt_reformat)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, pkt_reformat->ns_type);
	if (WARN_ON(!root))
		return;
	root->cmds->packet_reformat_dealloc(root, pkt_reformat);
	kfree(pkt_reformat);
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
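
/* Usage sketch (illustrative only): the params describe the header to
 * prepend; "encap_hdr"/"encap_sz" are hypothetical caller buffers holding a
 * prebuilt L2+VXLAN header.
 *
 *	struct mlx5_pkt_reformat_params params = {};
 *	struct mlx5_pkt_reformat *pr;
 *
 *	params.type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
 *	params.size = encap_sz;
 *	params.data = encap_hdr;
 *	pr = mlx5_packet_reformat_alloc(dev, &params,
 *					MLX5_FLOW_NAMESPACE_FDB);
 *	if (IS_ERR(pr))
 *		return PTR_ERR(pr);
 */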

int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
{
	return definer->id;
}

struct mlx5_flow_definer *
mlx5_create_match_definer(struct mlx5_core_dev *dev,
			  enum mlx5_flow_namespace_type ns_type, u16 format_id,
			  u32 *match_mask)
{
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_definer *definer;
	int id;

	root = get_root_namespace(dev, ns_type);
	if (!root)
		return ERR_PTR(-EOPNOTSUPP);

	definer = kzalloc(sizeof(*definer), GFP_KERNEL);
	if (!definer)
		return ERR_PTR(-ENOMEM);

	definer->ns_type = ns_type;
	id = root->cmds->create_match_definer(root, format_id, match_mask);
	if (id < 0) {
		mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
		kfree(definer);
		return ERR_PTR(id);
	}
	definer->id = id;
	return definer;
}

void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
				struct mlx5_flow_definer *definer)
{
	struct mlx5_flow_root_namespace *root;

	root = get_root_namespace(dev, definer->ns_type);
	if (WARN_ON(!root))
		return;

	root->cmds->destroy_match_definer(root, definer->id);
	kfree(definer);
}
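
/* Usage sketch (illustrative only): format_id and the mask layout are
 * device-specific values taken from the HW spec, so both are hypothetical
 * here.
 *
 *	definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_FDB,
 *					    format_id, match_mask);
 *	if (IS_ERR(definer))
 *		return PTR_ERR(definer);
 *	...
 *	mlx5_destroy_match_definer(dev, definer);
 */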

int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_root_namespace *peer_ns,
				 u16 peer_idx)
{
	if (peer_ns && ns->mode != peer_ns->mode) {
		mlx5_core_err(ns->dev,
			      "Can't peer namespace of different steering mode\n");
		return -EINVAL;
	}

	return ns->cmds->set_peer(ns, peer_ns, peer_idx);
}

/* This function should be called only at init stage of the namespace.
 * It is not safe to call this function while steering operations
 * are executed in the namespace.
 */
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
				 enum mlx5_flow_steering_mode mode)
{
	struct mlx5_flow_root_namespace *root;
	const struct mlx5_flow_cmds *cmds;
	int err;

	root = find_root(&ns->node);
	if (&root->ns != ns)
	/* Can't set cmds to non root namespace */
		return -EINVAL;

	if (root->table_type != FS_FT_FDB)
		return -EOPNOTSUPP;

	if (root->mode == mode)
		return 0;

	if (mode == MLX5_FLOW_STEERING_MODE_SMFS)
		cmds = mlx5_fs_cmd_get_dr_cmds();
	else
		cmds = mlx5_fs_cmd_get_fw_cmds();
	if (!cmds)
		return -EOPNOTSUPP;

	err = cmds->create_ns(root);
	if (err) {
		mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
			      err);
		return err;
	}

	root->cmds->destroy_ns(root);