/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"
#include "lib/mpfs.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);
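
/* RX steering overview, as reconstructed from the rule destinations in this
 * file: packets first hit the VLAN table, whose rules forward to the L2
 * (DMAC) table, whose rules forward to the TTC (traffic type classifier)
 * table, which finally spreads traffic across TIRs by L3/L4 protocol type.
 */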
enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};
struct mlx5e_l2_hash_node {
	struct hlist_node    hlist;
	u8                   action;
	struct mlx5e_l2_rule ai;
	bool                 mpfs;
};
/* Hash on the last byte of the MAC address */
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}
static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
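
/* Address sync works as a mark-and-sweep over the two hash tables above:
 * mlx5e_handle_netdev_addr() first marks every node MLX5E_ACTION_DEL,
 * mlx5e_sync_netdev_addr() then re-marks still-present addresses as
 * MLX5E_ACTION_NONE (known) or MLX5E_ACTION_ADD (new), and
 * mlx5e_apply_netdev_addr() finally executes the remaining actions.
 */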
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int list_size = 0, max_list_size, vlan, err, i = 0;
	u16 *vlans;

	/* Count the active C-tag vlans */
	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}
enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
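
/* MATCH_* rules match one specific VID; the ANY_* and UNTAGGED types are
 * catch-all rules installed when C-tag filtering is disabled or the device
 * enters promiscuous mode (see mlx5e_add_any_vid_rules() and
 * mlx5e_set_rx_mode_work() below).
 */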
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &priv->fs.vlan.active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}
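
/* Illustration (not driver code): adding the filter for C-tag VID 10 via
 *
 *	__mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, 10, spec);
 *
 * programs a flow entry that is logically
 *
 *	match:  outer.cvlan_tag == 1 && outer.first_vid == 10
 *	action: forward to the L2 flow table
 */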
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}
static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (priv->fs.vlan.active_svlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
			priv->fs.vlan.active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (priv->fs.vlan.active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
			priv->fs.vlan.active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}
static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.cvlan_filter_disabled)
		return;

	priv->fs.vlan.cvlan_filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
	int err;

	set_bit(vid, priv->fs.vlan.active_cvlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, priv->fs.vlan.active_cvlans);

	return err;
}
static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
{
	struct net_device *netdev = priv->netdev;
	int err;

	set_bit(vid, priv->fs.vlan.active_svlans);

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		return err;
	}

	/* Some netdev features depend on the active S-tagged vlans; re-evaluate them */
	netdev_update_features(netdev);
	return err;
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(priv, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(priv, vid);

	return -EOPNOTSUPP;
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, priv->fs.vlan.active_cvlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, priv->fs.vlan.active_svlans);
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_update_features(dev);
	}

	return 0;
}
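
/* The two handlers above are wired up as the netdev's .ndo_vlan_rx_add_vid /
 * .ndo_vlan_rx_kill_vid callbacks (in en_main.c), so they run whenever the
 * stack adds or removes a VLAN on this device, e.g.
 * "ip link add link eth0 name eth0.10 type vlan id 10".
 */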
static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (priv->fs.vlan.cvlan_filter_disabled)
		mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (priv->fs.vlan.cvlan_filter_disabled)
		mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
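
/* Typical use (sketch): walk every node of one of the address hash tables
 * safely, so the loop body may free the current node:
 *
 *	struct mlx5e_l2_hash_node *hn;
 *	struct hlist_node *tmp;
 *	int i;
 *
 *	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
 *		mlx5e_execute_l2_action(priv, hn);
 */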
static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n",
			    action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err);
}
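
/* MPFS (the device's Multi-Physical Function Switch) is a port-wide L2 table
 * shared by all functions; unicast MACs are registered there in addition to
 * the per-function L2 flow table so the hardware forwards their traffic to
 * this function. Multicast addresses are deliberately skipped above.
 */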
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}
static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}
static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size, size, err = 0, hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		if (!priv->channels.params.vlan_strip_disable)
			netdev_warn_once(ndev,
					 "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.cvlan_filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}
static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}
void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}
static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}
struct mlx5e_etype_proto {
	u16 etype;
	u8 proto;
};
static struct mlx5e_etype_proto ttc_rules[] = {
	[MLX5E_TT_IPV4_TCP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV6_TCP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV4_UDP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV6_UDP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV4_IPSEC_AH]  = { .etype = ETH_P_IP,   .proto = IPPROTO_AH },
	[MLX5E_TT_IPV6_IPSEC_AH]  = { .etype = ETH_P_IPV6, .proto = IPPROTO_AH },
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .etype = ETH_P_IP,   .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .etype = ETH_P_IPV6, .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV4]           = { .etype = ETH_P_IP,   .proto = 0 },
	[MLX5E_TT_IPV6]           = { .etype = ETH_P_IPV6, .proto = 0 },
	[MLX5E_TT_ANY]            = { .etype = 0,          .proto = 0 },
};
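
/* The TTC ("traffic type classifier") table is the last steering stage in
 * this file: each entry above becomes a rule matching ethertype (or IP
 * version, when supported) plus IP protocol and forwarding to the TIR that
 * performs RSS for that traffic type; MLX5E_TT_ANY is the catch-all.
 */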
static struct mlx5e_etype_proto ttc_tunnel_rules[] = {
	[MLX5E_TT_IPV4_GRE]  = { .etype = ETH_P_IP,   .proto = IPPROTO_GRE },
	[MLX5E_TT_IPV6_GRE]  = { .etype = ETH_P_IPV6, .proto = IPPROTO_GRE },
	[MLX5E_TT_IPV4_IPIP] = { .etype = ETH_P_IP,   .proto = IPPROTO_IPIP },
	[MLX5E_TT_IPV6_IPIP] = { .etype = ETH_P_IPV6, .proto = IPPROTO_IPIP },
	[MLX5E_TT_IPV4_IPV6] = { .etype = ETH_P_IP,   .proto = IPPROTO_IPV6 },
	[MLX5E_TT_IPV6_IPV6] = { .etype = ETH_P_IPV6, .proto = IPPROTO_IPV6 },
};
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}
static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}
static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}
bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5e_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
static u8 mlx5e_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}
static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}

	ipv = mlx5e_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
					  struct ttc_params *params,
					  struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **trules;
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_rules[tt].etype,
						     ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = dest;
	}

	if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	trules    = ttc->tunnel_rules;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft   = params->inner_ttc->ft.t;
	for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
		if (!mlx5e_tunnel_proto_supported_rx(priv->mdev,
						     ttc_tunnel_rules[tt].proto))
			continue;
		trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						     ttc_tunnel_rules[tt].etype,
						     ttc_tunnel_rules[tt].proto);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
					 bool use_ipv)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
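
/* A flow group fixes which header fields its rules may match on; a rule is
 * inserted into the group whose match criteria equal the rule's. Hence the
 * three groups above: L4 (ip_protocol plus ethertype or ip_version), L3
 * (ethertype or ip_version only), and the zero-criteria "any" group.
 */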
static struct mlx5_flow_handle *
mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
			      struct mlx5_flow_table *ft,
			      struct mlx5_flow_destination *dest,
			      u16 etype, u8 proto)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5e_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
						struct ttc_params *params,
						struct mlx5e_ttc_table *ttc)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5e_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		struct mlx5e_ttc_rule *rule = &rules[tt];

		if (tt == MLX5E_TT_ANY)
			dest.tir_num = params->any_tt_tirn;
		else
			dest.tir_num = params->indir_tirn[tt];

		rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
							   ttc_rules[tt].etype,
							   ttc_rules[tt].proto);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = dest;
	}

	return 0;

del_rules:
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}
static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		ft->g = NULL;
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_INNER_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
				struct ttc_params *ttc_params)
{
	ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
	ttc_params->inner_ttc = &priv->fs.inner_ttc;
}
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;
}
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
				 struct mlx5e_ttc_table *ttc)
{
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return 0;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_inner_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
				   struct mlx5e_ttc_table *ttc)
{
	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
			     struct mlx5e_ttc_table *ttc)
{
	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
			   struct mlx5e_ttc_table *ttc)
{
	bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
	if (err)
		goto err;

	return 0;

err:
	mlx5e_destroy_flow_table(ft);
	return err;
}
int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
		       struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL);
}
struct mlx5_flow_destination
mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}
int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type);

	return mlx5e_ttc_fwd_dest(priv, type, &dest);
}
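
/* Example (sketch, not driver code): a consumer can temporarily redirect,
 * say, TCP/IPv4 traffic to its own flow table and later restore the default
 * TIR:
 *
 *	mlx5e_ttc_fwd_dest(priv, MLX5E_TT_IPV4_TCP, &my_dest);
 *	...
 *	mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_IPV4_TCP);
 *
 * where my_dest is a caller-owned struct mlx5_flow_destination
 * (hypothetical name).
 */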
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}
#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP3_SIZE)
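
/* Group sizing rationale (inferred from mlx5e_create_l2_table_groups()
 * below): one entry for the promiscuous catch-all, 2^15 entries for exact
 * DMAC matches (unicast and broadcast), and one entry for the allmulti
 * match.
 */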
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}
static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
#define MLX5E_NUM_VLAN_GROUPS	4
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE)
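
/* Sizing (inferred from __mlx5e_create_vlan_table_groups() below): 4096
 * per-VID C-tag rules, 4096 per-VID S-tag rules, two cvlan_tag-only entries
 * (the untagged and any-ctag rules) and one svlan_tag-only entry (the
 * any-stag rule).
 */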
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}
static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}
static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EOPNOTSUPP;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	mlx5e_set_ttc_basic_params(priv, &ttc_params);
	mlx5e_set_inner_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;

	err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}