/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
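/* Each mapping above is consumed in two ways: soffset selects the 32-bit
 * word inside the FTE match param when matching on the register (see
 * mlx5e_tc_match_to_reg_match() below), while mfield/moffset/mlen drive the
 * modify-header SET action that writes the register (see
 * mlx5e_tc_match_to_reg_set_and_get_id() below).
 */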
/* To avoid a false lock dependency warning, give tc_ht a lock class distinct
 * from that of the hash tables used by flow groups: when the last flow is
 * deleted from a group and the group itself is then deleted, we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash and
 * takes a ht->mutex that is a different lock than the ht->mutex here.
 */
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
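/* Merge a (data, mask) pair into the misc2 metadata register match of a flow
 * spec at the mapping's bit offset, preserving whatever the spec already
 * matches in the other bits of that register. A sketch of typical usage,
 * assuming a 16-bit chain id should be matched via the CHAIN_TO_REG mapping:
 *
 *	mlx5e_tc_match_to_reg_match(spec, CHAIN_TO_REG, chain_id, 0xffff);
 */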
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move to the correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	data <<= moffset;
	max_mask <<= moffset;

	/* zero the affected bits of the current value and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* merge in the new value and mask */
	curr_mask |= mask;
	curr_val |= data;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
static int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}
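/* As with mlx5e_get_int_port_priv() above, the CT offload context lives in
 * the uplink representor's private data in switchdev mode, and in the
 * per-netdev tc table otherwise.
 */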
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
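/* Rewrite, in place, the data of a SET action that was previously allocated
 * through mlx5e_tc_match_to_reg_set_and_get_id(); act_id is the index that
 * function returned. A sketch (hypothetical old_id/new_id values):
 *
 *	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts, ns,
 *						      TUNNEL_TO_REG, old_id);
 *	...
 *	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, TUNNEL_TO_REG,
 *					     act_id, new_id);
 */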
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
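/* Hairpin forwards packets that arrive on one function's RQs straight back
 * out through a peer function's SQs without software involvement. A hairpin
 * instance pairs the two devices' queues; when it spans more than one
 * channel, the indirect RQT, per-traffic-type TIRs and TTC table below are
 * used to spread the hairpinned traffic with RSS.
 */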
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(priv->fs.ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
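/* Hairpin entries are keyed by (peer vhca_id, match prio); e.g. vhca_id 0x25
 * with prio 3 hashes under key 0x00250003.
 */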
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
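/* VLAN PCP is a 3-bit field, so valid matched priorities are 0..7 and 8 is
 * free to mean "no specific priority matched".
 */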
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 16;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
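	/* e.g. a link_speed of 100000 (100Gbs) yields two hairpin channels,
	 * while anything at or below 50000 (50Gbs) yields one.
	 */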
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
	} else if (flow_flag_test(flow, SAMPLE)) {
		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
					       mlx5e_tc_get_flow_tun_id(flow));
	} else {
		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	}

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			if (flow_flag_test(flow, CT))
				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
			else
				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (flow_flag_test(flow, SAMPLE))
		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
	else
offload_rule_0:
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
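/* Slow-path rules deliberately skip the flow's real actions and just forward
 * to software; mlx5e_tc_add_fdb_flow() below falls back to this when an
 * encap action has no valid neighbour yet.
 */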
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In lag case we may get devices from different eswitch instances.
		 * If we failed to get vport num, it means, mostly, that we are on the wrong
		 * eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow_parse_attr *parse_attr,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(flow->attr->modify_hdr);
	flow->attr->modify_hdr = mod_hdr;

	return 0;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool vf_tun = false, encap_valid = true;
	struct net_device *encap_dev = NULL;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	struct mlx5_fc *counter;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto err_out;
		}
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		dev_put(out_dev);
		if (err)
			goto err_out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			vf_tun = true;
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_out;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun = false;
	int out_index;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			vf_tun = true;
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	}

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(attr->sample_attr);
	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
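/* A chain-0 rule that can miss in a later chain (goto) or that samples
 * packets needs the tunnel match restored in software, which requires
 * mapping the tunnel to an id; see mlx5e_get_flow_tunnel_id() below.
 */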
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
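/* COPY_DISSECTOR() copies one dissector match key out of a flow rule, e.g.:
 *
 *	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
 *		       &tunnel_key.enc_key_id);
 */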
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
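	/* The register value packs the tunnel id above the enc-opts id; when
	 * no enc opts were mapped (enc_opts_id == 0) the enc-opts bits are
	 * excluded from the mask so any enc-opts value matches.
	 */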
	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
 * It changes the inner ip_ecn depending on inner and outer ip_ecn as follows:
 * +---------+----------------------------------------+
 * |Arriving |         Arriving Outer Header          |
 * |   Inner +---------+---------+---------+----------+
 * |  Header | Not-ECT | ECT(0)  | ECT(1)  |    CE    |
 * +---------+---------+---------+---------+----------+
 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT |  <drop>  |
 * |  ECT(0) |  ECT(0) | ECT(0)  | ECT(1)  |   CE*    |
 * |  ECT(1) |  ECT(1) | ECT(1)  | ECT(1)* |   CE*    |
 * |    CE   |   CE    |   CE    |   CE    |   CE     |
 * +---------+---------+---------+---------+----------+
 *
 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
 * the inner ip_ecn value before hardware decap action.
 *
 * Cells marked with * are changed from the original inner packet ip_ecn value
 * during decap, so matching those values on inner ip_ecn before decap will fail.
 *
 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
 * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
 * and as such we can drop the inner ip_ecn = CE match.
 */
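/* Example: a filter matching both enc_tos ecn == CE and tos ecn == CE is
 * offloaded with the inner ecn match dropped, since hardware sees the
 * pre-decap inner value, which for an outer CE packet may still be any of
 * the four codepoints.
 */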
static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
				      struct flow_cls_offload *f,
				      bool *match_inner_ecn)
{
	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ip match;

	*match_inner_ecn = true;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		flow_rule_match_enc_ip(rule, &match);
		outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		flow_rule_match_ip(rule, &match);
		inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;

	/* Both inner and outer have full mask on ecn */

	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn might change by DECAP action */
		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in software, as packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE, as decap will change inner ecn to CE in any case,
	 * drop match on inner ecn
	 */
	*match_inner_ecn = false;

	return 0;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
		return -EOPNOTSUPP;
	}

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * instead of setting the tunnel type
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
2202 static bool skip_key_basic(struct net_device *filter_dev,
2203 struct flow_cls_offload *f)
2205 /* When doing mpls over udp decap, the user needs to provide
2206 * MPLS_UC as the protocol in order to be able to match on mpls
2207 * label fields. However, the actual ethertype is IP so we want to
2208 * avoid matching on this, otherwise we'll fail the match.
2210 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2216 static int __parse_cls_flower(struct mlx5e_priv *priv,
2217 struct mlx5e_tc_flow *flow,
2218 struct mlx5_flow_spec *spec,
2219 struct flow_cls_offload *f,
2220 struct net_device *filter_dev,
2221 u8 *inner_match_level, u8 *outer_match_level)
2223 struct netlink_ext_ack *extack = f->common.extack;
2224 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2226 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2228 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2230 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2232 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2234 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2236 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2237 struct flow_dissector *dissector = rule->match.dissector;
2238 enum fs_flow_table_type fs_type;
2239 bool match_inner_ecn = true;
2245 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2246 match_level = outer_match_level;
2248 if (dissector->used_keys &
2249 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2250 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2251 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2252 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2253 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2254 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2255 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2256 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2257 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2258 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2259 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2260 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2261 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2262 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2263 BIT(FLOW_DISSECTOR_KEY_TCP) |
2264 BIT(FLOW_DISSECTOR_KEY_IP) |
2265 BIT(FLOW_DISSECTOR_KEY_CT) |
2266 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2267 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2268 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2269 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2270 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2271 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2272 dissector->used_keys);
2276 if (mlx5e_get_tc_tun(filter_dev)) {
2277 bool match_inner = false;
2279 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2280 outer_match_level, &match_inner);
2285 /* header pointers should point to the inner headers
2286 * if the packet was decapsulated already.
2287 * outer headers are set by parse_tunnel_attr.
2289 match_level = inner_match_level;
2290 headers_c = get_match_inner_headers_criteria(spec);
2291 headers_v = get_match_inner_headers_value(spec);
2294 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2299 err = mlx5e_flower_parse_meta(filter_dev, f);
2303 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2304 !skip_key_basic(filter_dev, f)) {
2305 struct flow_match_basic match;
2307 flow_rule_match_basic(rule, &match);
2308 mlx5e_tc_set_ethertype(priv->mdev, &match,
2309 match_level == outer_match_level,
2310 headers_c, headers_v);
2312 if (match.mask->n_proto)
2313 *match_level = MLX5_MATCH_L2;
2315 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2316 is_vlan_dev(filter_dev)) {
2317 struct flow_dissector_key_vlan filter_dev_mask;
2318 struct flow_dissector_key_vlan filter_dev_key;
2319 struct flow_match_vlan match;
2321 if (is_vlan_dev(filter_dev)) {
2322 match.key = &filter_dev_key;
2323 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2324 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2325 match.key->vlan_priority = 0;
2326 match.mask = &filter_dev_mask;
2327 memset(match.mask, 0xff, sizeof(*match.mask));
2328 match.mask->vlan_priority = 0;
2330 flow_rule_match_vlan(rule, &match);
2332 if (match.mask->vlan_id ||
2333 match.mask->vlan_priority ||
2334 match.mask->vlan_tpid) {
2335 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2336 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2338 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2341 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2343 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2347 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2348 match.mask->vlan_id);
2349 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2350 match.key->vlan_id);
2352 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2353 match.mask->vlan_priority);
2354 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2355 match.key->vlan_priority);
2357 *match_level = MLX5_MATCH_L2;
2359 } else if (*match_level != MLX5_MATCH_NONE) {
2360 /* cvlan_tag enabled in match criteria and
2361 * disabled in match value means both S & C tags
2362 * don't exist (untagged for both)
2364 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2365 *match_level = MLX5_MATCH_L2;
2368 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2369 struct flow_match_vlan match;
2371 flow_rule_match_cvlan(rule, &match);
2372 if (match.mask->vlan_id ||
2373 match.mask->vlan_priority ||
2374 match.mask->vlan_tpid) {
2375 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2377 NL_SET_ERR_MSG_MOD(extack,
2378 "Matching on CVLAN is not supported");
2382 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2383 MLX5_SET(fte_match_set_misc, misc_c,
2384 outer_second_svlan_tag, 1);
2385 MLX5_SET(fte_match_set_misc, misc_v,
2386 outer_second_svlan_tag, 1);
2388 MLX5_SET(fte_match_set_misc, misc_c,
2389 outer_second_cvlan_tag, 1);
2390 MLX5_SET(fte_match_set_misc, misc_v,
2391 outer_second_cvlan_tag, 1);
2394 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2395 match.mask->vlan_id);
2396 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2397 match.key->vlan_id);
2398 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2399 match.mask->vlan_priority);
2400 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2401 match.key->vlan_priority);
2403 *match_level = MLX5_MATCH_L2;
2404 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2408 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2409 struct flow_match_eth_addrs match;
2411 flow_rule_match_eth_addrs(rule, &match);
2412 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2415 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2419 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2422 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2426 if (!is_zero_ether_addr(match.mask->src) ||
2427 !is_zero_ether_addr(match.mask->dst))
2428 *match_level = MLX5_MATCH_L2;
2431 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2432 struct flow_match_control match;
2434 flow_rule_match_control(rule, &match);
2435 addr_type = match.key->addr_type;
2437 /* the HW doesn't support frag first/later */
2438 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2439 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2443 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2444 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2445 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2446 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2448 /* the HW doesn't need L3 inline to match on frag=no */
2449 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2450 *match_level = MLX5_MATCH_L2;
2451 /* *** L2 attributes parsing up to here *** */
2453 *match_level = MLX5_MATCH_L3;
2457 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2458 struct flow_match_basic match;
2460 flow_rule_match_basic(rule, &match);
2461 ip_proto = match.key->ip_proto;
2463 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2464 match.mask->ip_proto);
2465 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2466 match.key->ip_proto);
2468 if (match.mask->ip_proto)
2469 *match_level = MLX5_MATCH_L3;
2472 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2473 struct flow_match_ipv4_addrs match;
2475 flow_rule_match_ipv4_addrs(rule, &match);
2476 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2477 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2478 &match.mask->src, sizeof(match.mask->src));
2479 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2480 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2481 &match.key->src, sizeof(match.key->src));
2482 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2483 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2484 &match.mask->dst, sizeof(match.mask->dst));
2485 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2486 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2487 &match.key->dst, sizeof(match.key->dst));
2489 if (match.mask->src || match.mask->dst)
2490 *match_level = MLX5_MATCH_L3;
2493 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2494 struct flow_match_ipv6_addrs match;
2496 flow_rule_match_ipv6_addrs(rule, &match);
2497 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2498 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2499 &match.mask->src, sizeof(match.mask->src));
2500 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2501 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2502 &match.key->src, sizeof(match.key->src));
2504 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2505 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2506 &match.mask->dst, sizeof(match.mask->dst));
2507 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2508 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2509 &match.key->dst, sizeof(match.key->dst));
2511 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2512 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2513 *match_level = MLX5_MATCH_L3;
2516 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2517 struct flow_match_ip match;
2519 flow_rule_match_ip(rule, &match);
2520 if (match_inner_ecn) {
2521 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2522 match.mask->tos & 0x3);
2523 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2524 match.key->tos & 0x3);
2527 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2528 match.mask->tos >> 2);
2529 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2530 match.key->tos >> 2);
2532 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2534 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2537 if (match.mask->ttl &&
2538 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2539 ft_field_support.outer_ipv4_ttl)) {
2540 NL_SET_ERR_MSG_MOD(extack,
2541 "Matching on TTL is not supported");
2545 if (match.mask->tos || match.mask->ttl)
2546 *match_level = MLX5_MATCH_L3;
2549 /* *** L3 attributes parsing up to here *** */
2551 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2552 struct flow_match_ports match;
2554 flow_rule_match_ports(rule, &match);
2557 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2558 tcp_sport, ntohs(match.mask->src));
2559 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2560 tcp_sport, ntohs(match.key->src));
2562 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2563 tcp_dport, ntohs(match.mask->dst));
2564 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2565 tcp_dport, ntohs(match.key->dst));
2569 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2570 udp_sport, ntohs(match.mask->src));
2571 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2572 udp_sport, ntohs(match.key->src));
2574 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2575 udp_dport, ntohs(match.mask->dst));
2576 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2577 udp_dport, ntohs(match.key->dst));
2580 NL_SET_ERR_MSG_MOD(extack,
2581 "Only UDP and TCP transports are supported for L4 matching");
2582 netdev_err(priv->netdev,
2583 "Only UDP and TCP transport are supported\n");
2587 if (match.mask->src || match.mask->dst)
2588 *match_level = MLX5_MATCH_L4;
2591 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2592 struct flow_match_tcp match;
2594 flow_rule_match_tcp(rule, &match);
2595 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2596 ntohs(match.mask->flags));
2597 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2598 ntohs(match.key->flags));
2600 if (match.mask->flags)
2601 *match_level = MLX5_MATCH_L4;
2603 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2604 struct flow_match_icmp match;
2606 flow_rule_match_icmp(rule, &match);
2609 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2610 MLX5_FLEX_PROTO_ICMP)) {
2611 NL_SET_ERR_MSG_MOD(extack,
2612 "Match on Flex protocols for ICMP is not supported");
2615 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2617 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2619 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2621 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2624 case IPPROTO_ICMPV6:
2625 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2626 MLX5_FLEX_PROTO_ICMPV6)) {
2627 NL_SET_ERR_MSG_MOD(extack,
2628 "Match on Flex protocols for ICMPV6 is not supported");
2631 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2633 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2635 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2637 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2641 NL_SET_ERR_MSG_MOD(extack,
2642 "Code and type matching only with ICMP and ICMPv6");
2643 netdev_err(priv->netdev,
2644 "Code and type matching only with ICMP and ICMPv6\n");
2647 if (match.mask->code || match.mask->type) {
2648 *match_level = MLX5_MATCH_L4;
2649 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2652 /* Currently supported only for MPLS over UDP */
2653 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2654 !netif_is_bareudp(filter_dev)) {
2655 NL_SET_ERR_MSG_MOD(extack,
2656 "Matching on MPLS is supported only for MPLS over UDP");
2657 netdev_err(priv->netdev,
2658 "Matching on MPLS is supported only for MPLS over UDP\n");
2665 static int parse_cls_flower(struct mlx5e_priv *priv,
2666 struct mlx5e_tc_flow *flow,
2667 struct mlx5_flow_spec *spec,
2668 struct flow_cls_offload *f,
2669 struct net_device *filter_dev)
2671 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2672 struct netlink_ext_ack *extack = f->common.extack;
2673 struct mlx5_core_dev *dev = priv->mdev;
2674 struct mlx5_eswitch *esw = dev->priv.eswitch;
2675 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2676 struct mlx5_eswitch_rep *rep;
2677 bool is_eswitch_flow;
2680 inner_match_level = MLX5_MATCH_NONE;
2681 outer_match_level = MLX5_MATCH_NONE;
2683 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2684 &inner_match_level, &outer_match_level);
2685 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2686 outer_match_level : inner_match_level;
2688 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2689 if (!err && is_eswitch_flow) {
2691 if (rep->vport != MLX5_VPORT_UPLINK &&
2692 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2693 esw->offloads.inline_mode < non_tunnel_match_level)) {
2694 NL_SET_ERR_MSG_MOD(extack,
2695 "Flow is not offloaded due to min inline setting");
2696 netdev_warn(priv->netdev,
2697 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2698 non_tunnel_match_level, esw->offloads.inline_mode);
2703 flow->attr->inner_match_level = inner_match_level;
2704 flow->attr->outer_match_level = outer_match_level;
2710 struct mlx5_fields {
2718 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2719 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2720 offsetof(struct pedit_headers, field) + (off), \
2721 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
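/* Example expansion (illustrative): OFFLOAD(TCP_SPORT, 16, U16_MAX,
 * tcp.source, 0, tcp_sport) yields roughly
 *
 *	{ MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 16, U16_MAX,
 *	  offsetof(struct pedit_headers, tcp.source) + 0,
 *	  MLX5_BYTE_OFF(fte_match_set_lyr_2_4, tcp_sport) }
 *
 * i.e. one table row tying a pedit header offset to its HW rewrite field
 * and the corresponding match-param byte offset.
 */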
2723 /* masked values are the same and there are no rewrites that do not have a match. */
2726 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2727 type matchmaskx = *(type *)(matchmaskp); \
2728 type matchvalx = *(type *)(matchvalp); \
2729 type maskx = *(type *)(maskp); \
2730 type valx = *(type *)(valp); \
2732 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ matchmaskx)); \
2734 })
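/* Worked example (assumed values): valp/maskp = 0x0800/0xffff and
 * matchvalp/matchmaskp = 0x0800/0xffff give
 *
 *	(valx & maskx) == (matchvalx & matchmaskx)	-> 0x0800 == 0x0800
 *	!(maskx & (maskx ^ matchmaskx))			-> no rewritten bit
 *							   escapes the match
 *
 * so the rewrite is provably a no-op relative to the match and may be
 * skipped by the caller.
 */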
2736 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2737 void *matchmaskp, u8 bsize)
2743 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2746 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2749 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2756 static struct mlx5_fields fields[] = {
2757 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2758 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2759 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2760 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2761 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2762 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2764 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2765 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2766 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2767 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2769 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2770 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2771 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2772 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2773 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2774 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2775 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2776 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2777 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2778 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2779 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2780 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2781 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2782 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2783 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2784 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2785 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2786 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
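/* Editorial note on the 0xc00f mask (assumed layout reading): the 6 DSCP
 * bits straddle the first two bytes of the IPv6 header (low nibble of
 * byte 0, top two bits of byte 1); viewed as a u16 on a little-endian CPU,
 * that byte pair is exactly the 0xc00f used above.
 */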
2788 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2789 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2790 /* in the linux tcphdr, tcp_flags is 8 bits long */
2791 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2793 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2794 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2797 static unsigned long mask_to_le(unsigned long mask, int size)
2803 mask_be32 = (__force __be32)(mask);
2804 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2805 } else if (size == 16) {
2806 mask_be32 = (__force __be32)(mask);
2807 mask_be16 = *(__be16 *)&mask_be32;
2808 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2813 static int offload_pedit_fields(struct mlx5e_priv *priv,
2815 struct pedit_headers_action *hdrs,
2816 struct mlx5e_tc_flow_parse_attr *parse_attr,
2818 struct netlink_ext_ack *extack)
2820 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2821 void *headers_c, *headers_v, *action, *vals_p;
2822 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2823 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2824 unsigned long mask, field_mask;
2825 int i, first, last, next_z;
2826 struct mlx5_fields *f;
2829 mod_acts = &parse_attr->mod_hdr_acts;
2830 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
2831 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
2833 set_masks = &hdrs[0].masks;
2834 add_masks = &hdrs[1].masks;
2835 set_vals = &hdrs[0].vals;
2836 add_vals = &hdrs[1].vals;
2838 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2842 /* avoid seeing bits set from previous iterations */
2846 s_masks_p = (void *)set_masks + f->offset;
2847 a_masks_p = (void *)add_masks + f->offset;
2849 s_mask = *s_masks_p & f->field_mask;
2850 a_mask = *a_masks_p & f->field_mask;
2852 if (!s_mask && !a_mask) /* nothing to offload here */
2855 if (s_mask && a_mask) {
2856 NL_SET_ERR_MSG_MOD(extack,
2857 "can't set and add to the same HW field");
2858 netdev_warn(priv->netdev,
2859 "mlx5: can't set and add to the same HW field (%x)\n",
2866 void *match_mask = headers_c + f->match_offset;
2867 void *match_val = headers_v + f->match_offset;
2869 cmd = MLX5_ACTION_TYPE_SET;
2871 vals_p = (void *)set_vals + f->offset;
2872 /* don't rewrite if we have a match on the same value */
2873 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2874 match_mask, f->field_bsize))
2876 /* clear to denote we consumed this field */
2877 *s_masks_p &= ~f->field_mask;
2879 cmd = MLX5_ACTION_TYPE_ADD;
2881 vals_p = (void *)add_vals + f->offset;
2882 /* adding 0 is a no-op */
2883 if ((*(u32 *)vals_p & f->field_mask) == 0)
2885 /* clear to denote we consumed this field */
2886 *a_masks_p &= ~f->field_mask;
2891 mask = mask_to_le(mask, f->field_bsize);
2893 first = find_first_bit(&mask, f->field_bsize);
2894 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2895 last = find_last_bit(&mask, f->field_bsize);
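/* Worked example for the contiguity check below (assumed 16-bit field):
 * mask 0x0ff0 -> first = 4, next_z = 12, last = 11: contiguous, offloaded
 * as offset 4, length 8. Mask 0x0f0f -> first = 0, next_z = 4, last = 11,
 * so first < next_z < last holds and the two disjoint sub-fields are
 * rejected.
 */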
2896 if (first < next_z && next_z < last) {
2897 NL_SET_ERR_MSG_MOD(extack,
2898 "rewrite of few sub-fields isn't supported");
2899 netdev_warn(priv->netdev,
2900 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2905 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
2906 if (IS_ERR(action)) {
2907 NL_SET_ERR_MSG_MOD(extack,
2908 "too many pedit actions, can't offload");
2909 mlx5_core_warn(priv->mdev,
2910 "mlx5: parsed %d pedit actions, can't do more\n",
2911 mod_acts->num_actions);
2912 return PTR_ERR(action);
2915 MLX5_SET(set_action_in, action, action_type, cmd);
2916 MLX5_SET(set_action_in, action, field, f->field);
2918 if (cmd == MLX5_ACTION_TYPE_SET) {
2921 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2923 /* a bit-sized field may start at a bit other than the first */
2924 start = find_first_bit(&field_mask, f->field_bsize);
2926 MLX5_SET(set_action_in, action, offset, first - start);
2927 /* length is num of bits to be written, zero means length of 32 */
2928 MLX5_SET(set_action_in, action, length, (last - first + 1));
2931 if (f->field_bsize == 32)
2932 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2933 else if (f->field_bsize == 16)
2934 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2935 else if (f->field_bsize == 8)
2936 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2938 ++mod_acts->num_actions;
2944 static const struct pedit_headers zero_masks = {};
2946 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2947 struct mlx5e_tc_flow_parse_attr *parse_attr,
2948 struct pedit_headers_action *hdrs,
2950 struct netlink_ext_ack *extack)
2952 struct pedit_headers *cmd_masks;
2956 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2957 action_flags, extack);
2959 goto out_dealloc_parsed_actions;
2961 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2962 cmd_masks = &hdrs[cmd].masks;
2963 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2964 NL_SET_ERR_MSG_MOD(extack,
2965 "attempt to offload an unsupported field");
2966 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2967 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2968 16, 1, cmd_masks, sizeof(zero_masks), true);
2970 goto out_dealloc_parsed_actions;
2976 out_dealloc_parsed_actions:
2977 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
2981 struct ip_ttl_word {
2982 __u8 ttl;
2983 __u8 protocol;
2984 __sum16 check;
2985 };
2987 struct ipv6_hoplimit_word {
2988 __be16 payload_len;
2989 __u8 nexthdr;
2990 __u8 hop_limit;
2991 };
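/* Overlay sketch (editorial): pedit hands the driver 4-byte words, so a
 * TTL-only rewrite at offsetof(struct iphdr, ttl) == 8 arrives as the
 * word {ttl, protocol, check} mapped by struct ip_ttl_word above; if any
 * bit outside ttl/hop_limit is masked in, the action modifies more of the
 * IP header than the hop limit alone.
 */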
2994 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
2995 bool *modify_ip_header, bool *modify_tuple,
2996 struct netlink_ext_ack *extack)
3001 htype = act->mangle.htype;
3002 offset = act->mangle.offset;
3003 mask = ~act->mangle.mask;
3004 /* For the IPv4/IPv6 header, check the whole 4-byte word to determine
3005 * whether the modified fields are limited to ttl/hop_limit only.
3006 */
3008 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3009 struct ip_ttl_word *ttl_word =
3010 (struct ip_ttl_word *)&mask;
3012 if (offset != offsetof(struct iphdr, ttl) ||
3013 ttl_word->protocol ||
3015 *modify_ip_header = true;
3018 if (offset >= offsetof(struct iphdr, saddr))
3019 *modify_tuple = true;
3021 if (ct_flow && *modify_tuple) {
3022 NL_SET_ERR_MSG_MOD(extack,
3023 "can't offload re-write of ipv4 address with action ct");
3026 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3027 struct ipv6_hoplimit_word *hoplimit_word =
3028 (struct ipv6_hoplimit_word *)&mask;
3030 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3031 hoplimit_word->payload_len ||
3032 hoplimit_word->nexthdr) {
3033 *modify_ip_header = true;
3036 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3037 *modify_tuple = true;
3039 if (ct_flow && *modify_tuple) {
3040 NL_SET_ERR_MSG_MOD(extack,
3041 "can't offload re-write of ipv6 address with action ct");
3044 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3045 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3046 *modify_tuple = true;
3048 NL_SET_ERR_MSG_MOD(extack,
3049 "can't offload re-write of transport header ports with action ct");
3057 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3058 bool ct_flow, struct netlink_ext_ack *extack,
3059 struct mlx5e_priv *priv,
3060 struct mlx5_flow_spec *spec)
3062 if (!modify_tuple || ct_clear)
3066 NL_SET_ERR_MSG_MOD(extack,
3067 "can't offload tuple modification with non-clear ct()");
3068 netdev_info(priv->netdev,
3069 "can't offload tuple modification with non-clear ct()");
3073 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3074 * (or after clear action), as otherwise, since the tuple is changed,
3075 * we can't restore ct state
3077 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3078 NL_SET_ERR_MSG_MOD(extack,
3079 "can't offload tuple modification with ct matches and no ct(clear) action");
3080 netdev_info(priv->netdev,
3081 "can't offload tuple modification with ct matches and no ct(clear) action");
3088 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3089 struct mlx5_flow_spec *spec,
3090 struct flow_action *flow_action,
3091 u32 actions, bool ct_flow,
3093 struct netlink_ext_ack *extack)
3095 const struct flow_action_entry *act;
3096 bool modify_ip_header, modify_tuple;
3103 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3104 headers_v = mlx5e_get_match_headers_value(actions, spec);
3105 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3107 /* for non-IP we only re-write MACs, so we're okay */
3108 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3109 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3112 modify_ip_header = false;
3113 modify_tuple = false;
3114 flow_action_for_each(i, act, flow_action) {
3115 if (act->id != FLOW_ACTION_MANGLE &&
3116 act->id != FLOW_ACTION_ADD)
3119 if (!is_action_keys_supported(act, ct_flow,
3121 &modify_tuple, extack))
3125 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3129 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3130 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3131 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3132 NL_SET_ERR_MSG_MOD(extack,
3133 "can't offload re-write of non TCP/UDP");
3134 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3144 actions_match_supported_fdb(struct mlx5e_priv *priv,
3145 struct mlx5e_tc_flow_parse_attr *parse_attr,
3146 struct mlx5e_tc_flow *flow,
3147 struct netlink_ext_ack *extack)
3149 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3150 bool ct_flow, ct_clear;
3152 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3153 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3155 if (esw_attr->split_count && ct_flow &&
3156 !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
3157 /* All registers used by ct are cleared when using split rules. */
3160 NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
3164 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3165 NL_SET_ERR_MSG_MOD(extack,
3166 "current firmware doesn't support split rule for port mirroring");
3167 netdev_warn_once(priv->netdev,
3168 "current firmware doesn't support split rule for port mirroring\n");
3176 actions_match_supported(struct mlx5e_priv *priv,
3177 struct flow_action *flow_action,
3178 struct mlx5e_tc_flow_parse_attr *parse_attr,
3179 struct mlx5e_tc_flow *flow,
3180 struct netlink_ext_ack *extack)
3182 u32 actions = flow->attr->action;
3183 bool ct_flow, ct_clear;
3185 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3186 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3189 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3190 NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
3194 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3195 !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
3196 actions, ct_flow, ct_clear, extack))
3199 if (mlx5e_is_eswitch_flow(flow) &&
3200 !actions_match_supported_fdb(priv, parse_attr, flow, extack))
3206 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3208 return priv->mdev == peer_priv->mdev;
3211 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3213 struct mlx5_core_dev *fmdev, *pmdev;
3214 u64 fsystem_guid, psystem_guid;
3217 pmdev = peer_priv->mdev;
3219 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3220 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3222 return (fsystem_guid == psystem_guid);
3226 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3227 struct flow_action *flow_action)
3229 struct netlink_ext_ack *extack = parse_state->extack;
3230 struct mlx5e_tc_flow *flow = parse_state->flow;
3231 struct mlx5_flow_attr *attr = flow->attr;
3232 enum mlx5_flow_namespace_type ns_type;
3233 struct mlx5e_priv *priv = flow->priv;
3234 const struct flow_action_entry *act;
3235 struct mlx5e_tc_act *tc_act;
3238 ns_type = mlx5e_get_flow_namespace(flow);
3240 flow_action_for_each(i, act, flow_action) {
3241 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3243 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3247 if (!tc_act->can_offload(parse_state, act, i))
3250 err = tc_act->parse_action(parse_state, act, priv, attr);
3255 flow_action_for_each(i, act, flow_action) {
3256 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3257 if (!tc_act || !tc_act->post_parse ||
3258 !tc_act->can_offload(parse_state, act, i))
3261 err = tc_act->post_parse(parse_state, priv, attr);
3270 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3271 struct mlx5e_tc_flow *flow,
3272 struct mlx5_flow_attr *attr,
3273 struct pedit_headers_action *hdrs,
3274 struct netlink_ext_ack *extack)
3276 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3277 enum mlx5_flow_namespace_type ns_type;
3280 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3281 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3284 ns_type = mlx5e_get_flow_namespace(flow);
3286 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
3287 &attr->action, extack);
3291 if (parse_attr->mod_hdr_acts.num_actions > 0)
3294 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3295 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3296 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3298 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3301 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3302 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3303 attr->esw_attr->split_count = 0;
3309 flow_action_supported(struct flow_action *flow_action,
3310 struct netlink_ext_ack *extack)
3312 if (!flow_action_has_entries(flow_action)) {
3313 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
3317 if (!flow_action_hw_stats_check(flow_action, extack,
3318 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
3319 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
3327 parse_tc_nic_actions(struct mlx5e_priv *priv,
3328 struct flow_action *flow_action,
3329 struct mlx5e_tc_flow *flow,
3330 struct netlink_ext_ack *extack)
3332 struct mlx5e_tc_act_parse_state *parse_state;
3333 struct mlx5e_tc_flow_parse_attr *parse_attr;
3334 struct mlx5_flow_attr *attr = flow->attr;
3335 struct pedit_headers_action *hdrs;
3338 err = flow_action_supported(flow_action, extack);
3342 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3343 parse_attr = attr->parse_attr;
3344 parse_state = &parse_attr->parse_state;
3345 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3346 parse_state->ct_priv = get_ct_priv(priv);
3347 hdrs = parse_state->hdrs;
3349 err = parse_tc_actions(parse_state, flow_action);
3353 err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
3357 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3363 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3364 struct net_device *peer_netdev)
3366 struct mlx5e_priv *peer_priv;
3368 peer_priv = netdev_priv(peer_netdev);
3370 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3371 mlx5e_eswitch_vf_rep(priv->netdev) &&
3372 mlx5e_eswitch_vf_rep(peer_netdev) &&
3373 mlx5e_same_hw_devs(priv, peer_priv));
3376 static bool same_hw_reps(struct mlx5e_priv *priv,
3377 struct net_device *peer_netdev)
3379 struct mlx5e_priv *peer_priv;
3381 peer_priv = netdev_priv(peer_netdev);
3383 return mlx5e_eswitch_rep(priv->netdev) &&
3384 mlx5e_eswitch_rep(peer_netdev) &&
3385 mlx5e_same_hw_devs(priv, peer_priv);
3388 static bool is_lag_dev(struct mlx5e_priv *priv,
3389 struct net_device *peer_netdev)
3391 return ((mlx5_lag_is_sriov(priv->mdev) ||
3392 mlx5_lag_is_multipath(priv->mdev)) &&
3393 same_hw_reps(priv, peer_netdev));
3396 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3397 struct net_device *out_dev)
3399 if (is_merged_eswitch_vfs(priv, out_dev))
3402 if (is_lag_dev(priv, out_dev))
3405 return mlx5e_eswitch_rep(out_dev) &&
3406 same_port_devs(priv, netdev_priv(out_dev));
3409 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
3410 struct mlx5_flow_attr *attr,
3412 enum mlx5e_tc_int_port_type type,
3416 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
3417 struct mlx5e_tc_int_port_priv *int_port_priv;
3418 struct mlx5e_tc_flow_parse_attr *parse_attr;
3419 struct mlx5e_tc_int_port *dest_int_port;
3422 parse_attr = attr->parse_attr;
3423 int_port_priv = mlx5e_get_int_port_priv(priv);
3425 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
3426 if (IS_ERR(dest_int_port))
3427 return PTR_ERR(dest_int_port);
3429 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
3430 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
3431 mlx5e_tc_int_port_get_metadata(dest_int_port));
3433 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
3437 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3439 esw_attr->dest_int_port = dest_int_port;
3440 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
3442 /* Forward to root fdb for matching against the new source vport */
3443 attr->dest_chain = 0;
3449 parse_tc_fdb_actions(struct mlx5e_priv *priv,
3450 struct flow_action *flow_action,
3451 struct mlx5e_tc_flow *flow,
3452 struct netlink_ext_ack *extack)
3454 struct mlx5e_tc_act_parse_state *parse_state;
3455 struct mlx5e_tc_flow_parse_attr *parse_attr;
3456 struct mlx5_flow_attr *attr = flow->attr;
3457 struct mlx5_esw_flow_attr *esw_attr;
3458 struct pedit_headers_action *hdrs;
3461 err = flow_action_supported(flow_action, extack);
3465 esw_attr = attr->esw_attr;
3466 parse_attr = attr->parse_attr;
3467 parse_state = &parse_attr->parse_state;
3468 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3469 parse_state->ct_priv = get_ct_priv(priv);
3470 hdrs = parse_state->hdrs;
3472 err = parse_tc_actions(parse_state, flow_action);
3476 /* Forward to/from internal port can only have 1 dest */
3477 if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
3478 esw_attr->out_count > 1) {
3479 NL_SET_ERR_MSG_MOD(extack,
3480 "Rules with internal port can have only one destination");
3484 err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
3488 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3494 static void get_flags(int flags, unsigned long *flow_flags)
3496 unsigned long __flow_flags = 0;
3498 if (flags & MLX5_TC_FLAG(INGRESS))
3499 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3500 if (flags & MLX5_TC_FLAG(EGRESS))
3501 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
3503 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3504 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3505 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3506 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3507 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
3508 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
3510 *flow_flags = __flow_flags;
3513 static const struct rhashtable_params tc_ht_params = {
3514 .head_offset = offsetof(struct mlx5e_tc_flow, node),
3515 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
3516 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
3517 .automatic_shrinking = true,
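/* Lookup sketch: with the tc cookie as the rhashtable key, resolving a
 * flower command back to its driver state is a single RCU-protected call,
 * e.g. (as done by mlx5e_configure_flower()/mlx5e_delete_flower() below):
 *
 *	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
 */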
3520 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3521 unsigned long flags)
3523 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3524 struct mlx5e_rep_priv *uplink_rpriv;
3526 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3527 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
3528 return &uplink_rpriv->uplink_priv.tc_ht;
3529 } else /* NIC offload */
3530 return &priv->fs.tc.ht;
3533 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
3535 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3536 struct mlx5_flow_attr *attr = flow->attr;
3537 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
3538 flow_flag_test(flow, INGRESS);
3539 bool act_is_encap = !!(attr->action &
3540 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
3541 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
3542 MLX5_DEVCOM_ESW_OFFLOADS);
3547 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
3548 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
3549 (is_rep_ingress || act_is_encap))
3555 struct mlx5_flow_attr *
3556 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
3558 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
3559 sizeof(struct mlx5_esw_flow_attr) :
3560 sizeof(struct mlx5_nic_flow_attr);
3561 struct mlx5_flow_attr *attr;
3563 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
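/* Layout sketch (assumed from the zero-length union at the end of
 * struct mlx5_flow_attr): the namespace-specific attr lives directly
 * behind the generic one in this single allocation, so
 *
 *	esw_attr = (struct mlx5_esw_flow_attr *)(attr + 1);
 *
 * is the storage that attr->esw_attr / attr->nic_attr resolve to.
 */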
3567 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
3568 struct flow_cls_offload *f, unsigned long flow_flags,
3569 struct mlx5e_tc_flow_parse_attr **__parse_attr,
3570 struct mlx5e_tc_flow **__flow)
3572 struct mlx5e_tc_flow_parse_attr *parse_attr;
3573 struct mlx5_flow_attr *attr;
3574 struct mlx5e_tc_flow *flow;
3578 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
3579 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3580 if (!parse_attr || !flow)
3583 flow->flags = flow_flags;
3584 flow->cookie = f->cookie;
3587 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
3593 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
3594 INIT_LIST_HEAD(&flow->encaps[out_index].list);
3595 INIT_LIST_HEAD(&flow->hairpin);
3596 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
3597 refcount_set(&flow->refcnt, 1);
3598 init_completion(&flow->init_done);
3599 init_completion(&flow->del_hw_done);
3602 *__parse_attr = parse_attr;
3613 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
3614 struct mlx5e_tc_flow_parse_attr *parse_attr,
3615 struct flow_cls_offload *f)
3617 attr->parse_attr = parse_attr;
3618 attr->chain = f->common.chain_index;
3619 attr->prio = f->common.prio;
3623 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
3624 struct mlx5e_priv *priv,
3625 struct mlx5e_tc_flow_parse_attr *parse_attr,
3626 struct flow_cls_offload *f,
3627 struct mlx5_eswitch_rep *in_rep,
3628 struct mlx5_core_dev *in_mdev)
3630 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3631 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
3633 mlx5e_flow_attr_init(attr, parse_attr, f);
3635 esw_attr->in_rep = in_rep;
3636 esw_attr->in_mdev = in_mdev;
3638 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
3639 MLX5_COUNTER_SOURCE_ESWITCH)
3640 esw_attr->counter_dev = in_mdev;
3642 esw_attr->counter_dev = priv->mdev;
3645 static struct mlx5e_tc_flow *
3646 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3647 struct flow_cls_offload *f,
3648 unsigned long flow_flags,
3649 struct net_device *filter_dev,
3650 struct mlx5_eswitch_rep *in_rep,
3651 struct mlx5_core_dev *in_mdev)
3653 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3654 struct netlink_ext_ack *extack = f->common.extack;
3655 struct mlx5e_tc_flow_parse_attr *parse_attr;
3656 struct mlx5e_tc_flow *flow;
3659 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3660 attr_size = sizeof(struct mlx5_esw_flow_attr);
3661 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3662 &parse_attr, &flow);
3666 parse_attr->filter_dev = filter_dev;
3667 mlx5e_flow_esw_attr_init(flow->attr,
3669 f, in_rep, in_mdev);
3671 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3676 /* actions validation depends on parsing the ct matches first */
3677 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
3678 &flow->attr->ct_attr, extack);
3682 /* always set IP version for indirect table handling */
3683 flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
3685 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
3689 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
3690 complete_all(&flow->init_done);
3692 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
3695 add_unready_flow(flow);
3701 mlx5e_flow_put(priv, flow);
3703 return ERR_PTR(err);
3706 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
3707 struct mlx5e_tc_flow *flow,
3708 unsigned long flow_flags)
3710 struct mlx5e_priv *priv = flow->priv, *peer_priv;
3711 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
3712 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3713 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3714 struct mlx5e_tc_flow_parse_attr *parse_attr;
3715 struct mlx5e_rep_priv *peer_urpriv;
3716 struct mlx5e_tc_flow *peer_flow;
3717 struct mlx5_core_dev *in_mdev;
3720 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3724 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
3725 peer_priv = netdev_priv(peer_urpriv->netdev);
3727 /* in_mdev is assigned the mdev from which the packet originated;
3728 * packets redirected to the uplink use the same mdev as the original
3729 * flow, while packets redirected from the uplink use the peer mdev. */
3732 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
3733 in_mdev = peer_priv->mdev;
3735 in_mdev = priv->mdev;
3737 parse_attr = flow->attr->parse_attr;
3738 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
3739 parse_attr->filter_dev,
3740 attr->in_rep, in_mdev);
3741 if (IS_ERR(peer_flow)) {
3742 err = PTR_ERR(peer_flow);
3746 flow->peer_flow = peer_flow;
3747 flow_flag_set(flow, DUP);
3748 mutex_lock(&esw->offloads.peer_mutex);
3749 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
3750 mutex_unlock(&esw->offloads.peer_mutex);
3753 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
3758 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
3759 struct flow_cls_offload *f,
3760 unsigned long flow_flags,
3761 struct net_device *filter_dev,
3762 struct mlx5e_tc_flow **__flow)
3764 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3765 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
3766 struct mlx5_core_dev *in_mdev = priv->mdev;
3767 struct mlx5e_tc_flow *flow;
3770 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
3773 return PTR_ERR(flow);
3775 if (is_peer_flow_needed(flow)) {
3776 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
3778 mlx5e_tc_del_fdb_flow(priv, flow);
3792 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
3793 struct flow_cls_offload *f,
3794 unsigned long flow_flags,
3795 struct net_device *filter_dev,
3796 struct mlx5e_tc_flow **__flow)
3798 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
3799 struct netlink_ext_ack *extack = f->common.extack;
3800 struct mlx5e_tc_flow_parse_attr *parse_attr;
3801 struct mlx5e_tc_flow *flow;
3804 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
3805 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
3807 } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
3811 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3812 attr_size = sizeof(struct mlx5_nic_flow_attr);
3813 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
3814 &parse_attr, &flow);
3818 parse_attr->filter_dev = filter_dev;
3819 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
3821 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
3826 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
3827 &flow->attr->ct_attr, extack);
3831 err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
3835 err = mlx5e_tc_add_nic_flow(priv, flow, extack);
3839 flow_flag_set(flow, OFFLOADED);
3845 flow_flag_set(flow, FAILED);
3846 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3847 mlx5e_flow_put(priv, flow);
3853 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
3854 struct flow_cls_offload *f,
3855 unsigned long flags,
3856 struct net_device *filter_dev,
3857 struct mlx5e_tc_flow **flow)
3859 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3860 unsigned long flow_flags;
3863 get_flags(flags, &flow_flags);
3865 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
3868 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
3869 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
3872 err = mlx5e_add_nic_flow(priv, f, flow_flags,
3878 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
3879 struct mlx5e_rep_priv *rpriv)
3881 /* Offloaded flow rule is allowed to duplicate on non-uplink representor
3882 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
3883 * function is called from NIC mode.
3885 return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
3888 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
3889 struct flow_cls_offload *f, unsigned long flags)
3891 struct netlink_ext_ack *extack = f->common.extack;
3892 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3893 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3894 struct mlx5e_tc_flow *flow;
3897 if (!mlx5_esw_hold(priv->mdev))
3900 mlx5_esw_get(priv->mdev);
3903 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3905 /* Same flow rule offloaded to non-uplink representor sharing tc block, just return 0. */
3908 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
3911 NL_SET_ERR_MSG_MOD(extack,
3912 "flow cookie already exists, ignoring");
3913 netdev_warn_once(priv->netdev,
3914 "flow cookie %lx already exists, ignoring\n",
3924 trace_mlx5e_configure_flower(f);
3925 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
3929 /* Flow rule offloaded to non-uplink representor sharing tc block,
3930 * set the flow's owner dev.
3932 if (is_flow_rule_duplicate_allowed(dev, rpriv))
3933 flow->orig_dev = dev;
3935 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
3939 mlx5_esw_release(priv->mdev);
3943 mlx5e_flow_put(priv, flow);
3945 mlx5_esw_put(priv->mdev);
3946 mlx5_esw_release(priv->mdev);
3950 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
3952 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
3953 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
3955 return flow_flag_test(flow, INGRESS) == dir_ingress &&
3956 flow_flag_test(flow, EGRESS) == dir_egress;
3959 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
3960 struct flow_cls_offload *f, unsigned long flags)
3962 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3963 struct mlx5e_tc_flow *flow;
3967 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
3968 if (!flow || !same_flow_direction(flow, flags)) {
3973 /* Only delete the flow if it doesn't have the MLX5E_TC_FLOW_DELETED flag set. */
3976 if (flow_flag_test_and_set(flow, DELETED)) {
3980 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
3983 trace_mlx5e_delete_flower(f);
3984 mlx5e_flow_put(priv, flow);
3986 mlx5_esw_put(priv->mdev);
3994 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
3995 struct flow_cls_offload *f, unsigned long flags)
3997 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
3998 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
3999 struct mlx5_eswitch *peer_esw;
4000 struct mlx5e_tc_flow *flow;
4001 struct mlx5_fc *counter;
4008 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4012 return PTR_ERR(flow);
4014 if (!same_flow_direction(flow, flags)) {
4019 if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4020 counter = mlx5e_tc_get_counter(flow);
4024 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4027 /* Under multipath it's possible for one rule to be currently
4028 * un-offloaded while the other rule is offloaded.
4030 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4034 if (flow_flag_test(flow, DUP) &&
4035 flow_flag_test(flow->peer_flow, OFFLOADED)) {
4040 counter = mlx5e_tc_get_counter(flow->peer_flow);
4042 goto no_peer_counter;
4043 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
4046 packets += packets2;
4047 lastuse = max_t(u64, lastuse, lastuse2);
4051 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4053 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
4054 FLOW_ACTION_HW_STATS_DELAYED);
4055 trace_mlx5e_stats_flower(f);
4057 mlx5e_flow_put(priv, flow);
4061 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
4062 struct netlink_ext_ack *extack)
4064 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4065 struct mlx5_eswitch *esw;
4070 vport_num = rpriv->rep->vport;
4071 if (vport_num >= MLX5_VPORT_ECPF) {
4072 NL_SET_ERR_MSG_MOD(extack,
4073 "Ingress rate limit is supported only for Eswitch ports connected to VFs");
4077 esw = priv->mdev->priv.eswitch;
4078 /* rate is given in bytes/sec.
4079 * First convert to bits/sec and then round to the nearest mbit/sec;
4080 * mbit means million bits.
4081 * Moreover, if rate is non-zero we configure a minimum of 1 mbit/sec. */
4085 rate = (rate * BITS_PER_BYTE) + 500000;
4086 do_div(rate, 1000000);
4087 rate_mbps = max_t(u32, rate, 1);
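/* Worked example (illustrative): rate = 1,250,000 bytes/sec
 *	bits/sec:	1,250,000 * 8	= 10,000,000
 *	+ 500,000 bias:			= 10,500,000
 *	/ 1,000,000:			= 10 mbit/sec
 * Any non-zero rate below 62,500 bytes/sec computes to 0 and the max_t()
 * above raises it to the 1 mbit/sec floor.
 */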
4090 err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
4092 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
4097 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
4098 struct flow_action *flow_action,
4099 struct netlink_ext_ack *extack)
4101 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4102 const struct flow_action_entry *act;
4106 if (!flow_action_has_entries(flow_action)) {
4107 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
4111 if (!flow_offload_has_one_action(flow_action)) {
4112 NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
4116 if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
4117 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4121 flow_action_for_each(i, act, flow_action) {
4123 case FLOW_ACTION_POLICE:
4124 if (act->police.rate_pkt_ps) {
4125 NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
4128 err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
4132 rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
4135 NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
4143 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
4144 struct tc_cls_matchall_offload *ma)
4146 struct netlink_ext_ack *extack = ma->common.extack;
4148 if (ma->common.prio != 1) {
4149 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
4153 return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
4156 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
4157 struct tc_cls_matchall_offload *ma)
4159 struct netlink_ext_ack *extack = ma->common.extack;
4161 return apply_police_params(priv, 0, extack);
4164 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
4165 struct tc_cls_matchall_offload *ma)
4167 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4168 struct rtnl_link_stats64 cur_stats;
4172 cur_stats = priv->stats.vf_vport;
4173 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
4174 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
4175 rpriv->prev_vf_vport_stats = cur_stats;
4176 flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
4177 FLOW_ACTION_HW_STATS_DELAYED);
4180 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
4181 struct mlx5e_priv *peer_priv)
4183 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
4184 struct mlx5e_hairpin_entry *hpe, *tmp;
4185 LIST_HEAD(init_wait_list);
4189 if (!mlx5e_same_hw_devs(priv, peer_priv))
4192 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
4194 mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
4195 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
4196 if (refcount_inc_not_zero(&hpe->refcnt))
4197 list_add(&hpe->dead_peer_wait_list, &init_wait_list);
4198 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
4200 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
4201 wait_for_completion(&hpe->res_ready);
4202 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
4203 mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
4205 mlx5e_hairpin_put(priv, hpe);
4209 static int mlx5e_tc_netdev_event(struct notifier_block *this,
4210 unsigned long event, void *ptr)
4212 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
4213 struct mlx5e_flow_steering *fs;
4214 struct mlx5e_priv *peer_priv;
4215 struct mlx5e_tc_table *tc;
4216 struct mlx5e_priv *priv;
4218 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
4219 event != NETDEV_UNREGISTER ||
4220 ndev->reg_state == NETREG_REGISTERED)
4223 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
4224 fs = container_of(tc, struct mlx5e_flow_steering, tc);
4225 priv = container_of(fs, struct mlx5e_priv, fs);
4226 peer_priv = netdev_priv(ndev);
4227 if (priv == peer_priv ||
4228 !(priv->netdev->features & NETIF_F_HW_TC))
4231 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
4236 static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
4238 int tc_grp_size, tc_tbl_size;
4239 u32 max_flow_counter;
4241 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
4242 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
4244 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
4246 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
4247 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
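/* Worked example (assumed caps): max_flow_counter = 65536 and
 * log_max_ft_size = 20 give
 *	tc_grp_size = min(65536, BIT(18))	= 65536
 *	tc_tbl_size = min(65536 * 4, BIT(20))	= 262144
 * so the NIC TC table is bounded by whichever of flow counters or
 * flow-table size is the tighter limit.
 */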
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_chains;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
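
/* rhashtable_free_and_destroy() callback: tear down one offloaded flow. */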
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
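
/* Tear down NIC-mode TC offload in reverse order of mlx5e_tc_nic_init(). */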
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
}
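
/* Initialize switchdev-mode (eswitch) TC offload for the uplink
 * representor: post-action, CT, internal port and sampling support, the
 * tunnel and tunnel-encap-options mappings, the flow rhashtable and the
 * tunnel encap/decap infrastructure.
 */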
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* The last two values are reserved for the stack devices' slow path
	 * table mark and the bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	rhashtable_destroy(tc_ht);
err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
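
/* Tear down switchdev-mode TC offload in reverse order of
 * mlx5e_tc_esw_init().
 */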
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
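
/* Number of flows currently installed in the TC table selected by @flags. */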
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
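
/* Remove the peer-eswitch copy of every FDB flow tracked on this eswitch. */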
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
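
/* Worker that retries offloading flows which could not be fully offloaded
 * at configuration time; flows added successfully are taken off the
 * unready list.
 */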
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
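
/* Dispatch a flower classifier command to the corresponding add, delete or
 * stats handler.
 */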
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
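
/* tc block callback for the ingress block: builds the offload flags and
 * forwards CLSFLOWER commands to mlx5e_setup_tc_cls_flower().
 */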
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
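
/* Restore the tc chain and conntrack state on the skb from the CQE
 * metadata (reg_b) when a packet reaches SW after partial HW processing.
 */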
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);

	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}