/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
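
/* Worked example of the TUNNEL_TO_REG entry above, assuming the current
 * eswitch layout of 12 tunnel-id bits plus 12 tunnel-options bits:
 * .mlen is 12 + 12 == 24, so the mapped tunnel id occupies bits 8..31 of
 * reg_c_1, the low 8 bits stay free for the CT zone restore value, and
 * .soffset points the match at misc_parameters_2.metadata_reg_c_1.
 */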
/* To avoid a false lock dependency warning, give the tc_ht lock a
 * class different from the lock class of the ht being used when deleting
 * the last flow from a group and then deleting the group: that path reaches
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash and
 * takes its ht->mutex, which is a different mutex from the one used here.
 */
static struct lock_class_key tc_ht_lock_key;
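
/* The class is attached with lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key)
 * when the tc hash table is initialized in the nic/eswitch tc init paths.
 */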
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	//move to correct offset
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	//zero val and mask
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	//add current to mask
	curr_mask |= mask;
	curr_val |= val;

	//back to be32 and write
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
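
/* Example of the read-modify-write above, taking TUNNEL_TO_REG
 * (moffset 8, mlen 24): max_mask is GENMASK(23, 0); after the shifts,
 * bits 8..31 of the current 32-bit match dword are cleared and OR'ed
 * with the new val/mask, while bits 0..7 (the zone restore portion of
 * reg_c_1) are left untouched.
 */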
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
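
/* On success the value returned above is the index of the action just
 * written, so a caller that later needs to patch the mapped data (e.g.
 * the ct code) can hand the id back to
 * mlx5e_tc_match_to_reg_mod_hdr_change() below.
 */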
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}
#endif
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
			mlx5e_tir_get_tirn(&hp->direct_tir) :
			mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(hp->ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
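
/* e.g. peer_vhca_id 0x0012 and prio 5 hash to key 0x00120005, so flows
 * sharing a peer device and PCP priority land in the same bucket.
 */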
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 16;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbps share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
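
/* Illustration of the channel computation above: on a 100Gbps port,
 * mlx5e_port_max_linkspeed() reports 100000 (Mbps), giving
 * params.num_channels = 100000 / 50000 = 2, while links at or below
 * 50Gbps are clamped to a single hairpin channel.
 */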
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
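
/* dest[] above holds at most two entries: dest[0] is the forward
 * destination (TIR, hairpin or chain flow table, or the vlan table)
 * and the last entry is the flow counter when the rule counts, e.g. a
 * FWD_DEST | COUNT action ends up with dest_ix == 2.
 */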
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	} else if (flow_flag_test(flow, SAMPLE)) {
		rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
					       mlx5e_tc_get_flow_tun_id(flow));
#endif
	} else {
		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	}

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			if (flow_flag_test(flow, CT))
				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
			else
				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
		return;
	}

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	if (flow_flag_test(flow, SAMPLE)) {
		mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
		return;
	}
#endif

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

offload_rule_0:
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In lag case we may get devices from different eswitch instances.
		 * If we failed to get vport num, it means, mostly, that we are on the
		 * wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow_parse_attr *parse_attr,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   get_flow_name_space(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(flow->attr->modify_hdr);
	flow->attr->modify_hdr = mod_hdr;

	return 0;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool vf_tun = false, encap_valid = true;
	struct net_device *encap_dev = NULL;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	struct mlx5_fc *counter;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto err_out;
		}
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		dev_put(out_dev);
		if (err)
			goto err_out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			vf_tun = true;
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_out;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun = false;
	int out_index;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			vf_tun = true;
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	}

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}
	kfree(attr->sample_attr);
	kvfree(attr->parse_attr);
	kvfree(attr->esw_attr->rx_tun_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return true;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
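
/* Usage sketch: COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
 * &tunnel_key.enc_key_id) copies the dissector's key side (not the
 * mask) of the ENC_KEYID match into the tunnel_match_key that is
 * mapped to a tunnel id below.
 */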
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
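
/* Packing example: tun_id 3 with no tunnel options yields
 * value == 3 << ENC_OPTS_BITS with enc_opts_id == 0, and the chosen
 * mask (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK) wildcards the options
 * bits so the chain match ignores the unused options id space.
 */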
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
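
/* e.g. an exact match on n_proto 0x86DD, on hardware that supports the
 * outer_ip_version field, is written as ip_version == 6 rather than an
 * ethertype match; a partial n_proto mask still uses the ethertype
 * fields.
 */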
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * so we don't need to match on mpls label fields
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields.  However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
2055 static int __parse_cls_flower(struct mlx5e_priv *priv,
2056 struct mlx5e_tc_flow *flow,
2057 struct mlx5_flow_spec *spec,
2058 struct flow_cls_offload *f,
2059 struct net_device *filter_dev,
2060 u8 *inner_match_level, u8 *outer_match_level)
2062 struct netlink_ext_ack *extack = f->common.extack;
2063 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2065 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2067 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2069 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2071 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2073 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2075 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2076 struct flow_dissector *dissector = rule->match.dissector;
2077 enum fs_flow_table_type fs_type;
2083 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2084 match_level = outer_match_level;
2086 if (dissector->used_keys &
2087 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2088 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2089 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2090 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2091 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2092 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2093 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2094 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2095 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2096 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2097 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2098 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2099 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2100 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2101 BIT(FLOW_DISSECTOR_KEY_TCP) |
2102 BIT(FLOW_DISSECTOR_KEY_IP) |
2103 BIT(FLOW_DISSECTOR_KEY_CT) |
2104 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2105 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2106 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2107 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2108 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2109 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2110 dissector->used_keys);
2114 if (mlx5e_get_tc_tun(filter_dev)) {
2115 bool match_inner = false;
2117 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2118 outer_match_level, &match_inner);
2123 /* header pointers should point to the inner headers
2124 * if the packet was decapsulated already.
2125 * outer headers are set by parse_tunnel_attr.
2127 match_level = inner_match_level;
2128 headers_c = get_match_inner_headers_criteria(spec);
2129 headers_v = get_match_inner_headers_value(spec);
2133 err = mlx5e_flower_parse_meta(filter_dev, f);
2137 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2138 !skip_key_basic(filter_dev, f)) {
2139 struct flow_match_basic match;
2141 flow_rule_match_basic(rule, &match);
2142 mlx5e_tc_set_ethertype(priv->mdev, &match,
2143 match_level == outer_match_level,
2144 headers_c, headers_v);
2146 if (match.mask->n_proto)
2147 *match_level = MLX5_MATCH_L2;
2149 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2150 is_vlan_dev(filter_dev)) {
2151 struct flow_dissector_key_vlan filter_dev_mask;
2152 struct flow_dissector_key_vlan filter_dev_key;
2153 struct flow_match_vlan match;
2155 if (is_vlan_dev(filter_dev)) {
2156 match.key = &filter_dev_key;
2157 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2158 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2159 match.key->vlan_priority = 0;
2160 match.mask = &filter_dev_mask;
2161 memset(match.mask, 0xff, sizeof(*match.mask));
2162 match.mask->vlan_priority = 0;
2164 flow_rule_match_vlan(rule, &match);
2166 if (match.mask->vlan_id ||
2167 match.mask->vlan_priority ||
2168 match.mask->vlan_tpid) {
2169 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2170 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2172 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2175 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2177 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2181 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2182 match.mask->vlan_id);
2183 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2184 match.key->vlan_id);
2186 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2187 match.mask->vlan_priority);
2188 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2189 match.key->vlan_priority);
2191 *match_level = MLX5_MATCH_L2;
2193 } else if (*match_level != MLX5_MATCH_NONE) {
2194 /* cvlan_tag enabled in the match criteria and
2195 * disabled in the match value means that neither an S-tag
2196 * nor a C-tag exists (the packet is untagged)
2197 */
2198 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2199 *match_level = MLX5_MATCH_L2;
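/* Illustrative VLAN match (assumed) that exercises the first_vid/first_prio
* fields programmed above:
*   tc filter add dev $REP ingress protocol 802.1q flower \
*      vlan_id 100 vlan_prio 3 action drop
*/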
2202 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2203 struct flow_match_vlan match;
2205 flow_rule_match_cvlan(rule, &match);
2206 if (match.mask->vlan_id ||
2207 match.mask->vlan_priority ||
2208 match.mask->vlan_tpid) {
2209 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2210 fs_type)) {
2211 NL_SET_ERR_MSG_MOD(extack,
2212 "Matching on CVLAN is not supported");
2216 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2217 MLX5_SET(fte_match_set_misc, misc_c,
2218 outer_second_svlan_tag, 1);
2219 MLX5_SET(fte_match_set_misc, misc_v,
2220 outer_second_svlan_tag, 1);
2222 MLX5_SET(fte_match_set_misc, misc_c,
2223 outer_second_cvlan_tag, 1);
2224 MLX5_SET(fte_match_set_misc, misc_v,
2225 outer_second_cvlan_tag, 1);
2228 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2229 match.mask->vlan_id);
2230 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2231 match.key->vlan_id);
2232 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2233 match.mask->vlan_priority);
2234 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2235 match.key->vlan_priority);
2237 *match_level = MLX5_MATCH_L2;
2238 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2242 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2243 struct flow_match_eth_addrs match;
2245 flow_rule_match_eth_addrs(rule, &match);
2246 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2247 dmac_47_16),
2248 match.mask->dst);
2249 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2250 dmac_47_16),
2251 match.key->dst);
2253 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2254 smac_47_16),
2255 match.mask->src);
2256 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2257 smac_47_16),
2258 match.key->src);
2260 if (!is_zero_ether_addr(match.mask->src) ||
2261 !is_zero_ether_addr(match.mask->dst))
2262 *match_level = MLX5_MATCH_L2;
2265 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2266 struct flow_match_control match;
2268 flow_rule_match_control(rule, &match);
2269 addr_type = match.key->addr_type;
2271 /* the HW doesn't support frag first/later */
2272 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2275 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2276 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2277 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2278 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2280 /* the HW doesn't need L3 inline to match on frag=no */
2281 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2282 *match_level = MLX5_MATCH_L2;
2283 /* *** L2 attributes parsing up to here *** */
2285 *match_level = MLX5_MATCH_L3;
2289 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2290 struct flow_match_basic match;
2292 flow_rule_match_basic(rule, &match);
2293 ip_proto = match.key->ip_proto;
2295 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2296 match.mask->ip_proto);
2297 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2298 match.key->ip_proto);
2300 if (match.mask->ip_proto)
2301 *match_level = MLX5_MATCH_L3;
2304 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2305 struct flow_match_ipv4_addrs match;
2307 flow_rule_match_ipv4_addrs(rule, &match);
2308 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2309 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2310 &match.mask->src, sizeof(match.mask->src));
2311 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2312 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2313 &match.key->src, sizeof(match.key->src));
2314 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2315 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2316 &match.mask->dst, sizeof(match.mask->dst));
2317 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2318 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2319 &match.key->dst, sizeof(match.key->dst));
2321 if (match.mask->src || match.mask->dst)
2322 *match_level = MLX5_MATCH_L3;
2325 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2326 struct flow_match_ipv6_addrs match;
2328 flow_rule_match_ipv6_addrs(rule, &match);
2329 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2330 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2331 &match.mask->src, sizeof(match.mask->src));
2332 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2333 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2334 &match.key->src, sizeof(match.key->src));
2336 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2337 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2338 &match.mask->dst, sizeof(match.mask->dst));
2339 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2340 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2341 &match.key->dst, sizeof(match.key->dst));
2343 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2344 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2345 *match_level = MLX5_MATCH_L3;
2348 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2349 struct flow_match_ip match;
2351 flow_rule_match_ip(rule, &match);
2352 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2353 match.mask->tos & 0x3);
2354 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2355 match.key->tos & 0x3);
2357 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2358 match.mask->tos >> 2);
2359 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2360 match.key->tos >> 2);
2362 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2363 match.mask->ttl);
2364 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2365 match.key->ttl);
2367 if (match.mask->ttl &&
2368 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2369 ft_field_support.outer_ipv4_ttl)) {
2370 NL_SET_ERR_MSG_MOD(extack,
2371 "Matching on TTL is not supported");
2375 if (match.mask->tos || match.mask->ttl)
2376 *match_level = MLX5_MATCH_L3;
2379 /* *** L3 attributes parsing up to here *** */
2381 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2382 struct flow_match_ports match;
2384 flow_rule_match_ports(rule, &match);
2387 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2388 tcp_sport, ntohs(match.mask->src));
2389 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2390 tcp_sport, ntohs(match.key->src));
2392 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2393 tcp_dport, ntohs(match.mask->dst));
2394 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2395 tcp_dport, ntohs(match.key->dst));
2399 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2400 udp_sport, ntohs(match.mask->src));
2401 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2402 udp_sport, ntohs(match.key->src));
2404 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2405 udp_dport, ntohs(match.mask->dst));
2406 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2407 udp_dport, ntohs(match.key->dst));
2410 NL_SET_ERR_MSG_MOD(extack,
2411 "Only UDP and TCP transports are supported for L4 matching");
2412 netdev_err(priv->netdev,
2413 "Only UDP and TCP transport are supported\n");
2417 if (match.mask->src || match.mask->dst)
2418 *match_level = MLX5_MATCH_L4;
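/* Illustrative L4 match (assumed): with
*   tc filter add dev $REP ingress flower ip_proto tcp dst_port 443 ...
* the IPPROTO_TCP branch above programs tcp_dport into both the criteria
* (mask) and value halves of the flow spec.
*/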
2421 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2422 struct flow_match_tcp match;
2424 flow_rule_match_tcp(rule, &match);
2425 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2426 ntohs(match.mask->flags));
2427 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2428 ntohs(match.key->flags));
2430 if (match.mask->flags)
2431 *match_level = MLX5_MATCH_L4;
2433 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2434 struct flow_match_icmp match;
2436 flow_rule_match_icmp(rule, &match);
2439 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2440 MLX5_FLEX_PROTO_ICMP))
2442 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2443 match.mask->type);
2444 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2445 match.key->type);
2446 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2447 match.mask->code);
2448 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2449 match.key->code);
2451 case IPPROTO_ICMPV6:
2452 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2453 MLX5_FLEX_PROTO_ICMPV6))
2455 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2456 match.mask->type);
2457 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2458 match.key->type);
2459 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2460 match.mask->code);
2461 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2462 match.key->code);
2465 NL_SET_ERR_MSG_MOD(extack,
2466 "Code and type matching only with ICMP and ICMPv6");
2467 netdev_err(priv->netdev,
2468 "Code and type matching only with ICMP and ICMPv6\n");
2471 if (match.mask->code || match.mask->type) {
2472 *match_level = MLX5_MATCH_L4;
2473 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2476 /* Currently supported only for MPLS over UDP */
2477 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2478 !netif_is_bareudp(filter_dev)) {
2479 NL_SET_ERR_MSG_MOD(extack,
2480 "Matching on MPLS is supported only for MPLS over UDP");
2481 netdev_err(priv->netdev,
2482 "Matching on MPLS is supported only for MPLS over UDP\n");
2489 static int parse_cls_flower(struct mlx5e_priv *priv,
2490 struct mlx5e_tc_flow *flow,
2491 struct mlx5_flow_spec *spec,
2492 struct flow_cls_offload *f,
2493 struct net_device *filter_dev)
2495 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2496 struct netlink_ext_ack *extack = f->common.extack;
2497 struct mlx5_core_dev *dev = priv->mdev;
2498 struct mlx5_eswitch *esw = dev->priv.eswitch;
2499 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2500 struct mlx5_eswitch_rep *rep;
2501 bool is_eswitch_flow;
2504 inner_match_level = MLX5_MATCH_NONE;
2505 outer_match_level = MLX5_MATCH_NONE;
2507 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2508 &inner_match_level, &outer_match_level);
2509 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2510 outer_match_level : inner_match_level;
2512 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2513 if (!err && is_eswitch_flow) {
2515 if (rep->vport != MLX5_VPORT_UPLINK &&
2516 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2517 esw->offloads.inline_mode < non_tunnel_match_level)) {
2518 NL_SET_ERR_MSG_MOD(extack,
2519 "Flow is not offloaded due to min inline setting");
2520 netdev_warn(priv->netdev,
2521 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2522 non_tunnel_match_level, esw->offloads.inline_mode);
2527 flow->attr->inner_match_level = inner_match_level;
2528 flow->attr->outer_match_level = outer_match_level;
2534 struct pedit_headers {
2536 struct vlan_hdr vlan;
2543 struct pedit_headers_action {
2544 struct pedit_headers vals;
2545 struct pedit_headers masks;
2549 static int pedit_header_offsets[] = {
2550 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2551 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2552 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2553 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2554 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2557 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2559 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2560 struct pedit_headers_action *hdrs)
2562 u32 *curr_pmask, *curr_pval;
2564 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2565 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2567 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2568 return -EEXIST;
2570 *curr_pmask |= mask;
2571 *curr_pval |= (val & mask);
2579 struct mlx5_fields {
2587 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2588 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2589 offsetof(struct pedit_headers, field) + (off), \
2590 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
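/* Sketch (for readability, not new semantics): the entry
*   OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype)
* expands to { MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 16, U16_MAX,
* offsetof(struct pedit_headers, eth.h_proto),
* MLX5_BYTE_OFF(fte_match_set_lyr_2_4, ethertype) }, i.e. a 16-bit rewrite
* target tied to its pedit source offset and its flower match offset.
*/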
2592 /* masked values are the same and there are no rewrites that do not have a
2593 * match.
2594 */
2595 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2596 type matchmaskx = *(type *)(matchmaskp); \
2597 type matchvalx = *(type *)(matchvalp); \
2598 type maskx = *(type *)(maskp); \
2599 type valx = *(type *)(valp); \
2601 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2602 matchmaskx)); \
2603 })
2605 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2606 void *matchmaskp, u8 bsize)
2612 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2615 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2618 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2625 static struct mlx5_fields fields[] = {
2626 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2627 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2628 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2629 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2630 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2631 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2633 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2634 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2635 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2636 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2638 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2639 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2640 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2641 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2642 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2643 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2644 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2645 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2646 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2647 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2648 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2649 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2650 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2651 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2652 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2653 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2654 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2655 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2657 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2658 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2659 /* in the linux tcphdr, tcp_flags is 8 bits long */
2660 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2662 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2663 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
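/* A pedit rewrite is offloadable only if its (header, offset) pair resolves
* to one of the entries above. Illustrative command (assumed):
*   tc ... action pedit ex munge ip ttl set 63
* maps to the IP_TTL entry; an unlisted field leaves a non-zero mask behind
* and fails the zero_masks check in alloc_tc_pedit_action().
*/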
2666 static unsigned long mask_to_le(unsigned long mask, int size)
2672 mask_be32 = (__force __be32)(mask);
2673 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2674 } else if (size == 16) {
2675 mask_be32 = (__force __be32)(mask);
2676 mask_be16 = *(__be16 *)&mask_be32;
2677 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2682 static int offload_pedit_fields(struct mlx5e_priv *priv,
2684 struct pedit_headers_action *hdrs,
2685 struct mlx5e_tc_flow_parse_attr *parse_attr,
2687 struct netlink_ext_ack *extack)
2689 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2690 int i, action_size, first, last, next_z;
2691 void *headers_c, *headers_v, *action, *vals_p;
2692 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2693 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2694 struct mlx5_fields *f;
2695 unsigned long mask, field_mask;
2699 mod_acts = &parse_attr->mod_hdr_acts;
2700 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2701 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2703 set_masks = &hdrs[0].masks;
2704 add_masks = &hdrs[1].masks;
2705 set_vals = &hdrs[0].vals;
2706 add_vals = &hdrs[1].vals;
2708 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2710 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2714 /* avoid seeing bits set from previous iterations */
2718 s_masks_p = (void *)set_masks + f->offset;
2719 a_masks_p = (void *)add_masks + f->offset;
2721 s_mask = *s_masks_p & f->field_mask;
2722 a_mask = *a_masks_p & f->field_mask;
2724 if (!s_mask && !a_mask) /* nothing to offload here */
2727 if (s_mask && a_mask) {
2728 NL_SET_ERR_MSG_MOD(extack,
2729 "can't set and add to the same HW field");
2730 netdev_warn(priv->netdev,
2731 "mlx5: can't set and add to the same HW field (%x)\n",
2738 void *match_mask = headers_c + f->match_offset;
2739 void *match_val = headers_v + f->match_offset;
2741 cmd = MLX5_ACTION_TYPE_SET;
2743 vals_p = (void *)set_vals + f->offset;
2744 /* don't rewrite if we have a match on the same value */
2745 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2746 match_mask, f->field_bsize))
2748 /* clear to denote we consumed this field */
2749 *s_masks_p &= ~f->field_mask;
2751 cmd = MLX5_ACTION_TYPE_ADD;
2753 vals_p = (void *)add_vals + f->offset;
2754 /* add 0 is no change */
2755 if ((*(u32 *)vals_p & f->field_mask) == 0)
2757 /* clear to denote we consumed this field */
2758 *a_masks_p &= ~f->field_mask;
2763 mask = mask_to_le(mask, f->field_bsize);
2765 first = find_first_bit(&mask, f->field_bsize);
2766 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2767 last = find_last_bit(&mask, f->field_bsize);
2768 if (first < next_z && next_z < last) {
2769 NL_SET_ERR_MSG_MOD(extack,
2770 "rewrite of non-contiguous sub-fields isn't supported");
2771 netdev_warn(priv->netdev,
2772 "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
2777 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2779 NL_SET_ERR_MSG_MOD(extack,
2780 "too many pedit actions, can't offload");
2781 mlx5_core_warn(priv->mdev,
2782 "mlx5: parsed %d pedit actions, can't do more\n",
2783 mod_acts->num_actions);
2787 action = mod_acts->actions +
2788 (mod_acts->num_actions * action_size);
2789 MLX5_SET(set_action_in, action, action_type, cmd);
2790 MLX5_SET(set_action_in, action, field, f->field);
2792 if (cmd == MLX5_ACTION_TYPE_SET) {
2795 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2797 /* if the field is bit sized, it may not start at the first bit */
2798 start = find_first_bit(&field_mask, f->field_bsize);
2800 MLX5_SET(set_action_in, action, offset, first - start);
2801 /* length is num of bits to be written, zero means length of 32 */
2802 MLX5_SET(set_action_in, action, length, (last - first + 1));
2805 if (f->field_bsize == 32)
2806 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2807 else if (f->field_bsize == 16)
2808 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2809 else if (f->field_bsize == 8)
2810 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2812 ++mod_acts->num_actions;
2818 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2821 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2822 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2823 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2824 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2827 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2829 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2831 int action_size, new_num_actions, max_hw_actions;
2832 size_t new_sz, old_sz;
2835 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2838 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2840 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2842 new_num_actions = min(max_hw_actions,
2843 mod_hdr_acts->actions ?
2844 mod_hdr_acts->max_actions * 2 : 1);
2845 if (mod_hdr_acts->max_actions == new_num_actions)
2848 new_sz = action_size * new_num_actions;
2849 old_sz = mod_hdr_acts->max_actions * action_size;
2850 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2851 if (!ret)
2852 return -ENOMEM;
2854 memset(ret + old_sz, 0, new_sz - old_sz);
2855 mod_hdr_acts->actions = ret;
2856 mod_hdr_acts->max_actions = new_num_actions;
2861 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2863 kfree(mod_hdr_acts->actions);
2864 mod_hdr_acts->actions = NULL;
2865 mod_hdr_acts->num_actions = 0;
2866 mod_hdr_acts->max_actions = 0;
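/* Sketch of the intended alloc/dealloc pairing (derived from the code
* above, not a documented API contract):
*
*	struct mlx5e_tc_mod_hdr_acts acts = {};
*
*	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB, &acts);
*	// append set/add/copy actions, bumping acts.num_actions
*	dealloc_mod_hdr_actions(&acts);
*
* alloc_mod_hdr_actions() can be called before every append: it doubles
* max_actions up to the firmware limit, zeroes the grown tail, and returns
* -ENOSPC once the limit is reached.
*/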
2869 static const struct pedit_headers zero_masks = {};
2872 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2873 const struct flow_action_entry *act, int namespace,
2874 struct mlx5e_tc_flow_parse_attr *parse_attr,
2875 struct pedit_headers_action *hdrs,
2876 struct netlink_ext_ack *extack)
2878 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2879 int err = -EOPNOTSUPP;
2880 u32 mask, val, offset;
2883 htype = act->mangle.htype;
2884 err = -EOPNOTSUPP; /* can't be all optimistic */
2886 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2887 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2891 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2892 NL_SET_ERR_MSG_MOD(extack,
2893 "The pedit offload action is not supported");
2897 mask = act->mangle.mask;
2898 val = act->mangle.val;
2899 offset = act->mangle.offset;
2901 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2913 parse_pedit_to_reformat(struct mlx5e_priv *priv,
2914 const struct flow_action_entry *act,
2915 struct mlx5e_tc_flow_parse_attr *parse_attr,
2916 struct netlink_ext_ack *extack)
2918 u32 mask, val, offset;
2921 if (act->id != FLOW_ACTION_MANGLE)
2924 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
2925 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
2929 mask = ~act->mangle.mask;
2930 val = act->mangle.val;
2931 offset = act->mangle.offset;
2932 p = (u32 *)&parse_attr->eth;
2933 *(p + (offset >> 2)) |= (val & mask);
2938 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2939 const struct flow_action_entry *act, int namespace,
2940 struct mlx5e_tc_flow_parse_attr *parse_attr,
2941 struct pedit_headers_action *hdrs,
2942 struct mlx5e_tc_flow *flow,
2943 struct netlink_ext_ack *extack)
2945 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
2946 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
2948 return parse_pedit_to_modify_hdr(priv, act, namespace,
2949 parse_attr, hdrs, extack);
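/* Illustrative pedit flow (assumed) that reaches this parser:
*   tc filter add dev $REP ingress flower ip_proto tcp \
*      action pedit ex munge eth dst set 00:11:22:33:44:55 pipe \
*      action mirred egress redirect dev $REP2
* Each munge arrives as a FLOW_ACTION_MANGLE entry and is accumulated in
* hdrs[] (or folded into the reformat header for L3_TO_L2_DECAP flows).
*/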
2952 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2953 struct mlx5e_tc_flow_parse_attr *parse_attr,
2954 struct pedit_headers_action *hdrs,
2956 struct netlink_ext_ack *extack)
2958 struct pedit_headers *cmd_masks;
2962 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2963 action_flags, extack);
2965 goto out_dealloc_parsed_actions;
2967 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2968 cmd_masks = &hdrs[cmd].masks;
2969 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2970 NL_SET_ERR_MSG_MOD(extack,
2971 "attempt to offload an unsupported field");
2972 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2973 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2974 16, 1, cmd_masks, sizeof(zero_masks), true);
2976 goto out_dealloc_parsed_actions;
2982 out_dealloc_parsed_actions:
2983 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2987 static bool csum_offload_supported(struct mlx5e_priv *priv,
2990 struct netlink_ext_ack *extack)
2992 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2993 TCA_CSUM_UPDATE_FLAG_UDP;
2995 /* The HW recalcs checksums only if re-writing headers */
2996 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2997 NL_SET_ERR_MSG_MOD(extack,
2998 "TC csum action is only offloaded with pedit");
2999 netdev_warn(priv->netdev,
3000 "TC csum action is only offloaded with pedit\n");
3004 if (update_flags & ~prot_flags) {
3005 NL_SET_ERR_MSG_MOD(extack,
3006 "can't offload TC csum action for some header/s");
3007 netdev_warn(priv->netdev,
3008 "can't offload TC csum action for some header/s - flags %#x\n",
3016 struct ip_ttl_word {
3022 struct ipv6_hoplimit_word {
3028 static int is_action_keys_supported(const struct flow_action_entry *act,
3029 bool ct_flow, bool *modify_ip_header,
3031 struct netlink_ext_ack *extack)
3036 htype = act->mangle.htype;
3037 offset = act->mangle.offset;
3038 mask = ~act->mangle.mask;
3039 /* For the IPv4 & IPv6 headers, check the whole 4-byte word
3040 * to determine whether the modified fields
3041 * are only ttl & hop_limit.
3042 */
3043 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3044 struct ip_ttl_word *ttl_word =
3045 (struct ip_ttl_word *)&mask;
3047 if (offset != offsetof(struct iphdr, ttl) ||
3048 ttl_word->protocol ||
3050 *modify_ip_header = true;
3053 if (offset >= offsetof(struct iphdr, saddr))
3054 *modify_tuple = true;
3056 if (ct_flow && *modify_tuple) {
3057 NL_SET_ERR_MSG_MOD(extack,
3058 "can't offload re-write of ipv4 address with action ct");
3061 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3062 struct ipv6_hoplimit_word *hoplimit_word =
3063 (struct ipv6_hoplimit_word *)&mask;
3065 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3066 hoplimit_word->payload_len ||
3067 hoplimit_word->nexthdr) {
3068 *modify_ip_header = true;
3071 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3072 *modify_tuple = true;
3074 if (ct_flow && *modify_tuple) {
3075 NL_SET_ERR_MSG_MOD(extack,
3076 "can't offload re-write of ipv6 address with action ct");
3079 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3080 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3081 *modify_tuple = true;
3083 NL_SET_ERR_MSG_MOD(extack,
3084 "can't offload re-write of transport header ports with action ct");
3092 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3093 bool ct_flow, struct netlink_ext_ack *extack,
3094 struct mlx5e_priv *priv,
3095 struct mlx5_flow_spec *spec)
3097 if (!modify_tuple || ct_clear)
3101 NL_SET_ERR_MSG_MOD(extack,
3102 "can't offload tuple modification with non-clear ct()");
3103 netdev_info(priv->netdev,
3104 "can't offload tuple modification with non-clear ct()");
3108 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3109 * (or after clear action), as otherwise, since the tuple is changed,
3110 * we can't restore ct state
3111 */
3112 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3113 NL_SET_ERR_MSG_MOD(extack,
3114 "can't offload tuple modification with ct matches and no ct(clear) action");
3115 netdev_info(priv->netdev,
3116 "can't offload tuple modification with ct matches and no ct(clear) action");
3123 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3124 struct mlx5_flow_spec *spec,
3125 struct flow_action *flow_action,
3126 u32 actions, bool ct_flow,
3128 struct netlink_ext_ack *extack)
3130 const struct flow_action_entry *act;
3131 bool modify_ip_header, modify_tuple;
3138 headers_c = get_match_headers_criteria(actions, spec);
3139 headers_v = get_match_headers_value(actions, spec);
3140 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3142 /* for non-IP we only re-write MACs, so we're okay */
3143 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3144 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3147 modify_ip_header = false;
3148 modify_tuple = false;
3149 flow_action_for_each(i, act, flow_action) {
3150 if (act->id != FLOW_ACTION_MANGLE &&
3151 act->id != FLOW_ACTION_ADD)
3154 err = is_action_keys_supported(act, ct_flow,
3156 &modify_tuple, extack);
3161 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3165 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3166 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3167 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3168 NL_SET_ERR_MSG_MOD(extack,
3169 "can't offload re-write of non TCP/UDP");
3170 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3179 static bool actions_match_supported(struct mlx5e_priv *priv,
3180 struct flow_action *flow_action,
3181 struct mlx5e_tc_flow_parse_attr *parse_attr,
3182 struct mlx5e_tc_flow *flow,
3183 struct netlink_ext_ack *extack)
3185 bool ct_flow = false, ct_clear = false;
3188 ct_clear = flow->attr->ct_attr.ct_action &
3190 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3191 actions = flow->attr->action;
3193 if (mlx5e_is_eswitch_flow(flow)) {
3194 if (flow->attr->esw_attr->split_count && ct_flow &&
3195 !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
3196 /* All registers used by ct are cleared when using
3197 * split rules.
3198 */
3199 NL_SET_ERR_MSG_MOD(extack,
3200 "Can't offload mirroring with action ct");
3205 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3206 return modify_header_match_supported(priv, &parse_attr->spec,
3207 flow_action, actions,
3214 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3216 return priv->mdev == peer_priv->mdev;
3219 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3221 struct mlx5_core_dev *fmdev, *pmdev;
3222 u64 fsystem_guid, psystem_guid;
3225 pmdev = peer_priv->mdev;
3227 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3228 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3230 return (fsystem_guid == psystem_guid);
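/* The NIC system image GUID is shared by all functions of one physical
* device, so this identifies "same HW" across the PFs/VFs of an adapter;
* parse_tc_nic_actions() relies on it to allow hairpin forwarding only
* between ports of the same HW.
*/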
3233 static bool same_vf_reps(struct mlx5e_priv *priv,
3234 struct net_device *out_dev)
3236 return mlx5e_eswitch_vf_rep(priv->netdev) &&
3237 priv->netdev == out_dev;
3240 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3241 const struct flow_action_entry *act,
3242 struct mlx5e_tc_flow_parse_attr *parse_attr,
3243 struct pedit_headers_action *hdrs,
3244 u32 *action, struct netlink_ext_ack *extack)
3246 u16 mask16 = VLAN_VID_MASK;
3247 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3248 const struct flow_action_entry pedit_act = {
3249 .id = FLOW_ACTION_MANGLE,
3250 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3251 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3252 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3253 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3255 u8 match_prio_mask, match_prio_val;
3256 void *headers_c, *headers_v;
3259 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3260 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3262 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3263 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3264 NL_SET_ERR_MSG_MOD(extack,
3265 "VLAN rewrite action must have VLAN protocol match");
3269 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3270 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3271 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3272 NL_SET_ERR_MSG_MOD(extack,
3273 "Changing VLAN prio is not supported");
3277 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3278 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3284 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3285 struct mlx5e_tc_flow_parse_attr *parse_attr,
3286 struct pedit_headers_action *hdrs,
3287 u32 *action, struct netlink_ext_ack *extack)
3289 const struct flow_action_entry prio_tag_act = {
3290 .vlan.vid = 0,
3291 .vlan.prio =
3292 MLX5_GET(fte_match_set_lyr_2_4,
3293 get_match_headers_value(*action,
3294 &parse_attr->spec),
3295 first_prio) &
3296 MLX5_GET(fte_match_set_lyr_2_4,
3297 get_match_headers_criteria(*action,
3298 &parse_attr->spec),
3299 first_prio),
3300 };
3302 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3303 &prio_tag_act, parse_attr, hdrs, action,
3307 static int validate_goto_chain(struct mlx5e_priv *priv,
3308 struct mlx5e_tc_flow *flow,
3309 const struct flow_action_entry *act,
3311 struct netlink_ext_ack *extack)
3313 bool is_esw = mlx5e_is_eswitch_flow(flow);
3314 struct mlx5_flow_attr *attr = flow->attr;
3315 bool ft_flow = mlx5e_is_ft_flow(flow);
3316 u32 dest_chain = act->chain_index;
3317 struct mlx5_fs_chains *chains;
3318 struct mlx5_eswitch *esw;
3319 u32 reformat_and_fwd;
3322 esw = priv->mdev->priv.eswitch;
3323 chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3324 max_chain = mlx5_chains_get_chain_range(chains);
3325 reformat_and_fwd = is_esw ?
3326 MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3327 MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3330 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3334 if (!mlx5_chains_backwards_supported(chains) &&
3335 dest_chain <= attr->chain) {
3336 NL_SET_ERR_MSG_MOD(extack,
3337 "Goto lower numbered chain isn't supported");
3341 if (dest_chain > max_chain) {
3342 NL_SET_ERR_MSG_MOD(extack,
3343 "Requested destination chain is out of supported range");
3347 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3348 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3349 !reformat_and_fwd) {
3350 NL_SET_ERR_MSG_MOD(extack,
3351 "Goto chain is not allowed if action has reformat or decap");
3358 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3359 struct flow_action *flow_action,
3360 struct mlx5e_tc_flow *flow,
3361 struct netlink_ext_ack *extack)
3363 struct mlx5e_tc_flow_parse_attr *parse_attr;
3364 struct mlx5_flow_attr *attr = flow->attr;
3365 struct pedit_headers_action hdrs[2] = {};
3366 const struct flow_action_entry *act;
3367 struct mlx5_nic_flow_attr *nic_attr;
3371 if (!flow_action_has_entries(flow_action))
3374 if (!flow_action_hw_stats_check(flow_action, extack,
3375 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3378 nic_attr = attr->nic_attr;
3379 nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3380 parse_attr = attr->parse_attr;
3382 flow_action_for_each(i, act, flow_action) {
3384 case FLOW_ACTION_ACCEPT:
3385 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3386 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3388 case FLOW_ACTION_DROP:
3389 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3390 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3392 case FLOW_ACTION_MANGLE:
3393 case FLOW_ACTION_ADD:
3394 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3395 parse_attr, hdrs, NULL, extack);
3399 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3401 case FLOW_ACTION_VLAN_MANGLE:
3402 err = add_vlan_rewrite_action(priv,
3403 MLX5_FLOW_NAMESPACE_KERNEL,
3404 act, parse_attr, hdrs,
3410 case FLOW_ACTION_CSUM:
3411 if (csum_offload_supported(priv, action,
3417 case FLOW_ACTION_REDIRECT: {
3418 struct net_device *peer_dev = act->dev;
3420 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3421 same_hw_devs(priv, netdev_priv(peer_dev))) {
3422 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3423 flow_flag_set(flow, HAIRPIN);
3424 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3425 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3427 NL_SET_ERR_MSG_MOD(extack,
3428 "device is not on same HW, can't offload");
3429 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3435 case FLOW_ACTION_MARK: {
3436 u32 mark = act->mark;
3438 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3439 NL_SET_ERR_MSG_MOD(extack,
3440 "Bad flow mark - only 16 bit is supported");
3444 nic_attr->flow_tag = mark;
3445 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3448 case FLOW_ACTION_GOTO:
3449 err = validate_goto_chain(priv, flow, act, action,
3454 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3455 attr->dest_chain = act->chain_index;
3457 case FLOW_ACTION_CT:
3458 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3462 flow_flag_set(flow, CT);
3465 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3470 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3471 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3472 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3473 parse_attr, hdrs, &action, extack);
3476 /* in case all pedit actions are skipped, remove the MOD_HDR
3477 * flag.
3478 */
3479 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3480 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3481 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3485 attr->action = action;
3487 if (attr->dest_chain) {
3488 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3489 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3492 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3495 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3496 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3498 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
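/* Illustrative NIC rule (assumed) covered by the parser above:
*   tc filter add dev eth0 ingress flower dst_ip 10.0.0.1 \
*      action skbedit mark 0x1234
* FLOW_ACTION_MARK lands in nic_attr->flow_tag, which the HW echoes in the
* CQE so the mark can be restored on the received skb.
*/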
3504 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3505 struct net_device *peer_netdev)
3507 struct mlx5e_priv *peer_priv;
3509 peer_priv = netdev_priv(peer_netdev);
3511 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3512 mlx5e_eswitch_vf_rep(priv->netdev) &&
3513 mlx5e_eswitch_vf_rep(peer_netdev) &&
3514 same_hw_devs(priv, peer_priv));
3517 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3518 const struct flow_action_entry *act,
3519 struct mlx5_esw_flow_attr *attr,
3522 u8 vlan_idx = attr->total_vlan;
3524 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3528 case FLOW_ACTION_VLAN_POP:
3530 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3531 MLX5_FS_VLAN_DEPTH))
3534 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3536 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3539 case FLOW_ACTION_VLAN_PUSH:
3540 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3541 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3542 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3543 if (!attr->vlan_proto[vlan_idx])
3544 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3547 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3548 MLX5_FS_VLAN_DEPTH))
3551 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3553 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3554 (act->vlan.proto != htons(ETH_P_8021Q) ||
3558 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3565 attr->total_vlan = vlan_idx + 1;
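/* Illustrative double push (assumed) that reaches the vlan_idx == 1 path:
*   tc ... action vlan push protocol 802.1ad id 100 pipe \
*          action vlan push id 200 pipe \
*          action mirred egress redirect dev $REP2
* The first push fills vlan_*[0]; the second requires the device to support
* MLX5_FS_VLAN_DEPTH (two) VLAN actions.
*/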
3570 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3571 struct net_device *out_dev)
3573 struct net_device *fdb_out_dev = out_dev;
3574 struct net_device *uplink_upper;
3577 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3578 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3579 uplink_upper == out_dev) {
3580 fdb_out_dev = uplink_dev;
3581 } else if (netif_is_lag_master(out_dev)) {
3582 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3583 if (fdb_out_dev &&
3584 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3585 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3592 static int add_vlan_push_action(struct mlx5e_priv *priv,
3593 struct mlx5_flow_attr *attr,
3594 struct net_device **out_dev,
3597 struct net_device *vlan_dev = *out_dev;
3598 struct flow_action_entry vlan_act = {
3599 .id = FLOW_ACTION_VLAN_PUSH,
3600 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3601 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3606 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3611 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
3616 if (is_vlan_dev(*out_dev))
3617 err = add_vlan_push_action(priv, attr, out_dev, action);
3622 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3623 struct mlx5_flow_attr *attr,
3626 struct flow_action_entry vlan_act = {
3627 .id = FLOW_ACTION_VLAN_POP,
3629 int nest_level, err = 0;
3631 nest_level = attr->parse_attr->filter_dev->lower_level -
3632 priv->netdev->lower_level;
3633 while (nest_level--) {
3634 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3642 static bool same_hw_reps(struct mlx5e_priv *priv,
3643 struct net_device *peer_netdev)
3645 struct mlx5e_priv *peer_priv;
3647 peer_priv = netdev_priv(peer_netdev);
3649 return mlx5e_eswitch_rep(priv->netdev) &&
3650 mlx5e_eswitch_rep(peer_netdev) &&
3651 same_hw_devs(priv, peer_priv);
3654 static bool is_lag_dev(struct mlx5e_priv *priv,
3655 struct net_device *peer_netdev)
3657 return ((mlx5_lag_is_sriov(priv->mdev) ||
3658 mlx5_lag_is_multipath(priv->mdev)) &&
3659 same_hw_reps(priv, peer_netdev));
3662 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3663 struct net_device *out_dev)
3665 if (is_merged_eswitch_vfs(priv, out_dev))
3668 if (is_lag_dev(priv, out_dev))
3671 return mlx5e_eswitch_rep(out_dev) &&
3672 same_port_devs(priv, netdev_priv(out_dev));
3675 static bool is_duplicated_output_device(struct net_device *dev,
3676 struct net_device *out_dev,
3677 int *ifindexes, int if_count,
3678 struct netlink_ext_ack *extack)
3682 for (i = 0; i < if_count; i++) {
3683 if (ifindexes[i] == out_dev->ifindex) {
3684 NL_SET_ERR_MSG_MOD(extack,
3685 "can't duplicate output to same device");
3686 netdev_err(dev, "can't duplicate output to same device: %s\n",
3695 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3696 struct mlx5e_tc_flow *flow,
3697 struct net_device *out_dev,
3698 struct netlink_ext_ack *extack)
3700 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3701 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3702 struct mlx5e_rep_priv *rep_priv;
3704 /* Forwarding non encapsulated traffic between
3705 * uplink ports is allowed only if
3706 * termination_table_raw_traffic cap is set.
3708 * Input vport was stored in attr->in_rep.
3709 * In the LAG case, *priv* is the private data of the
3710 * uplink, which may not be the input vport.
3711 */
3712 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3714 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3715 mlx5e_eswitch_uplink_rep(out_dev)))
3718 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3719 termination_table_raw_traffic)) {
3720 NL_SET_ERR_MSG_MOD(extack,
3721 "devices are both uplink, can't offload forwarding");
3722 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3723 priv->netdev->name, out_dev->name);
3725 } else if (out_dev != rep_priv->netdev) {
3726 NL_SET_ERR_MSG_MOD(extack,
3727 "devices are not the same uplink, can't offload forwarding");
3728 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3729 priv->netdev->name, out_dev->name);
3735 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3736 struct flow_action *flow_action,
3737 struct mlx5e_tc_flow *flow,
3738 struct netlink_ext_ack *extack)
3740 struct pedit_headers_action hdrs[2] = {};
3741 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3742 struct mlx5e_tc_flow_parse_attr *parse_attr;
3743 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3744 struct mlx5e_sample_attr sample_attr = {};
3745 const struct ip_tunnel_info *info = NULL;
3746 struct mlx5_flow_attr *attr = flow->attr;
3747 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3748 bool ft_flow = mlx5e_is_ft_flow(flow);
3749 const struct flow_action_entry *act;
3750 struct mlx5_esw_flow_attr *esw_attr;
3751 bool encap = false, decap = false;
3752 u32 action = attr->action;
3753 int err, i, if_count = 0;
3754 bool mpls_push = false;
3756 if (!flow_action_has_entries(flow_action))
3759 if (!flow_action_hw_stats_check(flow_action, extack,
3760 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3763 esw_attr = attr->esw_attr;
3764 parse_attr = attr->parse_attr;
3766 flow_action_for_each(i, act, flow_action) {
3768 case FLOW_ACTION_DROP:
3769 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3770 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3772 case FLOW_ACTION_TRAP:
3773 if (!flow_offload_has_one_action(flow_action)) {
3774 NL_SET_ERR_MSG_MOD(extack,
3775 "action trap is supported as a sole action only");
3778 action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3779 MLX5_FLOW_CONTEXT_ACTION_COUNT);
3780 attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
3782 case FLOW_ACTION_MPLS_PUSH:
3783 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3784 reformat_l2_to_l3_tunnel) ||
3785 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3786 NL_SET_ERR_MSG_MOD(extack,
3787 "mpls push is supported only for mpls_uc protocol");
3792 case FLOW_ACTION_MPLS_POP:
3793 /* we only support mpls pop if it is the first action
3794 * and the filter net device is bareudp. Subsequent
3795 * actions can be pedit and the last can be mirred
3796 * egress redirect.
3797 */
3798 if (i) {
3799 NL_SET_ERR_MSG_MOD(extack,
3800 "mpls pop supported only as first action");
3803 if (!netif_is_bareudp(parse_attr->filter_dev)) {
3804 NL_SET_ERR_MSG_MOD(extack,
3805 "mpls pop supported only on bareudp devices");
3809 parse_attr->eth.h_proto = act->mpls_pop.proto;
3810 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
3811 flow_flag_set(flow, L3_TO_L2_DECAP);
3813 case FLOW_ACTION_MANGLE:
3814 case FLOW_ACTION_ADD:
3815 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3816 parse_attr, hdrs, flow, extack);
3820 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
3821 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3822 esw_attr->split_count = esw_attr->out_count;
3825 case FLOW_ACTION_CSUM:
3826 if (csum_offload_supported(priv, action,
3827 act->csum_flags, extack))
3831 case FLOW_ACTION_REDIRECT:
3832 case FLOW_ACTION_MIRRED: {
3833 struct mlx5e_priv *out_priv;
3834 struct net_device *out_dev;
3838 /* out_dev is NULL when filters with
3839 * non-existing mirred device are replayed to
3840 * the driver.
3841 */
3845 if (mpls_push && !netif_is_bareudp(out_dev)) {
3846 NL_SET_ERR_MSG_MOD(extack,
3847 "mpls is supported only through a bareudp device");
3851 if (ft_flow && out_dev == priv->netdev) {
3852 /* Ignore forward to self rules generated
3853 * by adding both mlx5 devs to the flow table
3854 * block on a normal nft offload setup.
3855 */
3859 if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3860 NL_SET_ERR_MSG_MOD(extack,
3861 "can't support more output ports, can't offload forwarding");
3862 netdev_warn(priv->netdev,
3863 "can't support more than %d output ports, can't offload forwarding\n",
3864 esw_attr->out_count);
3868 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3869 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3871 parse_attr->mirred_ifindex[esw_attr->out_count] =
3873 parse_attr->tun_info[esw_attr->out_count] =
3874 mlx5e_dup_tun_info(info);
3875 if (!parse_attr->tun_info[esw_attr->out_count])
3878 esw_attr->dests[esw_attr->out_count].flags |=
3879 MLX5_ESW_DEST_ENCAP;
3880 esw_attr->out_count++;
3881 /* attr->dests[].rep is resolved when we
3882 * handle encap
3883 */
3884 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
3885 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3886 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3888 if (is_duplicated_output_device(priv->netdev,
3895 ifindexes[if_count] = out_dev->ifindex;
3898 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
3902 if (is_vlan_dev(out_dev)) {
3903 err = add_vlan_push_action(priv, attr,
3910 if (is_vlan_dev(parse_attr->filter_dev)) {
3911 err = add_vlan_pop_action(priv, attr,
3917 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3921 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3922 NL_SET_ERR_MSG_MOD(extack,
3923 "devices are not on same switch HW, can't offload forwarding");
3927 if (same_vf_reps(priv, out_dev)) {
3928 NL_SET_ERR_MSG_MOD(extack,
3929 "can't forward from a VF to itself");
3933 out_priv = netdev_priv(out_dev);
3934 rpriv = out_priv->ppriv;
3935 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
3936 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
3937 esw_attr->out_count++;
3938 } else if (parse_attr->filter_dev != priv->netdev) {
3939 /* All mlx5 devices are called to configure
3940 * high level device filters. Therefore, the
3941 * *attempt* to install a filter on invalid
3942 * eswitch should not trigger an explicit error
3943 */
3946 NL_SET_ERR_MSG_MOD(extack,
3947 "devices are not on same switch HW, can't offload forwarding");
3948 netdev_warn(priv->netdev,
3949 "devices %s %s not on same switch HW, can't offload forwarding\n",
3956 case FLOW_ACTION_TUNNEL_ENCAP:
3964 case FLOW_ACTION_VLAN_PUSH:
3965 case FLOW_ACTION_VLAN_POP:
3966 if (act->id == FLOW_ACTION_VLAN_PUSH &&
3967 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
3968 /* Replace vlan pop+push with vlan modify */
3969 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3970 err = add_vlan_rewrite_action(priv,
3971 MLX5_FLOW_NAMESPACE_FDB,
3972 act, parse_attr, hdrs,
3975 err = parse_tc_vlan_action(priv, act, esw_attr, &action);
3980 esw_attr->split_count = esw_attr->out_count;
3982 case FLOW_ACTION_VLAN_MANGLE:
3983 err = add_vlan_rewrite_action(priv,
3984 MLX5_FLOW_NAMESPACE_FDB,
3985 act, parse_attr, hdrs,
3990 esw_attr->split_count = esw_attr->out_count;
3992 case FLOW_ACTION_TUNNEL_DECAP:
3995 case FLOW_ACTION_GOTO:
3996 err = validate_goto_chain(priv, flow, act, action,
4001 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4002 attr->dest_chain = act->chain_index;
4004 case FLOW_ACTION_CT:
4005 if (flow_flag_test(flow, SAMPLE)) {
4006 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
4009 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
4013 flow_flag_set(flow, CT);
4014 esw_attr->split_count = esw_attr->out_count;
4016 case FLOW_ACTION_SAMPLE:
4017 if (flow_flag_test(flow, CT)) {
4018 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
4021 sample_attr.rate = act->sample.rate;
4022 sample_attr.group_num = act->sample.psample_group->group_num;
4023 if (act->sample.truncate)
4024 sample_attr.trunc_size = act->sample.trunc_size;
4025 flow_flag_set(flow, SAMPLE);
4028 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
4033 /* always set IP version for indirect table handling */
4034 attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
4036 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
4037 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
4038 /* For prio tag mode, replace vlan pop with rewrite vlan prio
4039 * tag rewrite.
4040 */
4041 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4042 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
4048 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
4049 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
4050 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
4051 parse_attr, hdrs, &action, extack);
4054 /* in case all pedit actions are skipped, remove the MOD_HDR
4055 * flag. we might have set split_count either by pedit or
4056 * pop/push. if there is no pop/push either, reset it too.
4057 */
4058 if (parse_attr->mod_hdr_acts.num_actions == 0) {
4059 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4060 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4061 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
4062 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
4063 esw_attr->split_count = 0;
4067 attr->action = action;
4068 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
4071 if (attr->dest_chain) {
4073 /* It can be supported if we'll create a mapping for
4074 * the tunnel device only (without tunnel), and set
4075 * this tunnel id with this decap flow.
4077 * On restore (miss), we'll just set this saved tunnel
4078 * device.
4079 */
4081 NL_SET_ERR_MSG(extack,
4082 "Decap with goto isn't supported");
4083 netdev_warn(priv->netdev,
4084 "Decap with goto isn't supported");
4088 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4091 if (!(attr->action &
4092 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4093 NL_SET_ERR_MSG_MOD(extack,
4094 "Rule must have at least one forward/drop action");
4098 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4099 NL_SET_ERR_MSG_MOD(extack,
4100 "current firmware doesn't support split rule for port mirroring");
4101 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4105 /* Allocate sample attribute only when there is a sample action and
4106 * no errors after parsing.
4107 */
4108 if (flow_flag_test(flow, SAMPLE)) {
4109 attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL);
4110 if (!attr->sample_attr)
4112 *attr->sample_attr = sample_attr;
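/* A representative eswitch rule (illustrative, not from the source) that
* walks several of the branches above:
*   tc filter add dev $VF_REP ingress flower dst_mac aa:bb:cc:dd:ee:ff \
*      action vlan pop pipe \
*      action pedit ex munge ip ttl set 63 pipe csum ip pipe \
*      action mirred egress redirect dev $UPLINK_REP
*/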
4118 static void get_flags(int flags, unsigned long *flow_flags)
4120 unsigned long __flow_flags = 0;
4122 if (flags & MLX5_TC_FLAG(INGRESS))
4123 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4124 if (flags & MLX5_TC_FLAG(EGRESS))
4125 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4127 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4128 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4129 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4130 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4131 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4132 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4134 *flow_flags = __flow_flags;
4137 static const struct rhashtable_params tc_ht_params = {
4138 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4139 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4140 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4141 .automatic_shrinking = true,
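/* Flows are keyed by the u64 tc cookie, so FLOW_CLS_DESTROY/STATS requests
* resolve roughly as (sketch, assuming standard rhashtable usage):
*   flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
*/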
4144 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4145 unsigned long flags)
4147 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4148 struct mlx5e_rep_priv *uplink_rpriv;
4150 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4151 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4152 return &uplink_rpriv->uplink_priv.tc_ht;
4153 } else /* NIC offload */
4154 return &priv->fs.tc.ht;
4157 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4159 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4160 struct mlx5_flow_attr *attr = flow->attr;
4161 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4162 flow_flag_test(flow, INGRESS);
4163 bool act_is_encap = !!(attr->action &
4164 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4165 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4166 MLX5_DEVCOM_ESW_OFFLOADS);
4171 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4172 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4173 (is_rep_ingress || act_is_encap))
4179 struct mlx5_flow_attr *
4180 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4182 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4183 sizeof(struct mlx5_esw_flow_attr) :
4184 sizeof(struct mlx5_nic_flow_attr);
4185 struct mlx5_flow_attr *attr;
4187 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
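/* The namespace-specific part (esw or nic attr) lives in the same
* allocation, directly after struct mlx5_flow_attr, so a single kfree() of
* the returned pointer releases both (assumption based on the sizing above).
*/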
4191 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4192 struct flow_cls_offload *f, unsigned long flow_flags,
4193 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4194 struct mlx5e_tc_flow **__flow)
4196 struct mlx5e_tc_flow_parse_attr *parse_attr;
4197 struct mlx5_flow_attr *attr;
4198 struct mlx5e_tc_flow *flow;
4202 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4203 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4204 if (!parse_attr || !flow)
4207 flow->flags = flow_flags;
4208 flow->cookie = f->cookie;
4211 attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4217 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4218 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4219 INIT_LIST_HEAD(&flow->hairpin);
4220 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4221 refcount_set(&flow->refcnt, 1);
4222 init_completion(&flow->init_done);
4225 *__parse_attr = parse_attr;
4236 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4237 struct mlx5e_tc_flow_parse_attr *parse_attr,
4238 struct flow_cls_offload *f)
4240 attr->parse_attr = parse_attr;
4241 attr->chain = f->common.chain_index;
4242 attr->prio = f->common.prio;
4246 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4247 struct mlx5e_priv *priv,
4248 struct mlx5e_tc_flow_parse_attr *parse_attr,
4249 struct flow_cls_offload *f,
4250 struct mlx5_eswitch_rep *in_rep,
4251 struct mlx5_core_dev *in_mdev)
4253 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4254 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4256 mlx5e_flow_attr_init(attr, parse_attr, f);
4258 esw_attr->in_rep = in_rep;
4259 esw_attr->in_mdev = in_mdev;
4261 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4262 MLX5_COUNTER_SOURCE_ESWITCH)
4263 esw_attr->counter_dev = in_mdev;
4265 esw_attr->counter_dev = priv->mdev;
4268 static struct mlx5e_tc_flow *
4269 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4270 struct flow_cls_offload *f,
4271 unsigned long flow_flags,
4272 struct net_device *filter_dev,
4273 struct mlx5_eswitch_rep *in_rep,
4274 struct mlx5_core_dev *in_mdev)
4276 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4277 struct netlink_ext_ack *extack = f->common.extack;
4278 struct mlx5e_tc_flow_parse_attr *parse_attr;
4279 struct mlx5e_tc_flow *flow;
4282 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4283 attr_size = sizeof(struct mlx5_esw_flow_attr);
4284 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4285 &parse_attr, &flow);
4289 parse_attr->filter_dev = filter_dev;
4290 mlx5e_flow_esw_attr_init(flow->attr,
4291 priv, parse_attr,
4292 f, in_rep, in_mdev);
4294 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4299 /* actions validation depends on parsing the ct matches first */
4300 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4301 &flow->attr->ct_attr, extack);
4305 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4309 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4310 complete_all(&flow->init_done);
4312 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4315 add_unready_flow(flow);
4321 mlx5e_flow_put(priv, flow);
4323 return ERR_PTR(err);
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned to the mdev from which the packet originated:
	 * packets redirected to the uplink use the same mdev as the original
	 * flow, while packets redirected from the uplink use the peer mdev.
	 */
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

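/* Entry point for FDB flows: offload on the local eswitch first, then
 * mirror the rule on the peer eswitch when multipath requires it.
 */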
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return err;
		}
	}

	*__flow = flow;
	return 0;
}

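/* Parse and offload one NIC-mode flow. Without ignore_flow_level support
 * only chain 0 can be offloaded; with it, any chain is accepted.
 */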
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;
	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;
	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;
	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

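/* Dispatch a classifier offload request to the FDB path (eswitch in
 * switchdev mode) or to the NIC path otherwise.
 */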
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);
	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule may be duplicated on a non-uplink representor
	 * that shares a tc block with other slaves of a lag device. rpriv can
	 * be NULL when this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

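/* FLOW_CLS_REPLACE handler: reject duplicate cookies (unless the duplicate
 * is the allowed bond/lag case above), offload the flow and publish it in
 * the per-block flow table keyed by the tc cookie.
 */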
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EAGAIN;
	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* The same flow rule offloaded to a non-uplink representor
		 * sharing the tc block is allowed; just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;
		NL_SET_ERR_MSG_MOD(extack, "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n", f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;
	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;
	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}

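/* FLOW_CLS_DESTROY handler: the DELETED flag guards against a concurrent
 * double delete; only the first caller removes the flow from the table and
 * drops its reference.
 */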
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}
	/* Only delete the flow if it doesn't have the DELETED flag set yet. */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);
	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}

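/* FLOW_CLS_STATS handler: also folds in the peer flow's counter, since
 * under multipath one of the two rules may be offloaded while the other
 * is not.
 */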
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0, packets = 0, bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}
	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;
	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2, packets2, lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

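/* Translate a tc police rate into a vport QoS setting. The rate arrives in
 * bytes/sec; e.g. 125000 bytes/sec is 1000000 bits/sec and is programmed as
 * 1 mbit/sec. A rate of 0 clears the vport rate limit.
 */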
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec. First convert to bits/sec and then
	 * round to the nearest mbit/sec (1 mbit = 10^6 bits). If rate is
	 * non-zero, configure a minimum of 1 mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err, i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}
	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}
	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload doesn't support packets per second");
				return -EOPNOTSUPP;
			}
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;
			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

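/* When a peer device is being unregistered, walk the hairpin table, take a
 * reference on every live entry, and clear the hairpin pairs that point at
 * the departing peer's vhca before dropping the references again.
 */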
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}

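/* Set up NIC-mode tc offload state: the flow-table chains, the chain
 * mapping used to restore the chain id on a miss, CT offload support and
 * the netdev notifier that handles hairpin peer removal.
 */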
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_chains;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
}

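/* Set up uplink-representor tc offload state: post-action and CT contexts,
 * optional sampling, the tunnel and tunnel-options mappings used to restore
 * packet metadata on a miss, the flow table and the encap infrastructure.
 */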
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
#endif

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* 0xFFF is reserved for the stack devices' slow path table mark */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	rhashtable_destroy(tc_ht);
err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
#endif
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
#endif
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}

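/* On a NIC-mode miss, reg_b carries the chain tag and the zone restore id.
 * Look the tag up in the chain mapping, attach a tc_skb_ext carrying the
 * restored chain, and let CT restore the conntrack state before the skb
 * continues in software.
 */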
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;
		zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
				  ESW_ZONE_ID_MASK;
		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}