/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en/tc/post_act.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
/* To avoid a false lock dependency warning, give the tc_ht lock a class of
 * its own, different from the lock class of the hashtables used by the flow
 * groups. When the last flow of a group is deleted and then the group itself
 * is deleted, we get into del_sw_flow_group(), which calls
 * rhashtable_destroy() on fg->ftes_hash; that takes the ht->mutex of that
 * table, which is a different mutex than the tc_ht one here.
 */
static struct lock_class_key tc_ht_lock_key;
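/* Usage sketch (hedged; the init path is outside this excerpt): the key is
 * meant to be attached to the tc_ht mutex right after the hashtable is
 * initialized, along the lines of:
 *
 *	err = rhashtable_init(tc_ht, &tc_ht_params);
 *	if (err)
 *		return err;
 *	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
 *
 * which gives tc_ht->mutex a lock class distinct from the fg->ftes_hash
 * mutex taken inside del_sw_flow_group().
 */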
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* Shift the new value and mask to the mapping's offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* Zero the mapping's bits in the current match */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* OR in the new value and mask */
	curr_mask |= mask;
	curr_val |= val;

	/* Convert back to be32 and write out */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
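/* Worked example (illustrative values, assuming the CHAIN_TO_REG mapping
 * above occupies bits 15:0 of reg_c_0): with moffset 0 and mlen 16,
 * max_mask = GENMASK(15, 0) = 0xffff. Matching chain id 0x5 with mask
 * 0xffff clears bits 15:0 of the current reg_c_0 match and ORs in
 * 0x5/0xffff, leaving a VPORT_TO_REG match in bits 31:16 untouched.
 */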
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has a 5-bit length field, and 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
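/* Example of the length encoding handled above: a 16-bit field is programmed
 * with length = 16, but a full 32-bit register write is programmed with
 * length = 0, because the 5-bit set_action_in length field cannot hold the
 * value 32.
 */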
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return priv->fs.tc.post_act;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return ((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		(attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER));
}

static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = mlx5e_tc_meter_get(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	attr->meter_attr.meter = meter;
	attr->dest_ft = mlx5e_tc_meter_get_post_meter_ft(meter->flow_meters);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;
}
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
			&attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
					       spec, attr,
					       mod_hdr_acts);
	}

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
		return;
	}

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_meter_put(attr->meter_attr.meter);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
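/* Usage sketch, mirroring the call in mlx5e_tc_add_fdb_flow() below: callers
 * that only need success/failure use this wrapper, e.g.
 *
 *	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
 *					MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
 *					metadata);
 *	if (err)
 *		goto err_out;
 *
 * while callers that later rewrite the action keep the id returned by
 * mlx5e_tc_match_to_reg_set_and_get_id() and pass it to
 * mlx5e_tc_match_to_reg_mod_hdr_change().
 */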
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has a 5-bit length field, and 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
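/* Reference-counting sketch (hedged; the actual lookup sites are outside this
 * excerpt): lookups take a temporary reference via mlx5e_flow_get() and must
 * pair it with mlx5e_flow_put(), roughly:
 *
 *	flow = mlx5e_flow_get(rhashtable_lookup_fast(tc_ht, &cookie,
 *						     tc_ht_params));
 *	if (IS_ERR(flow))
 *		return PTR_ERR(flow);
 *	...
 *	mlx5e_flow_put(priv, flow);
 */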
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
static struct mlx5_core_dev *
mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using
	 * mdev after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is
	 * then stored in a hairpin object, which exists until all flows that refer to
	 * it are removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer
	 * net_device may be removed/unbound while there are still some hairpin flows
	 * that are using it. This case is handled by mlx5e_tc_hairpin_update_dead_peer,
	 * which is hooked to the NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(priv->fs.ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
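/* Worked example: peer_vhca_id 0x12 and prio 3 hash to
 * (0x12 << 16 | 3) = 0x120003, so entries collide only when both the peer
 * vhca id and the matched PCP priority are equal.
 */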
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
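/* Examples of the policy above (illustrative): a rule with no vlan match or no
 * priority match gets UNKNOWN_MATCH_PRIO (8, outside the 0..7 PCP range); a
 * match on vlan priority 5 with the full 0x7 mask yields *match_prio = 5; a
 * partial mask such as 0x4 is rejected with -EOPNOTSUPP.
 */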
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 16;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
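	/* Worked example: on a 100Gbps port, link_speed = 100000, so
	 * link_speed64 = 100000 / 50000 = 2 and the hairpin is created with
	 * two channel pairs; anything at or below 50Gbps gets one.
	 */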
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size,
		   params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In the LAG case we may get devices from different eswitch
		 * instances. If we fail to get the vport num, it most likely
		 * means we are on the wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mod_hdr;

	return 0;
}
static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *encap_valid,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;
	*encap_valid = true;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev, encap_valid);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool *vf_tun)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun, encap_valid;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
	if (err)
		goto err_out;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
		if (err)
			goto err_out;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid || flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	clean_encap_dests(priv, flow, attr, &vf_tun);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	free_flow_post_acts(flow);

	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
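	/* Worked example (illustrative; the actual widths come from
	 * ESW_TUN_OPTS_BITS): assuming ENC_OPTS_BITS = 12, tun_id 0x3 and
	 * enc_opts_id 0x7 pack into value = (0x3 << 12) | 0x7 = 0x3007.
	 * When no enc opts mapping was allocated (enc_opts_id == 0), the low
	 * ENC_OPTS_BITS are masked out of the register match, so any options
	 * value maps back to the same tunnel entry.
	 */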
	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
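/* Example of the fallback above: matching n_proto 0x0800 (ETH_P_IP) with a
 * full mask on a device with the ip_version capability sets ip_version = 4
 * in the match; on older devices, or for any other ethertype such as
 * ETH_P_ARP (0x0806), the raw ethertype field is matched instead.
 */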
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* If ip_version isn't matched, derive it from the matched ethertype */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
 * It changes the inner ip_ecn depending on the inner and outer ip_ecn as follows:
 * +---------+----------------------------------------+
 * |Arriving |         Arriving Outer Header          |
 * |   Inner +---------+---------+---------+----------+
 * |  Header | Not-ECT | ECT(0)  | ECT(1)  |   CE     |
 * +---------+---------+---------+---------+----------+
 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>   |
 * |  ECT(0) |  ECT(0) | ECT(0)  | ECT(1)  |   CE*    |
 * |  ECT(1) |  ECT(1) | ECT(1)  | ECT(1)* |   CE*    |
 * |    CE   |   CE    |   CE    |   CE    |   CE     |
 * +---------+---------+---------+---------+----------+
 *
 * Tc matches on the inner header after decapsulation on the tunnel device, but
 * hw offload matches the inner ip_ecn value before the hardware decap action.
 *
 * The cells marked with (*) are changed from the original inner packet ip_ecn
 * value during decap, so matching those values on the inner ip_ecn before decap
 * will fail.
 *
 * The following helper allows offload when the inner ip_ecn won't be changed by
 * the outer ip_ecn, except for outer ip_ecn = CE, where in all cases the inner
 * ip_ecn will be changed to CE, and as such we can drop the inner ip_ecn = CE
 * match.
 */
2134 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2135 struct flow_cls_offload *f,
2136 bool *match_inner_ecn)
2138 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2139 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2140 struct netlink_ext_ack *extack = f->common.extack;
2141 struct flow_match_ip match;
2143 *match_inner_ecn = true;
2145 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2146 flow_rule_match_enc_ip(rule, &match);
2147 outer_ecn_key = match.key->tos & INET_ECN_MASK;
2148 outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
2151 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2152 flow_rule_match_ip(rule, &match);
2153 inner_ecn_key = match.key->tos & INET_ECN_MASK;
2154 inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
2157 if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
2158 NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
2159 netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
2163 if (!outer_ecn_mask) {
2164 if (!inner_ecn_mask)
2167 NL_SET_ERR_MSG_MOD(extack,
2168 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2169 netdev_warn(priv->netdev,
2170 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2174 if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
2175 NL_SET_ERR_MSG_MOD(extack,
2176 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2177 netdev_warn(priv->netdev,
2178 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2182 if (!inner_ecn_mask)
2185 /* Both inner and outer have full mask on ecn */
2187 if (outer_ecn_key == INET_ECN_ECT_1) {
2188 /* inner ecn might change by DECAP action */
2190 NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
2191 netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
2195 if (outer_ecn_key != INET_ECN_CE)
2198 if (inner_ecn_key != INET_ECN_CE) {
2199 /* Can't happen in software, as packet ecn will be changed to CE after decap */
NL_SET_ERR_MSG_MOD(extack,
"Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
netdev_warn(priv->netdev,
"Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
/* outer ecn = CE, inner ecn = CE: as decap will change inner ecn to CE
* in any case, drop the match on inner ecn
*/
2210 *match_inner_ecn = false;
2215 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2216 struct mlx5e_tc_flow *flow,
2217 struct mlx5_flow_spec *spec,
2218 struct flow_cls_offload *f,
2219 struct net_device *filter_dev,
2223 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2224 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2225 struct netlink_ext_ack *extack = f->common.extack;
2226 bool needs_mapping, sets_mapping;
2229 if (!mlx5e_is_eswitch_flow(flow)) {
2230 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2234 needs_mapping = !!flow->attr->chain;
2235 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2236 *match_inner = !needs_mapping;
2238 if ((needs_mapping || sets_mapping) &&
2239 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
NL_SET_ERR_MSG_MOD(extack,
"Chains on tunnel devices aren't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices aren't supported without register loopback support");
2247 if (!flow->attr->chain) {
2248 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2251 NL_SET_ERR_MSG_MOD(extack,
2252 "Failed to parse tunnel attributes");
2253 netdev_warn(priv->netdev,
2254 "Failed to parse tunnel attributes");
/* With mpls over udp we decapsulate using packet reformat
* object
*/
2261 if (!netif_is_bareudp(filter_dev))
2262 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2263 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2266 } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2267 struct mlx5_flow_spec *tmp_spec;
2269 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2271 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
2272 netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
2275 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2277 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2280 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2281 netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2284 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2290 if (!needs_mapping && !sets_mapping)
2293 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2296 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2298 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2302 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2304 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2308 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2310 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2314 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2316 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2320 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2322 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2323 get_match_inner_headers_value(spec) :
2324 get_match_outer_headers_value(spec);
2327 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2329 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2330 get_match_inner_headers_criteria(spec) :
2331 get_match_outer_headers_criteria(spec);
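/* Editor's note, hedged usage sketch: callers pass the rule's action flags,
* e.g.
*	headers_v = mlx5e_get_match_headers_value(attr->action, &parse_attr->spec);
* For a flow carrying MLX5_FLOW_CONTEXT_ACTION_DECAP this resolves to the
* inner headers, since after decap the rule logically matches the inner
* packet; otherwise the outer headers are returned.
*/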
2334 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2335 struct flow_cls_offload *f)
2337 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2338 struct netlink_ext_ack *extack = f->common.extack;
2339 struct net_device *ingress_dev;
2340 struct flow_match_meta match;
2342 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2345 flow_rule_match_meta(rule, &match);
2346 if (!match.mask->ingress_ifindex)
2349 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2350 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2354 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2355 match.key->ingress_ifindex);
2357 NL_SET_ERR_MSG_MOD(extack,
2358 "Can't find the ingress port to match on");
2362 if (ingress_dev != filter_dev) {
2363 NL_SET_ERR_MSG_MOD(extack,
2364 "Can't match on the ingress filter port");
2371 static bool skip_key_basic(struct net_device *filter_dev,
2372 struct flow_cls_offload *f)
2374 /* When doing mpls over udp decap, the user needs to provide
2375 * MPLS_UC as the protocol in order to be able to match on mpls
2376 * label fields. However, the actual ethertype is IP so we want to
2377 * avoid matching on this, otherwise we'll fail the match.
2379 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
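/* Editor's illustration (hypothetical filter): an MPLS-over-UDP decap rule
* on a bareudp device is typically added along the lines of
*	tc filter add dev bareudp0 ... protocol mpls_uc \
*		flower mpls_label 100 action ...
* The MPLS_UC ethertype exists only so flower accepts the mpls keys; the
* packet's real ethertype after decap is IP, so the basic key is skipped for
* chain 0 on bareudp devices to avoid a guaranteed mismatch.
*/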
2385 static int __parse_cls_flower(struct mlx5e_priv *priv,
2386 struct mlx5e_tc_flow *flow,
2387 struct mlx5_flow_spec *spec,
2388 struct flow_cls_offload *f,
2389 struct net_device *filter_dev,
2390 u8 *inner_match_level, u8 *outer_match_level)
2392 struct netlink_ext_ack *extack = f->common.extack;
2393 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2395 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2397 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2399 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2401 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2403 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2405 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2406 struct flow_dissector *dissector = rule->match.dissector;
2407 enum fs_flow_table_type fs_type;
2408 bool match_inner_ecn = true;
2414 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2415 match_level = outer_match_level;
2417 if (dissector->used_keys &
2418 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2419 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2420 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2421 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2422 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2423 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2424 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2425 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2426 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2427 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2428 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2429 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2430 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2431 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2432 BIT(FLOW_DISSECTOR_KEY_TCP) |
2433 BIT(FLOW_DISSECTOR_KEY_IP) |
2434 BIT(FLOW_DISSECTOR_KEY_CT) |
2435 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2436 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2437 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2438 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2439 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2440 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2441 dissector->used_keys);
2445 if (mlx5e_get_tc_tun(filter_dev)) {
2446 bool match_inner = false;
2448 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2449 outer_match_level, &match_inner);
2454 /* header pointers should point to the inner headers
2455 * if the packet was decapsulated already.
2456 * outer headers are set by parse_tunnel_attr.
2458 match_level = inner_match_level;
2459 headers_c = get_match_inner_headers_criteria(spec);
2460 headers_v = get_match_inner_headers_value(spec);
2463 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2468 err = mlx5e_flower_parse_meta(filter_dev, f);
2472 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2473 !skip_key_basic(filter_dev, f)) {
2474 struct flow_match_basic match;
2476 flow_rule_match_basic(rule, &match);
2477 mlx5e_tc_set_ethertype(priv->mdev, &match,
2478 match_level == outer_match_level,
2479 headers_c, headers_v);
2481 if (match.mask->n_proto)
2482 *match_level = MLX5_MATCH_L2;
2484 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2485 is_vlan_dev(filter_dev)) {
2486 struct flow_dissector_key_vlan filter_dev_mask;
2487 struct flow_dissector_key_vlan filter_dev_key;
2488 struct flow_match_vlan match;
2490 if (is_vlan_dev(filter_dev)) {
2491 match.key = &filter_dev_key;
2492 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2493 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2494 match.key->vlan_priority = 0;
2495 match.mask = &filter_dev_mask;
2496 memset(match.mask, 0xff, sizeof(*match.mask));
2497 match.mask->vlan_priority = 0;
2499 flow_rule_match_vlan(rule, &match);
2501 if (match.mask->vlan_id ||
2502 match.mask->vlan_priority ||
2503 match.mask->vlan_tpid) {
2504 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2505 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2507 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2510 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2512 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2516 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2517 match.mask->vlan_id);
2518 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2519 match.key->vlan_id);
2521 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2522 match.mask->vlan_priority);
2523 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2524 match.key->vlan_priority);
2526 *match_level = MLX5_MATCH_L2;
2528 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2529 match.mask->vlan_eth_type &&
2530 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2531 ft_field_support.outer_second_vid,
2533 MLX5_SET(fte_match_set_misc, misc_c,
2534 outer_second_cvlan_tag, 1);
2535 spec->match_criteria_enable |=
2536 MLX5_MATCH_MISC_PARAMETERS;
2539 } else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and
* disabled in match value means both S & C tags
* don't exist (the packet is untagged for both)
*/
2544 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2545 *match_level = MLX5_MATCH_L2;
2548 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2549 struct flow_match_vlan match;
2551 flow_rule_match_cvlan(rule, &match);
2552 if (match.mask->vlan_id ||
2553 match.mask->vlan_priority ||
2554 match.mask->vlan_tpid) {
2555 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2557 NL_SET_ERR_MSG_MOD(extack,
2558 "Matching on CVLAN is not supported");
2562 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2563 MLX5_SET(fte_match_set_misc, misc_c,
2564 outer_second_svlan_tag, 1);
2565 MLX5_SET(fte_match_set_misc, misc_v,
2566 outer_second_svlan_tag, 1);
2568 MLX5_SET(fte_match_set_misc, misc_c,
2569 outer_second_cvlan_tag, 1);
2570 MLX5_SET(fte_match_set_misc, misc_v,
2571 outer_second_cvlan_tag, 1);
2574 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2575 match.mask->vlan_id);
2576 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2577 match.key->vlan_id);
2578 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2579 match.mask->vlan_priority);
2580 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2581 match.key->vlan_priority);
2583 *match_level = MLX5_MATCH_L2;
2584 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2588 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2589 struct flow_match_eth_addrs match;
2591 flow_rule_match_eth_addrs(rule, &match);
2592 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2595 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2599 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2602 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2606 if (!is_zero_ether_addr(match.mask->src) ||
2607 !is_zero_ether_addr(match.mask->dst))
2608 *match_level = MLX5_MATCH_L2;
2611 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2612 struct flow_match_control match;
2614 flow_rule_match_control(rule, &match);
2615 addr_type = match.key->addr_type;
2617 /* the HW doesn't support frag first/later */
2618 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2619 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2623 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2624 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2625 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2626 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2628 /* the HW doesn't need L3 inline to match on frag=no */
2629 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2630 *match_level = MLX5_MATCH_L2;
2631 /* *** L2 attributes parsing up to here *** */
2633 *match_level = MLX5_MATCH_L3;
2637 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2638 struct flow_match_basic match;
2640 flow_rule_match_basic(rule, &match);
2641 ip_proto = match.key->ip_proto;
2643 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2644 match.mask->ip_proto);
2645 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2646 match.key->ip_proto);
2648 if (match.mask->ip_proto)
2649 *match_level = MLX5_MATCH_L3;
2652 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2653 struct flow_match_ipv4_addrs match;
2655 flow_rule_match_ipv4_addrs(rule, &match);
2656 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2657 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2658 &match.mask->src, sizeof(match.mask->src));
2659 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2660 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2661 &match.key->src, sizeof(match.key->src));
2662 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2663 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2664 &match.mask->dst, sizeof(match.mask->dst));
2665 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2666 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2667 &match.key->dst, sizeof(match.key->dst));
2669 if (match.mask->src || match.mask->dst)
2670 *match_level = MLX5_MATCH_L3;
2673 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2674 struct flow_match_ipv6_addrs match;
2676 flow_rule_match_ipv6_addrs(rule, &match);
2677 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2678 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2679 &match.mask->src, sizeof(match.mask->src));
2680 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2681 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2682 &match.key->src, sizeof(match.key->src));
2684 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2685 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2686 &match.mask->dst, sizeof(match.mask->dst));
2687 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2688 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2689 &match.key->dst, sizeof(match.key->dst));
2691 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2692 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2693 *match_level = MLX5_MATCH_L3;
2696 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2697 struct flow_match_ip match;
2699 flow_rule_match_ip(rule, &match);
2700 if (match_inner_ecn) {
2701 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2702 match.mask->tos & 0x3);
2703 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2704 match.key->tos & 0x3);
2707 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2708 match.mask->tos >> 2);
2709 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2710 match.key->tos >> 2);
2712 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2714 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2717 if (match.mask->ttl &&
2718 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2719 ft_field_support.outer_ipv4_ttl)) {
2720 NL_SET_ERR_MSG_MOD(extack,
2721 "Matching on TTL is not supported");
2725 if (match.mask->tos || match.mask->ttl)
2726 *match_level = MLX5_MATCH_L3;
2729 /* *** L3 attributes parsing up to here *** */
2731 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2732 struct flow_match_ports match;
2734 flow_rule_match_ports(rule, &match);
2737 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2738 tcp_sport, ntohs(match.mask->src));
2739 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2740 tcp_sport, ntohs(match.key->src));
2742 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2743 tcp_dport, ntohs(match.mask->dst));
2744 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2745 tcp_dport, ntohs(match.key->dst));
2749 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2750 udp_sport, ntohs(match.mask->src));
2751 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2752 udp_sport, ntohs(match.key->src));
2754 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2755 udp_dport, ntohs(match.mask->dst));
2756 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2757 udp_dport, ntohs(match.key->dst));
2760 NL_SET_ERR_MSG_MOD(extack,
2761 "Only UDP and TCP transports are supported for L4 matching");
netdev_err(priv->netdev,
"Only UDP and TCP transports are supported\n");
2767 if (match.mask->src || match.mask->dst)
2768 *match_level = MLX5_MATCH_L4;
2771 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2772 struct flow_match_tcp match;
2774 flow_rule_match_tcp(rule, &match);
2775 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2776 ntohs(match.mask->flags));
2777 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2778 ntohs(match.key->flags));
2780 if (match.mask->flags)
2781 *match_level = MLX5_MATCH_L4;
2783 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2784 struct flow_match_icmp match;
2786 flow_rule_match_icmp(rule, &match);
2789 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2790 MLX5_FLEX_PROTO_ICMP)) {
2791 NL_SET_ERR_MSG_MOD(extack,
2792 "Match on Flex protocols for ICMP is not supported");
2795 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2797 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2799 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2801 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2804 case IPPROTO_ICMPV6:
2805 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2806 MLX5_FLEX_PROTO_ICMPV6)) {
2807 NL_SET_ERR_MSG_MOD(extack,
2808 "Match on Flex protocols for ICMPV6 is not supported");
2811 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2813 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2815 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2817 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2821 NL_SET_ERR_MSG_MOD(extack,
2822 "Code and type matching only with ICMP and ICMPv6");
2823 netdev_err(priv->netdev,
2824 "Code and type matching only with ICMP and ICMPv6\n");
2827 if (match.mask->code || match.mask->type) {
2828 *match_level = MLX5_MATCH_L4;
2829 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2832 /* Currently supported only for MPLS over UDP */
2833 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2834 !netif_is_bareudp(filter_dev)) {
2835 NL_SET_ERR_MSG_MOD(extack,
2836 "Matching on MPLS is supported only for MPLS over UDP");
2837 netdev_err(priv->netdev,
2838 "Matching on MPLS is supported only for MPLS over UDP\n");
2845 static int parse_cls_flower(struct mlx5e_priv *priv,
2846 struct mlx5e_tc_flow *flow,
2847 struct mlx5_flow_spec *spec,
2848 struct flow_cls_offload *f,
2849 struct net_device *filter_dev)
2851 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2852 struct netlink_ext_ack *extack = f->common.extack;
2853 struct mlx5_core_dev *dev = priv->mdev;
2854 struct mlx5_eswitch *esw = dev->priv.eswitch;
2855 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2856 struct mlx5_eswitch_rep *rep;
2857 bool is_eswitch_flow;
2860 inner_match_level = MLX5_MATCH_NONE;
2861 outer_match_level = MLX5_MATCH_NONE;
2863 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2864 &inner_match_level, &outer_match_level);
2865 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2866 outer_match_level : inner_match_level;
2868 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2869 if (!err && is_eswitch_flow) {
2871 if (rep->vport != MLX5_VPORT_UPLINK &&
2872 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2873 esw->offloads.inline_mode < non_tunnel_match_level)) {
2874 NL_SET_ERR_MSG_MOD(extack,
2875 "Flow is not offloaded due to min inline setting");
2876 netdev_warn(priv->netdev,
2877 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2878 non_tunnel_match_level, esw->offloads.inline_mode);
2883 flow->attr->inner_match_level = inner_match_level;
2884 flow->attr->outer_match_level = outer_match_level;
2890 struct mlx5_fields {
2898 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2899 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2900 offsetof(struct pedit_headers, field) + (off), \
2901 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
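/* Example expansion (editor's illustration):
*	OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit)
* becomes
*	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 8, U8_MAX,
*	 offsetof(struct pedit_headers, ip4.ttl) + (0),
*	 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, ttl_hoplimit)}
* tying a pedit header offset to the HW rewrite field id and to the byte
* offset of the corresponding match field.
*/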
/* masked values are the same and there are no rewrites that do not have a
* match
*/
2906 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2907 type matchmaskx = *(type *)(matchmaskp); \
2908 type matchvalx = *(type *)(matchvalp); \
2909 type maskx = *(type *)(maskp); \
2910 type valx = *(type *)(valp); \
2912 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2916 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2917 void *matchmaskp, u8 bsize)
2923 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2926 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2929 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
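/* Worked example (editor's illustration): for a u8 field with rewrite
* val = 0x0a, mask = 0xff and an existing match of matchval = 0x0a,
* matchmask = 0xff:
*	(0x0a & 0xff) == (0x0a & 0xff)	-> the rewrite writes what already
*					   matches, and
*	!(0xff & (0xff ^ 0xff))		-> no rewritten bit falls outside
*					   the match mask,
* so cmp_val_mask() returns true and offload_pedit_fields() skips the
* redundant rewrite.
*/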
2936 static struct mlx5_fields fields[] = {
2937 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2938 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2939 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2940 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2941 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2942 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2944 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2945 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2946 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2947 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2949 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2950 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2951 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2952 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2953 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2954 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2955 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2956 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2957 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2958 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2959 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2960 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2961 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2962 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2963 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2964 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2965 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2966 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2968 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2969 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
/* in the linux tcphdr, tcp_flags is 8 bits long and sits 5 bytes
* past ack_seq
*/
2971 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2973 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2974 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2977 static unsigned long mask_to_le(unsigned long mask, int size)
2983 mask_be32 = (__force __be32)(mask);
2984 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2985 } else if (size == 16) {
2986 mask_be32 = (__force __be32)(mask);
2987 mask_be16 = *(__be16 *)&mask_be32;
2988 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
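/* Worked example (editor's illustration, little-endian host): a 32-bit pedit
* mask arrives in network byte order, so htonl(0x0000ffff) reads back from
* memory as 0xffff0000. mask_to_le() turns it into 0x0000ffff, letting the
* find_*_bit() helpers in offload_pedit_fields() report first = 0 and
* last = 15, i.e. the low 16 bits of the field.
*/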
2994 static int offload_pedit_fields(struct mlx5e_priv *priv,
2996 struct mlx5e_tc_flow_parse_attr *parse_attr,
2998 struct netlink_ext_ack *extack)
3000 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3001 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3002 void *headers_c, *headers_v, *action, *vals_p;
3003 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
3004 struct mlx5e_tc_mod_hdr_acts *mod_acts;
3005 unsigned long mask, field_mask;
3006 int i, first, last, next_z;
3007 struct mlx5_fields *f;
3010 mod_acts = &parse_attr->mod_hdr_acts;
3011 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3012 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3014 set_masks = &hdrs[0].masks;
3015 add_masks = &hdrs[1].masks;
3016 set_vals = &hdrs[0].vals;
3017 add_vals = &hdrs[1].vals;
3019 for (i = 0; i < ARRAY_SIZE(fields); i++) {
3023 /* avoid seeing bits set from previous iterations */
3027 s_masks_p = (void *)set_masks + f->offset;
3028 a_masks_p = (void *)add_masks + f->offset;
3030 s_mask = *s_masks_p & f->field_mask;
3031 a_mask = *a_masks_p & f->field_mask;
3033 if (!s_mask && !a_mask) /* nothing to offload here */
3036 if (s_mask && a_mask) {
3037 NL_SET_ERR_MSG_MOD(extack,
3038 "can't set and add to the same HW field");
3039 netdev_warn(priv->netdev,
3040 "mlx5: can't set and add to the same HW field (%x)\n",
3047 void *match_mask = headers_c + f->match_offset;
3048 void *match_val = headers_v + f->match_offset;
3050 cmd = MLX5_ACTION_TYPE_SET;
3052 vals_p = (void *)set_vals + f->offset;
3053 /* don't rewrite if we have a match on the same value */
3054 if (cmp_val_mask(vals_p, s_masks_p, match_val,
3055 match_mask, f->field_bsize))
3057 /* clear to denote we consumed this field */
3058 *s_masks_p &= ~f->field_mask;
3060 cmd = MLX5_ACTION_TYPE_ADD;
3062 vals_p = (void *)add_vals + f->offset;
/* adding 0 is a no-op */
3064 if ((*(u32 *)vals_p & f->field_mask) == 0)
3066 /* clear to denote we consumed this field */
3067 *a_masks_p &= ~f->field_mask;
3072 mask = mask_to_le(mask, f->field_bsize);
3074 first = find_first_bit(&mask, f->field_bsize);
3075 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3076 last = find_last_bit(&mask, f->field_bsize);
if (first < next_z && next_z < last) {
NL_SET_ERR_MSG_MOD(extack,
"rewrite of non-contiguous sub-fields isn't supported");
netdev_warn(priv->netdev,
"mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
3086 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3087 if (IS_ERR(action)) {
3088 NL_SET_ERR_MSG_MOD(extack,
3089 "too many pedit actions, can't offload");
3090 mlx5_core_warn(priv->mdev,
3091 "mlx5: parsed %d pedit actions, can't do more\n",
3092 mod_acts->num_actions);
3093 return PTR_ERR(action);
3096 MLX5_SET(set_action_in, action, action_type, cmd);
3097 MLX5_SET(set_action_in, action, field, f->field);
3099 if (cmd == MLX5_ACTION_TYPE_SET) {
3102 field_mask = mask_to_le(f->field_mask, f->field_bsize);
/* a bit-sized field may not start at the first bit */
3105 start = find_first_bit(&field_mask, f->field_bsize);
3107 MLX5_SET(set_action_in, action, offset, first - start);
3108 /* length is num of bits to be written, zero means length of 32 */
3109 MLX5_SET(set_action_in, action, length, (last - first + 1));
3112 if (f->field_bsize == 32)
3113 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3114 else if (f->field_bsize == 16)
3115 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3116 else if (f->field_bsize == 8)
3117 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
3119 ++mod_acts->num_actions;
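/* Worked example (editor's illustration): rewriting only the upper 16 bits
* of a 32-bit field yields mask 0xffff0000 after mask_to_le(), so first = 16,
* next_z = 32, last = 31 and a single SET action is emitted with offset 16
* and length 16. A mask with a hole such as 0xff0000ff gives
* first < next_z < last and is rejected above.
*/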
3125 static const struct pedit_headers zero_masks = {};
3127 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3128 struct mlx5e_tc_flow_parse_attr *parse_attr,
3129 struct netlink_ext_ack *extack)
3131 struct pedit_headers *cmd_masks;
3134 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3135 cmd_masks = &parse_attr->hdrs[cmd].masks;
3136 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3137 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3138 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3139 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3140 16, 1, cmd_masks, sizeof(zero_masks), true);
3148 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3149 struct mlx5e_tc_flow_parse_attr *parse_attr,
3151 struct netlink_ext_ack *extack)
3155 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3157 goto out_dealloc_parsed_actions;
3159 err = verify_offload_pedit_fields(priv, parse_attr, extack);
3161 goto out_dealloc_parsed_actions;
3165 out_dealloc_parsed_actions:
3166 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3170 struct ip_ttl_word {
3176 struct ipv6_hoplimit_word {
3183 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
3184 bool *modify_ip_header, bool *modify_tuple,
3185 struct netlink_ext_ack *extack)
3190 htype = act->mangle.htype;
3191 offset = act->mangle.offset;
3192 mask = ~act->mangle.mask;
/* For the IPv4 & IPv6 headers, check the containing 4-byte word
* to determine whether the modified fields are ttl & hop_limit
* only.
*/
3197 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3198 struct ip_ttl_word *ttl_word =
3199 (struct ip_ttl_word *)&mask;
3201 if (offset != offsetof(struct iphdr, ttl) ||
3202 ttl_word->protocol ||
3204 *modify_ip_header = true;
3207 if (offset >= offsetof(struct iphdr, saddr))
3208 *modify_tuple = true;
3210 if (ct_flow && *modify_tuple) {
3211 NL_SET_ERR_MSG_MOD(extack,
3212 "can't offload re-write of ipv4 address with action ct");
3215 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3216 struct ipv6_hoplimit_word *hoplimit_word =
3217 (struct ipv6_hoplimit_word *)&mask;
3219 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3220 hoplimit_word->payload_len ||
3221 hoplimit_word->nexthdr) {
3222 *modify_ip_header = true;
3225 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3226 *modify_tuple = true;
3228 if (ct_flow && *modify_tuple) {
3229 NL_SET_ERR_MSG_MOD(extack,
3230 "can't offload re-write of ipv6 address with action ct");
3233 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3234 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3235 *modify_tuple = true;
3237 NL_SET_ERR_MSG_MOD(extack,
3238 "can't offload re-write of transport header ports with action ct");
3246 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3247 bool ct_flow, struct netlink_ext_ack *extack,
3248 struct mlx5e_priv *priv,
3249 struct mlx5_flow_spec *spec)
3251 if (!modify_tuple || ct_clear)
3255 NL_SET_ERR_MSG_MOD(extack,
3256 "can't offload tuple modification with non-clear ct()");
3257 netdev_info(priv->netdev,
3258 "can't offload tuple modification with non-clear ct()");
/* Add a ct_state=-trk match so it will be offloaded for non-ct flows
* (or after a clear action); otherwise, since the tuple is changed,
* we can't restore the ct state.
*/
3266 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3267 NL_SET_ERR_MSG_MOD(extack,
3268 "can't offload tuple modification with ct matches and no ct(clear) action");
3269 netdev_info(priv->netdev,
3270 "can't offload tuple modification with ct matches and no ct(clear) action");
3277 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3278 struct mlx5_flow_spec *spec,
3279 struct flow_action *flow_action,
3280 u32 actions, bool ct_flow,
3282 struct netlink_ext_ack *extack)
3284 const struct flow_action_entry *act;
3285 bool modify_ip_header, modify_tuple;
3292 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3293 headers_v = mlx5e_get_match_headers_value(actions, spec);
3294 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3296 /* for non-IP we only re-write MACs, so we're okay */
3297 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3298 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3301 modify_ip_header = false;
3302 modify_tuple = false;
3303 flow_action_for_each(i, act, flow_action) {
3304 if (act->id != FLOW_ACTION_MANGLE &&
3305 act->id != FLOW_ACTION_ADD)
3308 if (!is_action_keys_supported(act, ct_flow,
3310 &modify_tuple, extack))
3314 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3318 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3319 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3320 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3321 NL_SET_ERR_MSG_MOD(extack,
3322 "can't offload re-write of non TCP/UDP");
3323 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3333 actions_match_supported_fdb(struct mlx5e_priv *priv,
3334 struct mlx5e_tc_flow_parse_attr *parse_attr,
3335 struct mlx5e_tc_flow *flow,
3336 struct netlink_ext_ack *extack)
3338 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3339 bool ct_flow, ct_clear;
3341 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3342 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3344 if (esw_attr->split_count && ct_flow &&
3345 !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
/* All registers used by ct are cleared when using
* split rules.
*/
3349 NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
3353 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3354 NL_SET_ERR_MSG_MOD(extack,
3355 "current firmware doesn't support split rule for port mirroring");
3356 netdev_warn_once(priv->netdev,
3357 "current firmware doesn't support split rule for port mirroring\n");
3365 actions_match_supported(struct mlx5e_priv *priv,
3366 struct flow_action *flow_action,
3368 struct mlx5e_tc_flow_parse_attr *parse_attr,
3369 struct mlx5e_tc_flow *flow,
3370 struct netlink_ext_ack *extack)
3372 bool ct_flow, ct_clear;
3374 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3375 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3378 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3379 NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
3384 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3385 NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
3389 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3390 actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3391 NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
3396 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
3397 NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
3401 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3402 actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
3403 NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
3407 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3408 !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
3409 actions, ct_flow, ct_clear, extack))
3412 if (mlx5e_is_eswitch_flow(flow) &&
3413 !actions_match_supported_fdb(priv, parse_attr, flow, extack))
3419 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3421 return priv->mdev == peer_priv->mdev;
3424 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3426 struct mlx5_core_dev *fmdev, *pmdev;
3427 u64 fsystem_guid, psystem_guid;
3430 pmdev = peer_priv->mdev;
3432 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3433 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3435 return (fsystem_guid == psystem_guid);
3439 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3440 struct mlx5e_tc_flow *flow,
3441 struct mlx5_flow_attr *attr,
3442 struct netlink_ext_ack *extack)
3444 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3445 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3446 enum mlx5_flow_namespace_type ns_type;
3449 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3450 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3453 ns_type = mlx5e_get_flow_namespace(flow);
3455 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3459 if (parse_attr->mod_hdr_acts.num_actions > 0)
3462 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3463 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3464 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3466 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3469 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3470 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3471 attr->esw_attr->split_count = 0;
3476 static struct mlx5_flow_attr*
3477 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3478 enum mlx5_flow_namespace_type ns_type)
3480 struct mlx5e_tc_flow_parse_attr *parse_attr;
3481 u32 attr_sz = ns_to_attr_sz(ns_type);
3482 struct mlx5_flow_attr *attr2;
3484 attr2 = mlx5_alloc_flow_attr(ns_type);
3485 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3486 if (!attr2 || !parse_attr) {
3492 memcpy(attr2, attr, attr_sz);
3493 INIT_LIST_HEAD(&attr2->list);
3494 parse_attr->filter_dev = attr->parse_attr->filter_dev;
3497 attr2->parse_attr = parse_attr;
3501 static struct mlx5_core_dev *
3502 get_flow_counter_dev(struct mlx5e_tc_flow *flow)
3504 return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
3507 struct mlx5_flow_attr *
3508 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3510 struct mlx5_esw_flow_attr *esw_attr;
3511 struct mlx5_flow_attr *attr;
3514 list_for_each_entry(attr, &flow->attrs, list) {
3515 esw_attr = attr->esw_attr;
3516 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3517 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3526 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3528 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3529 struct mlx5_flow_attr *attr;
3531 list_for_each_entry(attr, &flow->attrs, list) {
3532 if (list_is_last(&attr->list, &flow->attrs))
3535 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3540 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3542 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
3543 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3544 struct mlx5_flow_attr *attr, *tmp;
3547 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3548 if (list_is_last(&attr->list, &flow->attrs))
3551 if (attr->post_act_handle)
3552 mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
3554 clean_encap_dests(flow->priv, flow, attr, &vf_tun);
3556 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3557 mlx5_fc_destroy(counter_dev, attr->counter);
3559 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3560 mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
3561 if (attr->modify_hdr)
3562 mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
3565 list_del(&attr->list);
3566 kvfree(attr->parse_attr);
3572 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3574 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3575 struct mlx5_flow_attr *attr;
3578 list_for_each_entry(attr, &flow->attrs, list) {
3579 if (list_is_last(&attr->list, &flow->attrs))
3582 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3590 /* TC filter rule HW translation:
3592 * +---------------------+
3593 * + ft prio (tc chain) +
3594 * + original match +
3595 * +---------------------+
3597 * | if multi table action
3600 * +---------------------+
3601 * + post act ft |<----.
3602 * + match fte id | | split on multi table action
3603 * + do actions |-----'
3604 * +---------------------+
* Do the rest of the actions after the last multi-table action.
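*
* Example (editor's illustration): for the action list "ct, mirred", ct is a
* multi-table action, so the attribute list is split there: the chain table
* rule does the original match plus ct, then jumps to the post-act table,
* whose rule matches the fte id and executes the remaining mirred action.
*/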
3611 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3613 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3614 struct mlx5_flow_attr *attr, *next_attr = NULL;
3615 struct mlx5e_post_act_handle *handle;
3616 bool vf_tun, encap_valid = true;
3619 /* This is going in reverse order as needed.
3620 * The first entry is the last attribute.
3622 list_for_each_entry(attr, &flow->attrs, list) {
3624 /* Set counter action on last post act rule. */
3625 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3627 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3632 /* Don't add post_act rule for first attr (last in the list).
3633 * It's being handled by the caller.
3635 if (list_is_last(&attr->list, &flow->attrs))
3638 err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
3643 flow_flag_set(flow, SLOW);
3645 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3649 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3650 err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
3655 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3656 err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
3661 handle = mlx5e_tc_post_act_add(post_act, attr);
3662 if (IS_ERR(handle)) {
3663 err = PTR_ERR(handle);
3667 attr->post_act_handle = handle;
3671 if (flow_flag_test(flow, SLOW))
3674 err = mlx5e_tc_offload_flow_post_acts(flow);
3682 free_flow_post_acts(flow);
3687 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3688 struct flow_action *flow_action)
3690 struct netlink_ext_ack *extack = parse_state->extack;
3691 struct mlx5e_tc_flow_action flow_action_reorder;
3692 struct mlx5e_tc_flow *flow = parse_state->flow;
3693 struct mlx5_flow_attr *attr = flow->attr;
3694 enum mlx5_flow_namespace_type ns_type;
3695 struct mlx5e_priv *priv = flow->priv;
3696 struct flow_action_entry *act, **_act;
3697 struct mlx5e_tc_act *tc_act;
3700 flow_action_reorder.num_entries = flow_action->num_entries;
3701 flow_action_reorder.entries = kcalloc(flow_action->num_entries,
3702 sizeof(flow_action), GFP_KERNEL);
3703 if (!flow_action_reorder.entries)
3706 mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
3708 ns_type = mlx5e_get_flow_namespace(flow);
3709 list_add(&attr->list, &flow->attrs);
3711 flow_action_for_each(i, _act, &flow_action_reorder) {
3713 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3715 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3720 if (!tc_act->can_offload(parse_state, act, i, attr)) {
3725 err = tc_act->parse_action(parse_state, act, priv, attr);
3729 parse_state->actions |= attr->action;
3731 /* Split attr for multi table act if not the last act. */
3732 if (tc_act->is_multi_table_act &&
3733 tc_act->is_multi_table_act(priv, act, attr) &&
3734 i < flow_action_reorder.num_entries - 1) {
3735 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3739 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3745 list_add(&attr->list, &flow->attrs);
3749 kfree(flow_action_reorder.entries);
3751 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3753 goto out_free_post_acts;
3755 err = alloc_flow_post_acts(flow, extack);
3757 goto out_free_post_acts;
3762 kfree(flow_action_reorder.entries);
3764 free_flow_post_acts(flow);
3770 flow_action_supported(struct flow_action *flow_action,
3771 struct netlink_ext_ack *extack)
3773 if (!flow_action_has_entries(flow_action)) {
3774 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
3778 if (!flow_action_hw_stats_check(flow_action, extack,
3779 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
3780 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
3788 parse_tc_nic_actions(struct mlx5e_priv *priv,
3789 struct flow_action *flow_action,
3790 struct mlx5e_tc_flow *flow,
3791 struct netlink_ext_ack *extack)
3793 struct mlx5e_tc_act_parse_state *parse_state;
3794 struct mlx5e_tc_flow_parse_attr *parse_attr;
3795 struct mlx5_flow_attr *attr = flow->attr;
3798 err = flow_action_supported(flow_action, extack);
3802 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3803 parse_attr = attr->parse_attr;
3804 parse_state = &parse_attr->parse_state;
3805 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3806 parse_state->ct_priv = get_ct_priv(priv);
3808 err = parse_tc_actions(parse_state, flow_action);
3812 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
3816 if (!actions_match_supported(priv, flow_action, parse_state->actions,
3817 parse_attr, flow, extack))
3823 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3824 struct net_device *peer_netdev)
3826 struct mlx5e_priv *peer_priv;
3828 peer_priv = netdev_priv(peer_netdev);
3830 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3831 mlx5e_eswitch_vf_rep(priv->netdev) &&
3832 mlx5e_eswitch_vf_rep(peer_netdev) &&
3833 mlx5e_same_hw_devs(priv, peer_priv));
3836 static bool same_hw_reps(struct mlx5e_priv *priv,
3837 struct net_device *peer_netdev)
3839 struct mlx5e_priv *peer_priv;
3841 peer_priv = netdev_priv(peer_netdev);
3843 return mlx5e_eswitch_rep(priv->netdev) &&
3844 mlx5e_eswitch_rep(peer_netdev) &&
3845 mlx5e_same_hw_devs(priv, peer_priv);
3848 static bool is_lag_dev(struct mlx5e_priv *priv,
3849 struct net_device *peer_netdev)
3851 return ((mlx5_lag_is_sriov(priv->mdev) ||
3852 mlx5_lag_is_multipath(priv->mdev)) &&
3853 same_hw_reps(priv, peer_netdev));
3856 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
3858 if (mlx5e_eswitch_uplink_rep(out_dev) &&
3859 MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
3860 MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
3866 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3867 struct net_device *out_dev)
3869 if (is_merged_eswitch_vfs(priv, out_dev))
3872 if (is_multiport_eligible(priv, out_dev))
3875 if (is_lag_dev(priv, out_dev))
3878 return mlx5e_eswitch_rep(out_dev) &&
3879 same_port_devs(priv, netdev_priv(out_dev));
3882 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
3883 struct mlx5_flow_attr *attr,
3885 enum mlx5e_tc_int_port_type type,
3889 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
3890 struct mlx5e_tc_int_port_priv *int_port_priv;
3891 struct mlx5e_tc_flow_parse_attr *parse_attr;
3892 struct mlx5e_tc_int_port *dest_int_port;
3895 parse_attr = attr->parse_attr;
3896 int_port_priv = mlx5e_get_int_port_priv(priv);
3898 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
3899 if (IS_ERR(dest_int_port))
3900 return PTR_ERR(dest_int_port);
3902 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
3903 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
3904 mlx5e_tc_int_port_get_metadata(dest_int_port));
3906 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
3910 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3912 esw_attr->dest_int_port = dest_int_port;
3913 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
3915 /* Forward to root fdb for matching against the new source vport */
3916 attr->dest_chain = 0;
3922 parse_tc_fdb_actions(struct mlx5e_priv *priv,
3923 struct flow_action *flow_action,
3924 struct mlx5e_tc_flow *flow,
3925 struct netlink_ext_ack *extack)
3927 struct mlx5e_tc_act_parse_state *parse_state;
3928 struct mlx5e_tc_flow_parse_attr *parse_attr;
3929 struct mlx5_flow_attr *attr = flow->attr;
3930 struct mlx5_esw_flow_attr *esw_attr;
3933 err = flow_action_supported(flow_action, extack);
3937 esw_attr = attr->esw_attr;
3938 parse_attr = attr->parse_attr;
3939 parse_state = &parse_attr->parse_state;
3940 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3941 parse_state->ct_priv = get_ct_priv(priv);
3943 err = parse_tc_actions(parse_state, flow_action);
3947 /* Forward to/from internal port can only have 1 dest */
3948 if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
3949 esw_attr->out_count > 1) {
3950 NL_SET_ERR_MSG_MOD(extack,
3951 "Rules with internal port can have only one destination");
3955 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
3959 if (!actions_match_supported(priv, flow_action, parse_state->actions,
3960 parse_attr, flow, extack))
3966 static void get_flags(int flags, unsigned long *flow_flags)
3968 unsigned long __flow_flags = 0;
3970 if (flags & MLX5_TC_FLAG(INGRESS))
3971 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
3972 if (flags & MLX5_TC_FLAG(EGRESS))
3973 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
3975 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
3976 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
3977 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
3978 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
3979 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
3980 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
3982 *flow_flags = __flow_flags;
3985 static const struct rhashtable_params tc_ht_params = {
3986 .head_offset = offsetof(struct mlx5e_tc_flow, node),
3987 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
3988 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
3989 .automatic_shrinking = true,
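/* Editor's note: the flower cookie is the hash key, so
* mlx5e_configure_flower()/mlx5e_delete_flower() below resolve flows via
*	rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
*/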
3992 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
3993 unsigned long flags)
3995 struct mlx5e_rep_priv *rpriv;
3997 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
3998 rpriv = priv->ppriv;
3999 return &rpriv->tc_ht;
4000 } else /* NIC offload */
4001 return &priv->fs.tc.ht;
4004 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4006 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4007 struct mlx5_flow_attr *attr = flow->attr;
4008 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4009 flow_flag_test(flow, INGRESS);
4010 bool act_is_encap = !!(attr->action &
4011 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4012 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4013 MLX5_DEVCOM_ESW_OFFLOADS);
4018 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4019 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4020 (is_rep_ingress || act_is_encap))
4026 struct mlx5_flow_attr *
4027 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4029 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4030 sizeof(struct mlx5_esw_flow_attr) :
4031 sizeof(struct mlx5_nic_flow_attr);
4032 struct mlx5_flow_attr *attr;
4034 attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4038 INIT_LIST_HEAD(&attr->list);
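/* Editor's note, hedged usage sketch: callers pick the attr flavor by
* namespace, e.g.
*	attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
* which sizes the trailing area for struct mlx5_esw_flow_attr rather than
* struct mlx5_nic_flow_attr.
*/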
4043 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4044 struct flow_cls_offload *f, unsigned long flow_flags,
4045 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4046 struct mlx5e_tc_flow **__flow)
4048 struct mlx5e_tc_flow_parse_attr *parse_attr;
4049 struct mlx5_flow_attr *attr;
4050 struct mlx5e_tc_flow *flow;
4054 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4055 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4056 if (!parse_attr || !flow)
4059 flow->flags = flow_flags;
4060 flow->cookie = f->cookie;
4063 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
4069 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4070 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4071 INIT_LIST_HEAD(&flow->hairpin);
4072 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4073 INIT_LIST_HEAD(&flow->attrs);
4074 refcount_set(&flow->refcnt, 1);
4075 init_completion(&flow->init_done);
4076 init_completion(&flow->del_hw_done);
4079 *__parse_attr = parse_attr;
4090 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4091 struct mlx5e_tc_flow_parse_attr *parse_attr,
4092 struct flow_cls_offload *f)
4094 attr->parse_attr = parse_attr;
4095 attr->chain = f->common.chain_index;
4096 attr->prio = f->common.prio;
4100 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4101 struct mlx5e_priv *priv,
4102 struct mlx5e_tc_flow_parse_attr *parse_attr,
4103 struct flow_cls_offload *f,
4104 struct mlx5_eswitch_rep *in_rep,
4105 struct mlx5_core_dev *in_mdev)
4107 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4108 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4110 mlx5e_flow_attr_init(attr, parse_attr, f);
4112 esw_attr->in_rep = in_rep;
4113 esw_attr->in_mdev = in_mdev;
4115 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4116 MLX5_COUNTER_SOURCE_ESWITCH)
4117 esw_attr->counter_dev = in_mdev;
4119 esw_attr->counter_dev = priv->mdev;
4122 static struct mlx5e_tc_flow *
4123 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4124 struct flow_cls_offload *f,
4125 unsigned long flow_flags,
4126 struct net_device *filter_dev,
4127 struct mlx5_eswitch_rep *in_rep,
4128 struct mlx5_core_dev *in_mdev)
4130 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4131 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4132 struct netlink_ext_ack *extack = f->common.extack;
4133 struct mlx5e_tc_flow_parse_attr *parse_attr;
4134 struct mlx5e_tc_flow *flow;
4137 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4138 attr_size = sizeof(struct mlx5_esw_flow_attr);
4139 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4140 &parse_attr, &flow);
4144 parse_attr->filter_dev = filter_dev;
4145 mlx5e_flow_esw_attr_init(flow->attr,
4147 f, in_rep, in_mdev);
4149 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4154 /* actions validation depends on parsing the ct matches first */
4155 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4156 &flow->attr->ct_attr, extack);
4160 /* always set IP version for indirect table handling */
4161 flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
4163 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
4167 if (flow->attr->lag.count) {
4168 err = mlx5_lag_add_mpesw_rule(esw->dev);
4173 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4174 complete_all(&flow->init_done);
4176 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4179 add_unready_flow(flow);
4185 if (flow->attr->lag.count)
4186 mlx5_lag_del_mpesw_rule(esw->dev);
4188 mlx5e_flow_put(priv, flow);
4190 return ERR_PTR(err);
4193 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4194 struct mlx5e_tc_flow *flow,
4195 unsigned long flow_flags)
4197 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4198 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4199 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4200 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4201 struct mlx5e_tc_flow_parse_attr *parse_attr;
4202 struct mlx5e_rep_priv *peer_urpriv;
4203 struct mlx5e_tc_flow *peer_flow;
4204 struct mlx5_core_dev *in_mdev;
4207 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4211 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4212 peer_priv = netdev_priv(peer_urpriv->netdev);
/* in_mdev is the mdev the packet originated from.
* So packets redirected to the uplink use the same mdev as the
* original flow, and packets redirected from the uplink use the
* peer mdev.
*/
4219 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4220 in_mdev = peer_priv->mdev;
4222 in_mdev = priv->mdev;
4224 parse_attr = flow->attr->parse_attr;
4225 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4226 parse_attr->filter_dev,
4227 attr->in_rep, in_mdev);
4228 if (IS_ERR(peer_flow)) {
4229 err = PTR_ERR(peer_flow);
4233 flow->peer_flow = peer_flow;
4234 flow_flag_set(flow, DUP);
4235 mutex_lock(&esw->offloads.peer_mutex);
4236 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4237 mutex_unlock(&esw->offloads.peer_mutex);
4240 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4245 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4246 struct flow_cls_offload *f,
4247 unsigned long flow_flags,
4248 struct net_device *filter_dev,
4249 struct mlx5e_tc_flow **__flow)
4251 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4252 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4253 struct mlx5_core_dev *in_mdev = priv->mdev;
4254 struct mlx5e_tc_flow *flow;
4257 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4260 return PTR_ERR(flow);
4262 if (is_peer_flow_needed(flow)) {
4263 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4265 mlx5e_tc_del_fdb_flow(priv, flow);
4279 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4280 struct flow_cls_offload *f,
4281 unsigned long flow_flags,
4282 struct net_device *filter_dev,
4283 struct mlx5e_tc_flow **__flow)
4285 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4286 struct netlink_ext_ack *extack = f->common.extack;
4287 struct mlx5e_tc_flow_parse_attr *parse_attr;
4288 struct mlx5e_tc_flow *flow;
4291 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4292 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4294 } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4298 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4299 attr_size = sizeof(struct mlx5_nic_flow_attr);
4300 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4301 &parse_attr, &flow);
4305 parse_attr->filter_dev = filter_dev;
4306 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4308 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4313 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4314 &flow->attr->ct_attr, extack);
4318 err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
4322 err = mlx5e_tc_add_nic_flow(priv, flow, extack);
4326 flow_flag_set(flow, OFFLOADED);
4332 flow_flag_set(flow, FAILED);
4333 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
4334 mlx5e_flow_put(priv, flow);
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule may be duplicated on a non-uplink representor
	 * that shares a tc block with other slaves of a lag device. rpriv can
	 * be NULL when this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv &&
	       rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

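/* Entry point for FLOW_CLS_REPLACE. A duplicate cookie is normally an
 * error, with one exception: bonded representors share a tc block, so the
 * same rule may legitimately be offered once per lag slave (see
 * is_flow_rule_duplicate_allowed() above). Such duplicates are accepted
 * without re-adding the rule, which stays owned by its original device.
 */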
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set, so that concurrent delete requests release it exactly once.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest Mbit/sec
	 * (one million bits per second). Moreover, if the rate is non-zero,
	 * configure a minimum of 1 Mbit/sec.
	 */
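	/* Worked example (illustrative values): a police rate of 1,250,000
	 * bytes/sec is 10,000,000 bits/sec; adding 500,000 and dividing by
	 * 1,000,000 rounds this to rate_mbps = 10. A tiny non-zero rate such
	 * as 1,000 bytes/sec rounds down to 0 and is clamped to 1 Mbit/sec
	 * by the max_t() below.
	 */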
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

int mlx5e_policer_validate(const struct flow_action *action,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_pkt_ps) {
		NL_SET_ERR_MSG_MOD(extack,
				   "QoS offload does not support packets per second");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offload not supported when conform action is not continue");
				return -EOPNOTSUPP;
			}

			err = mlx5e_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}

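/* When the peer of a hairpin pair disappears (its netdev is unregistering),
 * every hairpin entry created against that peer's vhca_id must be told so it
 * stops using the dead peer's queues. Entries are collected under the
 * hairpin table lock, while the potentially sleeping wait and clear work
 * runs outside of it.
 */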
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

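/* Size the NIC tc flow table from device capabilities. Worked example with
 * illustrative (not guaranteed) capability values: with ample flow counters,
 * tc_grp_size is capped at MLX5E_TC_TABLE_MAX_GROUP_SIZE; if log_max_ft_size
 * is 16, the final table size is then min(tc_grp_size *
 * MLX5E_TC_TABLE_NUM_GROUPS, BIT(16)) = 65536 entries.
 */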
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}

static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5_flow_table **ft = &priv->fs.tc.miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;

	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}

static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	mlx5_destroy_flow_table(priv->fs.tc.miss_t);
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = priv->fs.tc.miss_t;
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
}

int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	return 0;
}

void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

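/* Switchdev-mode init for the uplink representor. Besides post-action, CT,
 * internal-port and sampling contexts, this creates two ID mappings that
 * compress match metadata into register values: one keyed by struct
 * tunnel_match_key and one by the tunnel encap options. Both are derived
 * from the NIC system image GUID, so devices on the same physical NIC
 * resolve identical IDs.
 */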
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* The last two values are reserved for the stack-devices slow-path
	 * table mark and the bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}

void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}

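/* RX-path restore for NIC-mode rules: reg_b, delivered in the CQE, packs the
 * chain tag in its low MLX5E_TC_TABLE_CHAIN_TAG_MASK bits and the conntrack
 * zone-restore ID in the bits above them. The tag is translated back to a
 * chain number via tc->mapping and replayed into a tc_skb_ext, so software
 * tc resumes from the chain where the hardware miss occurred. For example
 * (illustrative value), a miss in hardware chain 3 yields a tc_skb_ext with
 * chain == 3.
 */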
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);

	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
				  ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}