/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "devlink.h"
#include "en.h"
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#include "lag/lag.h"
#include "lag/mp.h"

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct rhashtable ht;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct dentry *dfs_root;

	/* tc action stats */
	struct mlx5e_tc_act_stats_handle *action_stats_handle;
};
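
/* Each entry below describes where a software object id (chain mapping,
 * tunnel id, CT zone, ...) lives in the HW metadata registers: mfield
 * selects the register (REG_C_0/REG_C_1 for FDB rules, REG_B for NIC
 * rules), moffset/mlen give the bit offset and width inside it, and
 * soffset is the byte offset of the corresponding field in the match
 * parameters, used when matching the stored value back.
 */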
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}

/* To avoid false lock dependency warning, set the tc_ht lock
 * class different than the lock class of the ht being used when deleting
 * last flow from a group and then deleting a group: we get into del_sw_flow_group(),
 * which calls rhashtable_destroy on fg->ftes_hash, which will take ht->mutex, but
 * it's different than the ht->mutex here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow,
					struct mlx5_flow_attr *attr);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move to correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* zero the previous value and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* add current to mask */
	curr_mask |= mask;
	curr_val |= val;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
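
/* Worked example of the bit packing above: for a 16-bit mapping at
 * moffset 16 (the VPORT_TO_REG layout in this file), writing val = 0x5
 * with mask = 0xffff first clears bits 16..31 of the current register
 * match and then ORs in 0x00050000 / 0xffff0000, leaving the low 16
 * bits (e.g. a chain mapping in MAPPED_OBJ_TO_REG) untouched.
 */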
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
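
/* On success the function above returns the index of the SET action it
 * appended (num_actions before the increment), so callers that need to
 * rewrite the programmed value later can hand that index back to
 * mlx5e_tc_match_to_reg_mod_hdr_change() below as act_id.
 */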
static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->action_stats_handle;
	}

	return tc->action_stats_handle;
}
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}
struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}
static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	if (IS_ERR(post_act))
		return PTR_ERR(post_act);

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}
static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
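
/* Hairpin lets the HW forward packets that arrive on one function's RQs
 * straight out of another function's SQs without passing through
 * software. A single channel uses the inline direct TIR; with multiple
 * channels an indirect RQT plus per-traffic-type TIRs and a TTC table
 * provide RSS across the hairpin queues, as set up by the helpers below.
 */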
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	u8 log_num_packets;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}
int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &attr->parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	attr->mh = mh;

	return 0;
}

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	/* flow wasn't fully initialized */
	if (!attr->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     attr->mh);
	attr->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir indir;
	int err;

	err = mlx5e_rss_params_indir_init(&indir, mdev,
					  mlx5e_rqt_size(mdev, hp->num_channels),
					  mlx5e_rqt_size(mdev, priv->max_nch));
	if (err)
		return err;

	mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   &indir);

	mlx5e_rss_params_indir_cleanup(&indir);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;
	hp->log_num_packets = params->log_num_packets;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
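
/* e.g. peer_vhca_id 0x0005, prio 3 -> hash key 0x00050003: the peer vhca
 * id occupies the high 16 bits and the matched VLAN priority the low bits.
 */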
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
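
/* Hairpin pairing is keyed by <peer vhca id, VLAN prio>. PCP is 3 bits
 * wide, so 0..7 are real priorities; 8 serves as the out-of-band "no
 * priority matched" bucket for rules that don't match a VLAN priority.
 */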
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");
static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file,
			   "Hairpin peer_vhca_id %u prio %u refcnt %u num_channels %u num_packets %lu\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt), hpe->hp->num_channels,
			   BIT(hpe->hp->log_num_packets));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct devlink *devlink = priv_to_devlink(priv->mdev);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	union devlink_param_value val = {};
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_QUEUE_SIZE, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.log_num_packets = ilog2(val.vu32);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter;
	err = devl_param_driverinit_value_get(
		devlink, MLX5_DEVLINK_PARAM_ID_HAIRPIN_NUM_QUEUES, &val);
	if (err) {
		err = -ENOMEM;
		goto out_err;
	}

	params.num_channels = val.vu32;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
		if (err)
			return err;
	}

	flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec, attr);
	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);
	mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}
void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
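
/* Slow path: when a flow can't be fully offloaded yet (e.g. an encap
 * destination without a resolved neighbour), it is steered to the slow
 * FDB table so software sees the packets. If FW can forward after a
 * modify header action (fdb_modify_header_fwd_to_table), the chain id
 * mapping is also written to reg_c0 so a later miss resumes handling at
 * the proper chain.
 */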
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					MAPPED_OBJ_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->attr->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return ERR_PTR(err);
}
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->attr->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	if (flow_flag_test(flow, NOT_READY))
		unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (!err)
		return err;

	if (!mlx5_lag_is_active(out_priv->mdev))
		return err;

	rcu_read_lock();
	err = -ENODEV;
	mlx5_devcom_for_each_peer_entry_rcu(esw->devcom, esw, pos) {
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (!err)
			break;
	}
	rcu_read_unlock();

	return err;
}
static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool
has_encap_dests(struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int out_index;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			return true;

	return false;
}
static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  struct netlink_ext_ack *extack)
{
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
		err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
		if (err)
			goto err_out;
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, extack);
	if (err)
		goto err_out;

	err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr_actions(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;

	mlx5e_put_flow_tunnel_id(flow);

	remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);

	free_flow_post_acts(flow);
	mlx5_free_flow_attr_actions(flow, attr);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);
	return attr->counter;
}
/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
				       int peer_index)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_tc_flow *tmp;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer[peer_index]);
	mutex_unlock(&esw->offloads.peer_mutex);

	list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
		if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
			continue;
		if (refcount_dec_and_test(&peer_flow->refcnt)) {
			mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
			list_del(&peer_flow->peer_flows);
			kfree(peer_flow);
		}
	}

	if (list_empty(&flow->peer_flows))
		flow_flag_clear(flow, DUP);
}
static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == mlx5_get_dev_index(flow->priv->mdev))
			continue;
		mlx5e_tc_del_fdb_peer_flow(flow, i);
	}
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		struct mlx5_devcom_comp_dev *devcom = flow->priv->mdev->priv.eswitch->devcom;

		if (!mlx5_devcom_for_each_peer_begin(devcom)) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return;
		}

		mlx5e_tc_del_fdb_peers_flow(flow);
		mlx5_devcom_for_each_peer_end(devcom);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
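
/* COPY_DISSECTOR() copies the dissector key named by diss_key out of the
 * flow rule's match into dst, using typeof(dst) to size the copy, so the
 * tunnel match key below can be assembled field by field.
 */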
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
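
/* The 32-bit tunnel_id packs both mappings: the tunnel match id in the
 * high bits and the enc opts id in the low ENC_OPTS_BITS, i.e.
 * value = tun_id << ENC_OPTS_BITS | enc_opts_id. The put path below
 * unpacks it the same way before releasing the two mapping entries.
 */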
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}
2242 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
2243 struct flow_match_basic *match, bool outer,
2244 void *headers_c, void *headers_v)
2246 bool ip_version_cap;
2248 ip_version_cap = outer ?
2249 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2250 ft_field_support.outer_ip_version) :
2251 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2252 ft_field_support.inner_ip_version);
2254 if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
2255 (match->key->n_proto == htons(ETH_P_IP) ||
2256 match->key->n_proto == htons(ETH_P_IPV6))) {
2257 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
2258 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
2259 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
2261 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
2262 ntohs(match->mask->n_proto));
2263 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
2264 ntohs(match->key->n_proto));
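/* Read ip_version from the match value; when it isn't set, derive it
 * from the matched ethertype instead.
 */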
2268 u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
2275 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
2277 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
2279 ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
2280 /* Return ip_version converted from ethertype anyway */
2282 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
2283 if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
2285 else if (ethertype == ETH_P_IPV6)
2291 /* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
2292 * It changes the inner ip_ecn depending on the inner and outer ip_ecn as follows:
2293 * +---------+----------------------------------------+
2294 * |Arriving | Arriving Outer Header |
2295 * | Inner +---------+---------+---------+----------+
2296 * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
2297 * +---------+---------+---------+---------+----------+
2298 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
2299 * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
2300 * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
2301 * | CE | CE | CE | CE | CE |
2302 * +---------+---------+---------+---------+----------+
2304 * TC matches on the inner headers after decapsulation on the tunnel device, but
2305 * hw offload matches the inner ip_ecn value before the hardware decap action.
2307 * Cells marked with * are changed from the original inner packet ip_ecn value
2308 * during decap, so matching those values on inner ip_ecn before decap will fail.
2310 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
2311 * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE,
2312 * and as such we can drop the inner ip_ecn=CE match.
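/* Example (from the table above): an arriving inner ECT(0) under outer
 * ECT(1) is rewritten to ECT(1) by decap, so a pre-decap hw match on
 * inner ecn = ECT(0) would wrongly miss; such filters are rejected below.
 */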
2315 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2316 struct flow_cls_offload *f,
2317 bool *match_inner_ecn)
2319 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2320 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2321 struct netlink_ext_ack *extack = f->common.extack;
2322 struct flow_match_ip match;
2324 *match_inner_ecn = true;
2326 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2327 flow_rule_match_enc_ip(rule, &match);
2328 outer_ecn_key = match.key->tos & INET_ECN_MASK;
2329 outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
2332 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2333 flow_rule_match_ip(rule, &match);
2334 inner_ecn_key = match.key->tos & INET_ECN_MASK;
2335 inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
2338 if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
2339 NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
2340 netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
2344 if (!outer_ecn_mask) {
2345 if (!inner_ecn_mask)
2348 NL_SET_ERR_MSG_MOD(extack,
2349 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2350 netdev_warn(priv->netdev,
2351 "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
2355 if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
2356 NL_SET_ERR_MSG_MOD(extack,
2357 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2358 netdev_warn(priv->netdev,
2359 "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
2363 if (!inner_ecn_mask)
2366 /* Both inner and outer have full mask on ecn */
2368 if (outer_ecn_key == INET_ECN_ECT_1) {
2369 /* inner ecn might change by DECAP action */
2371 NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
2372 netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
2376 if (outer_ecn_key != INET_ECN_CE)
2379 if (inner_ecn_key != INET_ECN_CE) {
2380 /* Can't happen in software, as packet ecn will be changed to CE after decap */
2381 NL_SET_ERR_MSG_MOD(extack,
2382 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2383 netdev_warn(priv->netdev,
2384 "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
2388 /* outer ecn = CE, inner ecn = CE; as decap will change inner ecn to CE in any case,
2389 * drop match on inner ecn
2391 *match_inner_ecn = false;
2396 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2397 struct mlx5e_tc_flow *flow,
2398 struct mlx5_flow_spec *spec,
2399 struct flow_cls_offload *f,
2400 struct net_device *filter_dev,
2404 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2405 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2406 struct netlink_ext_ack *extack = f->common.extack;
2407 bool needs_mapping, sets_mapping;
2410 if (!mlx5e_is_eswitch_flow(flow)) {
2411 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2415 needs_mapping = !!flow->attr->chain;
2416 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2417 *match_inner = !needs_mapping;
2419 if ((needs_mapping || sets_mapping) &&
2420 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2421 NL_SET_ERR_MSG_MOD(extack,
2422 "Chains on tunnel devices isn't supported without register loopback support");
2423 netdev_warn(priv->netdev,
2424 "Chains on tunnel devices isn't supported without register loopback support");
2428 if (!flow->attr->chain) {
2429 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2432 NL_SET_ERR_MSG_MOD(extack,
2433 "Failed to parse tunnel attributes");
2434 netdev_warn(priv->netdev,
2435 "Failed to parse tunnel attributes");
2439 /* With mpls over udp we decapsulate using packet reformat
2440 * object
2441 */
2442 if (!netif_is_bareudp(filter_dev))
2443 flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2444 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2447 } else if (tunnel) {
2448 struct mlx5_flow_spec *tmp_spec;
2450 tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
2452 NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
2453 netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
2456 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
2458 err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
2461 NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
2462 netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
2465 err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
2471 if (!needs_mapping && !sets_mapping)
2474 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
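/* For decapsulating flows the rule matches on the inner headers; these
 * helpers pick the inner or outer header layout out of the flow spec
 * based on the DECAP action flag.
 */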
2477 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2479 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2483 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2485 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2489 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2491 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2495 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2497 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2501 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2503 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2504 get_match_inner_headers_value(spec) :
2505 get_match_outer_headers_value(spec);
2508 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2510 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2511 get_match_inner_headers_criteria(spec) :
2512 get_match_outer_headers_criteria(spec);
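/* Validate the flower META key: l2_miss matching is rejected, and
 * ingress_ifindex is only accepted as an exact match on the filter
 * device itself.
 */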
2515 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2516 struct flow_cls_offload *f)
2518 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2519 struct netlink_ext_ack *extack = f->common.extack;
2520 struct net_device *ingress_dev;
2521 struct flow_match_meta match;
2523 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2526 flow_rule_match_meta(rule, &match);
2528 if (match.mask->l2_miss) {
2529 NL_SET_ERR_MSG_MOD(extack, "Can't match on \"l2_miss\"");
2533 if (!match.mask->ingress_ifindex)
2536 if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2537 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2541 ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2542 match.key->ingress_ifindex);
2544 NL_SET_ERR_MSG_MOD(extack,
2545 "Can't find the ingress port to match on");
2549 if (ingress_dev != filter_dev) {
2550 NL_SET_ERR_MSG_MOD(extack,
2551 "Can't match on the ingress filter port");
2558 static bool skip_key_basic(struct net_device *filter_dev,
2559 struct flow_cls_offload *f)
2561 /* When doing mpls over udp decap, the user needs to provide
2562 * MPLS_UC as the protocol in order to be able to match on mpls
2563 * label fields. However, the actual ethertype is IP so we want to
2564 * avoid matching on this, otherwise we'll fail the match.
2566 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
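/* Translate the flower match keys into the mlx5 flow spec. Returns the
 * highest layer matched (L2/L3/L4) separately for the inner and outer
 * headers via inner_match_level/outer_match_level.
 */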
2572 static int __parse_cls_flower(struct mlx5e_priv *priv,
2573 struct mlx5e_tc_flow *flow,
2574 struct mlx5_flow_spec *spec,
2575 struct flow_cls_offload *f,
2576 struct net_device *filter_dev,
2577 u8 *inner_match_level, u8 *outer_match_level)
2579 struct netlink_ext_ack *extack = f->common.extack;
2580 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2582 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2584 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2586 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2588 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2590 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2592 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2593 struct flow_dissector *dissector = rule->match.dissector;
2594 enum fs_flow_table_type fs_type;
2595 bool match_inner_ecn = true;
2601 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2602 match_level = outer_match_level;
2604 if (dissector->used_keys &
2605 ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
2606 BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
2607 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
2608 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2609 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
2610 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
2611 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2612 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2613 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
2614 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2615 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2616 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2617 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2618 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2619 BIT_ULL(FLOW_DISSECTOR_KEY_TCP) |
2620 BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
2621 BIT_ULL(FLOW_DISSECTOR_KEY_CT) |
2622 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
2623 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2624 BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
2625 BIT_ULL(FLOW_DISSECTOR_KEY_MPLS))) {
2626 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2627 netdev_dbg(priv->netdev, "Unsupported key used: 0x%llx\n",
2628 dissector->used_keys);
2632 if (mlx5e_get_tc_tun(filter_dev)) {
2633 bool match_inner = false;
2635 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2636 outer_match_level, &match_inner);
2641 /* header pointers should point to the inner headers
2642 * if the packet was decapsulated already.
2643 * outer headers are set by parse_tunnel_attr.
2645 match_level = inner_match_level;
2646 headers_c = get_match_inner_headers_criteria(spec);
2647 headers_v = get_match_inner_headers_value(spec);
2650 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2655 err = mlx5e_flower_parse_meta(filter_dev, f);
2659 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2660 !skip_key_basic(filter_dev, f)) {
2661 struct flow_match_basic match;
2663 flow_rule_match_basic(rule, &match);
2664 mlx5e_tc_set_ethertype(priv->mdev, &match,
2665 match_level == outer_match_level,
2666 headers_c, headers_v);
2668 if (match.mask->n_proto)
2669 *match_level = MLX5_MATCH_L2;
2671 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2672 is_vlan_dev(filter_dev)) {
2673 struct flow_dissector_key_vlan filter_dev_mask;
2674 struct flow_dissector_key_vlan filter_dev_key;
2675 struct flow_match_vlan match;
2677 if (is_vlan_dev(filter_dev)) {
2678 match.key = &filter_dev_key;
2679 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2680 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2681 match.key->vlan_priority = 0;
2682 match.mask = &filter_dev_mask;
2683 memset(match.mask, 0xff, sizeof(*match.mask));
2684 match.mask->vlan_priority = 0;
2686 flow_rule_match_vlan(rule, &match);
2688 if (match.mask->vlan_id ||
2689 match.mask->vlan_priority ||
2690 match.mask->vlan_tpid) {
2691 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2692 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2694 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2697 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2699 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2703 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2704 match.mask->vlan_id);
2705 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2706 match.key->vlan_id);
2708 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2709 match.mask->vlan_priority);
2710 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2711 match.key->vlan_priority);
2713 *match_level = MLX5_MATCH_L2;
2715 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2716 match.mask->vlan_eth_type &&
2717 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2718 ft_field_support.outer_second_vid,
2720 MLX5_SET(fte_match_set_misc, misc_c,
2721 outer_second_cvlan_tag, 1);
2722 spec->match_criteria_enable |=
2723 MLX5_MATCH_MISC_PARAMETERS;
2726 } else if (*match_level != MLX5_MATCH_NONE) {
2727 /* cvlan_tag enabled in match criteria and
2728 * disabled in match value means both S & C tags
2729 * don't exist (untagged on both)
2731 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2732 *match_level = MLX5_MATCH_L2;
2735 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2736 struct flow_match_vlan match;
2738 flow_rule_match_cvlan(rule, &match);
2739 if (match.mask->vlan_id ||
2740 match.mask->vlan_priority ||
2741 match.mask->vlan_tpid) {
2742 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2744 NL_SET_ERR_MSG_MOD(extack,
2745 "Matching on CVLAN is not supported");
2749 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2750 MLX5_SET(fte_match_set_misc, misc_c,
2751 outer_second_svlan_tag, 1);
2752 MLX5_SET(fte_match_set_misc, misc_v,
2753 outer_second_svlan_tag, 1);
2755 MLX5_SET(fte_match_set_misc, misc_c,
2756 outer_second_cvlan_tag, 1);
2757 MLX5_SET(fte_match_set_misc, misc_v,
2758 outer_second_cvlan_tag, 1);
2761 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2762 match.mask->vlan_id);
2763 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2764 match.key->vlan_id);
2765 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2766 match.mask->vlan_priority);
2767 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2768 match.key->vlan_priority);
2770 *match_level = MLX5_MATCH_L2;
2771 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2775 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2776 struct flow_match_eth_addrs match;
2778 flow_rule_match_eth_addrs(rule, &match);
2779 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2782 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2786 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2789 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2793 if (!is_zero_ether_addr(match.mask->src) ||
2794 !is_zero_ether_addr(match.mask->dst))
2795 *match_level = MLX5_MATCH_L2;
2798 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2799 struct flow_match_control match;
2801 flow_rule_match_control(rule, &match);
2802 addr_type = match.key->addr_type;
2804 /* the HW doesn't support frag first/later */
2805 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2806 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2810 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2811 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2812 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2813 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2815 /* the HW doesn't need L3 inline to match on frag=no */
2816 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2817 *match_level = MLX5_MATCH_L2;
2818 /* *** L2 attributes parsing up to here *** */
2820 *match_level = MLX5_MATCH_L3;
2824 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2825 struct flow_match_basic match;
2827 flow_rule_match_basic(rule, &match);
2828 ip_proto = match.key->ip_proto;
2830 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2831 match.mask->ip_proto);
2832 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2833 match.key->ip_proto);
2835 if (match.mask->ip_proto)
2836 *match_level = MLX5_MATCH_L3;
2839 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2840 struct flow_match_ipv4_addrs match;
2842 flow_rule_match_ipv4_addrs(rule, &match);
2843 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2844 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2845 &match.mask->src, sizeof(match.mask->src));
2846 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2847 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2848 &match.key->src, sizeof(match.key->src));
2849 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2850 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2851 &match.mask->dst, sizeof(match.mask->dst));
2852 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2853 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2854 &match.key->dst, sizeof(match.key->dst));
2856 if (match.mask->src || match.mask->dst)
2857 *match_level = MLX5_MATCH_L3;
2860 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2861 struct flow_match_ipv6_addrs match;
2863 flow_rule_match_ipv6_addrs(rule, &match);
2864 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2865 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2866 &match.mask->src, sizeof(match.mask->src));
2867 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2868 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2869 &match.key->src, sizeof(match.key->src));
2871 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2872 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2873 &match.mask->dst, sizeof(match.mask->dst));
2874 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2875 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2876 &match.key->dst, sizeof(match.key->dst));
2878 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2879 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2880 *match_level = MLX5_MATCH_L3;
2883 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2884 struct flow_match_ip match;
2886 flow_rule_match_ip(rule, &match);
2887 if (match_inner_ecn) {
2888 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2889 match.mask->tos & 0x3);
2890 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2891 match.key->tos & 0x3);
2894 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2895 match.mask->tos >> 2);
2896 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2897 match.key->tos >> 2);
2899 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2901 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2904 if (match.mask->ttl &&
2905 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2906 ft_field_support.outer_ipv4_ttl)) {
2907 NL_SET_ERR_MSG_MOD(extack,
2908 "Matching on TTL is not supported");
2912 if (match.mask->tos || match.mask->ttl)
2913 *match_level = MLX5_MATCH_L3;
2916 /* *** L3 attributes parsing up to here *** */
2918 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2919 struct flow_match_ports match;
2921 flow_rule_match_ports(rule, &match);
2924 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2925 tcp_sport, ntohs(match.mask->src));
2926 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2927 tcp_sport, ntohs(match.key->src));
2929 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2930 tcp_dport, ntohs(match.mask->dst));
2931 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2932 tcp_dport, ntohs(match.key->dst));
2936 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2937 udp_sport, ntohs(match.mask->src));
2938 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2939 udp_sport, ntohs(match.key->src));
2941 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2942 udp_dport, ntohs(match.mask->dst));
2943 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2944 udp_dport, ntohs(match.key->dst));
2947 NL_SET_ERR_MSG_MOD(extack,
2948 "Only UDP and TCP transports are supported for L4 matching");
2949 netdev_err(priv->netdev,
2950 "Only UDP and TCP transport are supported\n");
2954 if (match.mask->src || match.mask->dst)
2955 *match_level = MLX5_MATCH_L4;
2958 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2959 struct flow_match_tcp match;
2961 flow_rule_match_tcp(rule, &match);
2962 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2963 ntohs(match.mask->flags));
2964 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2965 ntohs(match.key->flags));
2967 if (match.mask->flags)
2968 *match_level = MLX5_MATCH_L4;
2970 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2971 struct flow_match_icmp match;
2973 flow_rule_match_icmp(rule, &match);
2976 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2977 MLX5_FLEX_PROTO_ICMP)) {
2978 NL_SET_ERR_MSG_MOD(extack,
2979 "Match on Flex protocols for ICMP is not supported");
2982 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2984 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2986 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2988 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2991 case IPPROTO_ICMPV6:
2992 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2993 MLX5_FLEX_PROTO_ICMPV6)) {
2994 NL_SET_ERR_MSG_MOD(extack,
2995 "Match on Flex protocols for ICMPV6 is not supported");
2998 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
3000 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
3002 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
3004 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
3008 NL_SET_ERR_MSG_MOD(extack,
3009 "Code and type matching only with ICMP and ICMPv6");
3010 netdev_err(priv->netdev,
3011 "Code and type matching only with ICMP and ICMPv6\n");
3014 if (match.mask->code || match.mask->type) {
3015 *match_level = MLX5_MATCH_L4;
3016 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
3019 /* Currently supported only for MPLS over UDP */
3020 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
3021 !netif_is_bareudp(filter_dev)) {
3022 NL_SET_ERR_MSG_MOD(extack,
3023 "Matching on MPLS is supported only for MPLS over UDP");
3024 netdev_err(priv->netdev,
3025 "Matching on MPLS is supported only for MPLS over UDP\n");
3032 static int parse_cls_flower(struct mlx5e_priv *priv,
3033 struct mlx5e_tc_flow *flow,
3034 struct mlx5_flow_spec *spec,
3035 struct flow_cls_offload *f,
3036 struct net_device *filter_dev)
3038 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
3039 struct netlink_ext_ack *extack = f->common.extack;
3040 struct mlx5_core_dev *dev = priv->mdev;
3041 struct mlx5_eswitch *esw = dev->priv.eswitch;
3042 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3043 struct mlx5_eswitch_rep *rep;
3044 bool is_eswitch_flow;
3047 inner_match_level = MLX5_MATCH_NONE;
3048 outer_match_level = MLX5_MATCH_NONE;
3050 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3051 &inner_match_level, &outer_match_level);
3052 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
3053 outer_match_level : inner_match_level;
3055 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
3056 if (!err && is_eswitch_flow) {
3058 if (rep->vport != MLX5_VPORT_UPLINK &&
3059 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
3060 esw->offloads.inline_mode < non_tunnel_match_level)) {
3061 NL_SET_ERR_MSG_MOD(extack,
3062 "Flow is not offloaded due to min inline setting");
3063 netdev_warn(priv->netdev,
3064 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
3065 non_tunnel_match_level, esw->offloads.inline_mode);
3070 flow->attr->inner_match_level = inner_match_level;
3071 flow->attr->outer_match_level = outer_match_level;
3077 struct mlx5_fields {
3085 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
3086 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
3087 offsetof(struct pedit_headers, field) + (off), \
3088 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
3090 /* masked values are the same and there are no rewrites that do not have a
3091 * match.
3092 */
3093 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
3094 type matchmaskx = *(type *)(matchmaskp); \
3095 type matchvalx = *(type *)(matchvalp); \
3096 type maskx = *(type *)(maskp); \
3097 type valx = *(type *)(valp); \
3099 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
3100 matchmaskx)); \
3101 })
3103 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
3104 void *matchmaskp, u8 bsize)
3110 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
3113 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
3116 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
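/* e.g. a pedit that sets a byte to 0x0a under mask 0xff is redundant if
 * the rule already matches that byte as value 0x0a with mask 0xff: the
 * masked values are equal and the rewrite mask adds no bits beyond the
 * match mask, so the rewrite can be skipped.
 */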
3123 static struct mlx5_fields fields[] = {
3124 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
3125 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
3126 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
3127 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
3128 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
3129 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
3131 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
3132 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
3133 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
3134 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3136 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
3137 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
3138 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
3139 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
3140 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
3141 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
3142 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
3143 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
3144 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
3145 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
3146 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
3147 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
3148 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
3149 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
3150 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
3151 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
3152 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
3153 OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
3155 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
3156 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
3157 /* in linux tcphdr tcp_flags is 8 bits long */
3158 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
3160 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
3161 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
3164 static u32 mask_field_get(void *mask, struct mlx5_fields *f)
3166 switch (f->field_bsize) {
3168 return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
3170 return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
3172 return *(u8 *)mask & (u8)f->field_mask;
3176 static void mask_field_clear(void *mask, struct mlx5_fields *f)
3178 switch (f->field_bsize) {
3180 *(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
3183 *(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
3186 *(u8 *)mask &= ~(u8)f->field_mask;
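/* Convert the pedit set/add masks accumulated in parse_attr->hdrs into
 * mlx5 modify-header actions, one per hw field. Rewrites that only
 * restate an existing exact match are skipped, and consumed mask bits
 * are cleared so verify_offload_pedit_fields() can flag leftovers.
 */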
3191 static int offload_pedit_fields(struct mlx5e_priv *priv,
3193 struct mlx5e_tc_flow_parse_attr *parse_attr,
3195 struct netlink_ext_ack *extack)
3197 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3198 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3199 void *headers_c, *headers_v, *action, *vals_p;
3200 struct mlx5e_tc_mod_hdr_acts *mod_acts;
3201 void *s_masks_p, *a_masks_p;
3202 int i, first, last, next_z;
3203 struct mlx5_fields *f;
3208 mod_acts = &parse_attr->mod_hdr_acts;
3209 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3210 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3212 set_masks = &hdrs[0].masks;
3213 add_masks = &hdrs[1].masks;
3214 set_vals = &hdrs[0].vals;
3215 add_vals = &hdrs[1].vals;
3217 for (i = 0; i < ARRAY_SIZE(fields); i++) {
3221 s_masks_p = (void *)set_masks + f->offset;
3222 a_masks_p = (void *)add_masks + f->offset;
3224 s_mask = mask_field_get(s_masks_p, f);
3225 a_mask = mask_field_get(a_masks_p, f);
3227 if (!s_mask && !a_mask) /* nothing to offload here */
3230 if (s_mask && a_mask) {
3231 NL_SET_ERR_MSG_MOD(extack,
3232 "can't set and add to the same HW field");
3233 netdev_warn(priv->netdev,
3234 "mlx5: can't set and add to the same HW field (%x)\n",
3241 void *match_mask = headers_c + f->match_offset;
3242 void *match_val = headers_v + f->match_offset;
3244 cmd = MLX5_ACTION_TYPE_SET;
3246 vals_p = (void *)set_vals + f->offset;
3247 /* don't rewrite if we have a match on the same value */
3248 if (cmp_val_mask(vals_p, s_masks_p, match_val,
3249 match_mask, f->field_bsize))
3251 /* clear to denote we consumed this field */
3252 mask_field_clear(s_masks_p, f);
3254 cmd = MLX5_ACTION_TYPE_ADD;
3256 vals_p = (void *)add_vals + f->offset;
3257 /* add 0 is no change */
3258 if (!mask_field_get(vals_p, f))
3260 /* clear to denote we consumed this field */
3261 mask_field_clear(a_masks_p, f);
3266 first = find_first_bit(&mask, f->field_bsize);
3267 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3268 last = find_last_bit(&mask, f->field_bsize);
3269 if (first < next_z && next_z < last) {
3270 NL_SET_ERR_MSG_MOD(extack,
3271 "rewrite of few sub-fields isn't supported");
3272 netdev_warn(priv->netdev,
3273 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
3278 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3279 if (IS_ERR(action)) {
3280 NL_SET_ERR_MSG_MOD(extack,
3281 "too many pedit actions, can't offload");
3282 mlx5_core_warn(priv->mdev,
3283 "mlx5: parsed %d pedit actions, can't do more\n",
3284 mod_acts->num_actions);
3285 return PTR_ERR(action);
3288 MLX5_SET(set_action_in, action, action_type, cmd);
3289 MLX5_SET(set_action_in, action, field, f->field);
3291 if (cmd == MLX5_ACTION_TYPE_SET) {
3292 unsigned long field_mask = f->field_mask;
3295 /* a bit-sized field may not start at the first bit */
3296 start = find_first_bit(&field_mask, f->field_bsize);
3298 MLX5_SET(set_action_in, action, offset, first - start);
3299 /* length is num of bits to be written, zero means length of 32 */
3300 MLX5_SET(set_action_in, action, length, (last - first + 1));
3303 if (f->field_bsize == 32)
3304 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3305 else if (f->field_bsize == 16)
3306 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3307 else if (f->field_bsize == 8)
3308 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
3310 ++mod_acts->num_actions;
3316 static const struct pedit_headers zero_masks = {};
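/* Any mask bits still set after offload_pedit_fields() consumed the
 * supported fields denote a header field we cannot rewrite in hw.
 */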
3318 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3319 struct mlx5e_tc_flow_parse_attr *parse_attr,
3320 struct netlink_ext_ack *extack)
3322 struct pedit_headers *cmd_masks;
3325 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3326 cmd_masks = &parse_attr->hdrs[cmd].masks;
3327 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3328 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3329 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3330 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3331 16, 1, cmd_masks, sizeof(zero_masks), true);
3339 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3340 struct mlx5e_tc_flow_parse_attr *parse_attr,
3342 struct netlink_ext_ack *extack)
3346 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3348 goto out_dealloc_parsed_actions;
3350 err = verify_offload_pedit_fields(priv, parse_attr, extack);
3352 goto out_dealloc_parsed_actions;
3356 out_dealloc_parsed_actions:
3357 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3361 struct ip_ttl_word {
3367 struct ipv6_hoplimit_word {
3374 is_flow_action_modify_ip_header(struct flow_action *flow_action)
3376 const struct flow_action_entry *act;
3381 /* For the IPv4 and IPv6 headers, check the 4-byte word
3382 * containing ttl/hop_limit to determine whether fields
3383 * other than ttl and hop_limit are being modified.
3385 flow_action_for_each(i, act, flow_action) {
3386 if (act->id != FLOW_ACTION_MANGLE &&
3387 act->id != FLOW_ACTION_ADD)
3390 htype = act->mangle.htype;
3391 offset = act->mangle.offset;
3392 mask = ~act->mangle.mask;
3394 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3395 struct ip_ttl_word *ttl_word =
3396 (struct ip_ttl_word *)&mask;
3398 if (offset != offsetof(struct iphdr, ttl) ||
3399 ttl_word->protocol ||
3402 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3403 struct ipv6_hoplimit_word *hoplimit_word =
3404 (struct ipv6_hoplimit_word *)&mask;
3406 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3407 hoplimit_word->payload_len ||
3408 hoplimit_word->nexthdr)
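/* A pedit that touches IP-header fields other than ttl/hop_limit is only
 * offloaded for TCP, UDP and ICMP flows; is_flow_action_modify_ip_header()
 * above detects such rewrites by checking the 4-byte word around ttl.
 */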
3416 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3417 struct mlx5_flow_spec *spec,
3418 struct flow_action *flow_action,
3420 struct netlink_ext_ack *extack)
3422 bool modify_ip_header;
3428 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3429 headers_v = mlx5e_get_match_headers_value(actions, spec);
3430 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3432 /* for non-IP we only re-write MACs, so we're okay */
3433 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3434 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3437 modify_ip_header = is_flow_action_modify_ip_header(flow_action);
3438 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3439 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3440 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3441 NL_SET_ERR_MSG_MOD(extack,
3442 "can't offload re-write of non TCP/UDP");
3443 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3453 actions_match_supported_fdb(struct mlx5e_priv *priv,
3454 struct mlx5e_tc_flow *flow,
3455 struct netlink_ext_ack *extack)
3457 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3459 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3460 NL_SET_ERR_MSG_MOD(extack,
3461 "current firmware doesn't support split rule for port mirroring");
3462 netdev_warn_once(priv->netdev,
3463 "current firmware doesn't support split rule for port mirroring\n");
3471 actions_match_supported(struct mlx5e_priv *priv,
3472 struct flow_action *flow_action,
3474 struct mlx5e_tc_flow_parse_attr *parse_attr,
3475 struct mlx5e_tc_flow *flow,
3476 struct netlink_ext_ack *extack)
3478 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3479 !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions,
3483 if (mlx5e_is_eswitch_flow(flow) &&
3484 !actions_match_supported_fdb(priv, flow, extack))
3490 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3492 return priv->mdev == peer_priv->mdev;
3495 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3497 struct mlx5_core_dev *fmdev, *pmdev;
3498 u64 fsystem_guid, psystem_guid;
3501 pmdev = peer_priv->mdev;
3503 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3504 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3506 return (fsystem_guid == psystem_guid);
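/* Build the modify-header action list from the parsed pedits. If every
 * pedit turned out to be redundant, drop the MOD_HDR action flag and
 * free the empty action buffer.
 */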
3510 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3511 struct mlx5e_tc_flow *flow,
3512 struct mlx5_flow_attr *attr,
3513 struct netlink_ext_ack *extack)
3515 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3516 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3517 enum mlx5_flow_namespace_type ns_type;
3520 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3521 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3524 ns_type = mlx5e_get_flow_namespace(flow);
3526 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3530 if (parse_attr->mod_hdr_acts.num_actions > 0)
3533 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3534 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3535 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3537 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3540 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3541 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3542 attr->esw_attr->split_count = 0;
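/* Clone a flow attr for use in a post-action table: the base attr is
 * copied, while per-instance state (counter, dests, chain/ft pointers,
 * CT and branch state) is reset for the new attr.
 */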
3547 static struct mlx5_flow_attr*
3548 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3549 enum mlx5_flow_namespace_type ns_type)
3551 struct mlx5e_tc_flow_parse_attr *parse_attr;
3552 u32 attr_sz = ns_to_attr_sz(ns_type);
3553 struct mlx5_flow_attr *attr2;
3555 attr2 = mlx5_alloc_flow_attr(ns_type);
3556 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3557 if (!attr2 || !parse_attr) {
3563 memcpy(attr2, attr, attr_sz);
3564 INIT_LIST_HEAD(&attr2->list);
3565 parse_attr->filter_dev = attr->parse_attr->filter_dev;
3567 attr2->counter = NULL;
3568 attr2->tc_act_cookies_count = 0;
3570 attr2->parse_attr = parse_attr;
3571 attr2->dest_chain = 0;
3572 attr2->dest_ft = NULL;
3573 attr2->act_id_restore_rule = NULL;
3574 memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));
3576 if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
3577 attr2->esw_attr->out_count = 0;
3578 attr2->esw_attr->split_count = 0;
3581 attr2->branch_true = NULL;
3582 attr2->branch_false = NULL;
3583 attr2->jumping_attr = NULL;
3587 struct mlx5_flow_attr *
3588 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3590 struct mlx5_esw_flow_attr *esw_attr;
3591 struct mlx5_flow_attr *attr;
3594 list_for_each_entry(attr, &flow->attrs, list) {
3595 esw_attr = attr->esw_attr;
3596 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3597 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3606 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3608 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3609 struct mlx5_flow_attr *attr;
3611 list_for_each_entry(attr, &flow->attrs, list) {
3612 if (list_is_last(&attr->list, &flow->attrs))
3615 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3620 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3622 struct mlx5_flow_attr *attr, *tmp;
3624 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3625 if (list_is_last(&attr->list, &flow->attrs))
3628 mlx5_free_flow_attr_actions(flow, attr);
3630 list_del(&attr->list);
3631 kvfree(attr->parse_attr);
3637 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3639 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3640 struct mlx5_flow_attr *attr;
3643 list_for_each_entry(attr, &flow->attrs, list) {
3644 if (list_is_last(&attr->list, &flow->attrs))
3647 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3655 /* TC filter rule HW translation:
3657 * +---------------------+
3658 * + ft prio (tc chain) +
3659 * + original match +
3660 * +---------------------+
3662 * | if multi table action
3665 * +---------------------+
3666 * + post act ft |<----.
3667 * + match fte id | | split on multi table action
3668 * + do actions |-----'
3669 * +---------------------+
3673 * Do rest of the actions after last multi table action.
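/* A multi table action (e.g. ct) terminates the current flow table, so
 * the actions that follow it are offloaded as post-act rules matched by
 * fte id, per the diagram above.
 */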
3676 alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
3678 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3679 struct mlx5_flow_attr *attr, *next_attr = NULL;
3680 struct mlx5e_post_act_handle *handle;
3683 /* This is going in reverse order as needed.
3684 * The first entry is the last attribute.
3686 list_for_each_entry(attr, &flow->attrs, list) {
3688 /* Set counter action on last post act rule. */
3689 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3692 if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
3693 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3698 /* Don't add post_act rule for first attr (last in the list).
3699 * It's being handled by the caller.
3701 if (list_is_last(&attr->list, &flow->attrs))
3704 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3708 err = post_process_attr(flow, attr, extack);
3712 handle = mlx5e_tc_post_act_add(post_act, attr);
3713 if (IS_ERR(handle)) {
3714 err = PTR_ERR(handle);
3718 attr->post_act_handle = handle;
3720 if (attr->jumping_attr) {
3721 err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
3729 if (flow_flag_test(flow, SLOW))
3732 err = mlx5e_tc_offload_flow_post_acts(flow);
3740 free_flow_post_acts(flow);
3745 set_branch_dest_ft(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr)
3747 struct mlx5e_post_act *post_act = get_post_action(priv);
3749 if (IS_ERR(post_act))
3750 return PTR_ERR(post_act);
3752 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3753 attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
3759 alloc_branch_attr(struct mlx5e_tc_flow *flow,
3760 struct mlx5e_tc_act_branch_ctrl *cond,
3761 struct mlx5_flow_attr **cond_attr,
3763 struct netlink_ext_ack *extack)
3765 struct mlx5_flow_attr *attr;
3768 *cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
3769 mlx5e_get_flow_namespace(flow));
3775 switch (cond->act_id) {
3776 case FLOW_ACTION_DROP:
3777 attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3779 case FLOW_ACTION_ACCEPT:
3780 case FLOW_ACTION_PIPE:
3781 if (set_branch_dest_ft(flow->priv, attr))
3784 case FLOW_ACTION_JUMP:
3786 NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
3790 *jump_count = cond->extval;
3791 if (set_branch_dest_ft(flow->priv, attr))
3807 dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
3808 struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
3809 struct mlx5e_tc_jump_state *jump_state)
3811 if (!jump_state->jump_count)
3814 /* A single tc action can instantiate multiple offload actions (e.g. pedit).
3815 * Count the jump only once per tc action.
3817 if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
3820 jump_state->last_id = act->id;
3821 jump_state->last_index = act->hw_index;
3823 /* nothing to do for intermediate actions */
3824 if (--jump_state->jump_count > 1)
3827 if (jump_state->jump_count == 1) { /* last action in the jump action list */
3829 /* create a new attribute after this action */
3830 jump_state->jump_target = true;
3832 if (tc_act->is_terminating_action) { /* the branch ends here */
3833 attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
3834 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3835 } else { /* the branch continues executing the rest of the actions */
3836 struct mlx5e_post_act *post_act;
3838 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3839 post_act = get_post_action(priv);
3840 attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
3842 } else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
3843 /* This is the post action for the jumping attribute (either red or green).
3844 * Use the stored jumping_attr to set the post act id on the jumping attribute.
3846 attr->jumping_attr = jump_state->jumping_attr;
3851 parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
3852 struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
3853 struct mlx5e_tc_jump_state *jump_state,
3854 struct netlink_ext_ack *extack)
3856 struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
3857 u32 jump_count = jump_state->jump_count;
3860 if (!tc_act->get_branch_ctrl)
3863 tc_act->get_branch_ctrl(act, &cond_true, &cond_false);
3865 err = alloc_branch_attr(flow, &cond_true,
3866 &attr->branch_true, &jump_count, extack);
3871 jump_state->jumping_attr = attr->branch_true;
3873 err = alloc_branch_attr(flow, &cond_false,
3874 &attr->branch_false, &jump_count, extack);
3876 goto err_branch_false;
3878 if (jump_count && !jump_state->jumping_attr)
3879 jump_state->jumping_attr = attr->branch_false;
3881 jump_state->jump_count = jump_count;
3883 /* branching action requires its own counter */
3884 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3885 flow_flag_set(flow, USE_ACT_STATS);
3890 free_branch_attr(flow, attr->branch_true);
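/* Walk all tc actions, parsing each into the current flow attr. A new
 * attr is split off after every multi table action or jump target, and
 * the resulting attr list is wired up via alloc_flow_post_acts().
 */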
3896 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3897 struct flow_action *flow_action)
3899 struct netlink_ext_ack *extack = parse_state->extack;
3900 struct mlx5e_tc_flow *flow = parse_state->flow;
3901 struct mlx5e_tc_jump_state jump_state = {};
3902 struct mlx5_flow_attr *attr = flow->attr;
3903 enum mlx5_flow_namespace_type ns_type;
3904 struct mlx5e_priv *priv = flow->priv;
3905 struct mlx5_flow_attr *prev_attr;
3906 struct flow_action_entry *act;
3907 struct mlx5e_tc_act *tc_act;
3908 int err, i, i_split = 0;
3911 ns_type = mlx5e_get_flow_namespace(flow);
3912 list_add(&attr->list, &flow->attrs);
3914 flow_action_for_each(i, act, flow_action) {
3915 jump_state.jump_target = false;
3916 is_missable = false;
3919 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3921 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3923 goto out_free_post_acts;
3926 if (tc_act->can_offload && !tc_act->can_offload(parse_state, act, i, attr)) {
3928 goto out_free_post_acts;
3931 err = tc_act->parse_action(parse_state, act, priv, attr);
3933 goto out_free_post_acts;
3935 dec_jump_count(act, tc_act, attr, priv, &jump_state);
3937 err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
3939 goto out_free_post_acts;
3941 parse_state->actions |= attr->action;
3943 /* Split attr for multi table act if not the last act. */
3944 if (jump_state.jump_target ||
3945 (tc_act->is_multi_table_act &&
3946 tc_act->is_multi_table_act(priv, act, attr) &&
3947 i < flow_action->num_entries - 1)) {
3948 is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
3950 err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
3953 goto out_free_post_acts;
3955 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3958 goto out_free_post_acts;
3962 parse_state->if_count = 0;
3963 list_add(&attr->list, &flow->attrs);
3967 /* Add counter to prev, and assign act to new (next) attr */
3968 prev_attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3969 flow_flag_set(flow, USE_ACT_STATS);
3971 attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
3972 } else if (!tc_act->stats_action) {
3973 prev_attr->tc_act_cookies[prev_attr->tc_act_cookies_count++] = act->cookie;
3977 err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
3979 goto out_free_post_acts;
3981 err = alloc_flow_post_acts(flow, extack);
3983 goto out_free_post_acts;
3988 free_flow_post_acts(flow);
3994 flow_action_supported(struct flow_action *flow_action,
3995 struct netlink_ext_ack *extack)
3997 if (!flow_action_has_entries(flow_action)) {
3998 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
4002 if (!flow_action_hw_stats_check(flow_action, extack,
4003 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
4004 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
4012 parse_tc_nic_actions(struct mlx5e_priv *priv,
4013 struct flow_action *flow_action,
4014 struct mlx5e_tc_flow *flow,
4015 struct netlink_ext_ack *extack)
4017 struct mlx5e_tc_act_parse_state *parse_state;
4018 struct mlx5e_tc_flow_parse_attr *parse_attr;
4019 struct mlx5_flow_attr *attr = flow->attr;
4022 err = flow_action_supported(flow_action, extack);
4026 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
4027 parse_attr = attr->parse_attr;
4028 parse_state = &parse_attr->parse_state;
4029 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4030 parse_state->ct_priv = get_ct_priv(priv);
4032 err = parse_tc_actions(parse_state, flow_action);
4036 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4040 err = verify_attr_actions(attr->action, extack);
4044 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4045 parse_attr, flow, extack))
4051 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
4052 struct net_device *peer_netdev)
4054 struct mlx5e_priv *peer_priv;
4056 peer_priv = netdev_priv(peer_netdev);
4058 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
4059 mlx5e_eswitch_vf_rep(priv->netdev) &&
4060 mlx5e_eswitch_vf_rep(peer_netdev) &&
4061 mlx5e_same_hw_devs(priv, peer_priv));
4064 static bool same_hw_reps(struct mlx5e_priv *priv,
4065 struct net_device *peer_netdev)
4067 struct mlx5e_priv *peer_priv;
4069 peer_priv = netdev_priv(peer_netdev);
4071 return mlx5e_eswitch_rep(priv->netdev) &&
4072 mlx5e_eswitch_rep(peer_netdev) &&
4073 mlx5e_same_hw_devs(priv, peer_priv);
4076 static bool is_lag_dev(struct mlx5e_priv *priv,
4077 struct net_device *peer_netdev)
4079 return ((mlx5_lag_is_sriov(priv->mdev) ||
4080 mlx5_lag_is_multipath(priv->mdev)) &&
4081 same_hw_reps(priv, peer_netdev));
4084 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
4086 return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
4089 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4090 struct net_device *out_dev)
4092 if (is_merged_eswitch_vfs(priv, out_dev))
4095 if (is_multiport_eligible(priv, out_dev))
4098 if (is_lag_dev(priv, out_dev))
4101 return mlx5e_eswitch_rep(out_dev) &&
4102 same_port_devs(priv, netdev_priv(out_dev));
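/* Redirect to an internal port: allocate (or get) the int port mapping,
 * store its metadata in VPORT_TO_REG via a modify-header action, and
 * forward to the root fdb chain to match on the new source vport.
 */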
4105 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
4106 struct mlx5_flow_attr *attr,
4108 enum mlx5e_tc_int_port_type type,
4112 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4113 struct mlx5e_tc_int_port_priv *int_port_priv;
4114 struct mlx5e_tc_flow_parse_attr *parse_attr;
4115 struct mlx5e_tc_int_port *dest_int_port;
4118 parse_attr = attr->parse_attr;
4119 int_port_priv = mlx5e_get_int_port_priv(priv);
4121 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
4122 if (IS_ERR(dest_int_port))
4123 return PTR_ERR(dest_int_port);
4125 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
4126 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
4127 mlx5e_tc_int_port_get_metadata(dest_int_port));
4129 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
4133 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4135 esw_attr->dest_int_port = dest_int_port;
4136 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
4137 esw_attr->split_count = out_index;
4139 /* Forward to root fdb for matching against the new source vport */
4140 attr->dest_chain = 0;
4146 parse_tc_fdb_actions(struct mlx5e_priv *priv,
4147 struct flow_action *flow_action,
4148 struct mlx5e_tc_flow *flow,
4149 struct netlink_ext_ack *extack)
4151 struct mlx5e_tc_act_parse_state *parse_state;
4152 struct mlx5e_tc_flow_parse_attr *parse_attr;
4153 struct mlx5_flow_attr *attr = flow->attr;
4154 struct mlx5_esw_flow_attr *esw_attr;
4155 struct net_device *filter_dev;
4158 err = flow_action_supported(flow_action, extack);
4162 esw_attr = attr->esw_attr;
4163 parse_attr = attr->parse_attr;
4164 filter_dev = parse_attr->filter_dev;
4165 parse_state = &parse_attr->parse_state;
4166 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4167 parse_state->ct_priv = get_ct_priv(priv);
4169 err = parse_tc_actions(parse_state, flow_action);
4173 /* Forwarding to/from an internal port can have only one destination */
4174 if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
4175 esw_attr->out_count > 1) {
4176 NL_SET_ERR_MSG_MOD(extack,
4177 "Rules with internal port can have only one destination");
4181 /* Forwarding from a tunnel/internal port to an internal port is not supported */
4182 if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
4183 esw_attr->dest_int_port) {
4184 NL_SET_ERR_MSG_MOD(extack,
4185 "Forwarding from tunnel/internal port to internal port is not supported");
4189 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4193 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4194 parse_attr, flow, extack))
4200 static void get_flags(int flags, unsigned long *flow_flags)
4202 unsigned long __flow_flags = 0;
4204 if (flags & MLX5_TC_FLAG(INGRESS))
4205 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4206 if (flags & MLX5_TC_FLAG(EGRESS))
4207 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4209 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4210 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4211 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4212 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4213 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4214 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4216 *flow_flags = __flow_flags;
4219 static const struct rhashtable_params tc_ht_params = {
4220 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4221 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4222 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4223 .automatic_shrinking = true,
4226 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4227 unsigned long flags)
4229 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
4230 struct mlx5e_rep_priv *rpriv;
4232 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4233 rpriv = priv->ppriv;
4234 return &rpriv->tc_ht;
4235 } else /* NIC offload */
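/* A peer flow is needed when the eswitches are paired and the lag is
 * sriov/multipath with a VF rep ingress or an encap action, or when
 * running in multiport eswitch mode.
 */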
4239 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4241 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4242 struct mlx5_flow_attr *attr = flow->attr;
4243 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4244 flow_flag_test(flow, INGRESS);
4245 bool act_is_encap = !!(attr->action &
4246 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4247 bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.eswitch->devcom);
4252 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4253 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4254 (is_rep_ingress || act_is_encap))
4257 if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
4263 struct mlx5_flow_attr *
4264 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4266 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4267 sizeof(struct mlx5_esw_flow_attr) :
4268 sizeof(struct mlx5_nic_flow_attr);
4269 struct mlx5_flow_attr *attr;
4271 attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4275 INIT_LIST_HEAD(&attr->list);
4280 mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
4282 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
4283 struct mlx5_esw_flow_attr *esw_attr;
4288 if (attr->post_act_handle)
4289 mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
4291 mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
4293 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
4294 mlx5_fc_destroy(counter_dev, attr->counter);
4296 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
4297 mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
4298 mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
4301 if (mlx5e_is_eswitch_flow(flow)) {
4302 esw_attr = attr->esw_attr;
4304 if (esw_attr->int_port)
4305 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
4306 esw_attr->int_port);
4308 if (esw_attr->dest_int_port)
4309 mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
4310 esw_attr->dest_int_port);
4313 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
4315 free_branch_attr(flow, attr->branch_true);
4316 free_branch_attr(flow, attr->branch_false);
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr;
	struct mlx5e_tc_flow *flow;
	int err = -ENOMEM;
	int out_index;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow)
		goto err_free;

	flow->flags = flow_flags;
	flow->cookie = f->cookie;
	flow->priv = priv;

	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	if (!attr)
		goto err_free;

	flow->attr = attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	INIT_LIST_HEAD(&flow->attrs);
	INIT_LIST_HEAD(&flow->peer_flows);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);
	init_completion(&flow->del_hw_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
		     struct mlx5e_tc_flow_parse_attr *parse_attr,
		     struct flow_cls_offload *f)
{
	attr->parse_attr = parse_attr;
	attr->chain = f->common.chain_index;
	attr->prio = f->common.prio;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	mlx5e_flow_attr_init(attr, parse_attr, f);

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
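/* Build and offload a single FDB flow: allocate the flow and its attributes,
 * parse the flower match and the ct matches (actions validation depends on
 * the latter), parse the actions, then install the rule. On -ENETUNREACH
 * under multipath LAG the flow is kept as "unready" and re-offloaded later
 * instead of failing.
 */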
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags,
				      struct mlx5_eswitch *peer_esw)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	int i = mlx5_get_dev_index(peer_esw->dev);
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is chosen based on where the packet originated from:
	 * packets redirected to the uplink use the same mdev as the
	 * original flow, while packets redirected from the uplink use
	 * the peer mdev.
	 * Multiport eswitch is the special case where the original mdev
	 * must be kept.
	 */
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5_devcom_comp_dev *devcom = priv->mdev->priv.eswitch->devcom, *pos;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!is_peer_flow_needed(flow)) {
		*__flow = flow;
		return 0;
	}

	if (!mlx5_devcom_for_each_peer_begin(devcom)) {
		err = -ENODEV;
		goto clean_flow;
	}

	mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
		if (err)
			goto peer_clean;
	}

	mlx5_devcom_for_each_peer_end(devcom);

	*__flow = flow;
	return 0;

peer_clean:
	mlx5e_tc_del_fdb_peers_flow(flow);
	mlx5_devcom_for_each_peer_end(devcom);
clean_flow:
	mlx5e_tc_del_fdb_flow(priv, flow);
	return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
	 * function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
/* As IPsec and TC order is not aligned between software and hardware-offload,
 * either IPsec offload or TC offload, not both, is allowed for a specific interface.
 */
static bool is_tc_ipsec_order_check_needed(struct net_device *filter, struct mlx5e_priv *priv)
{
	if (!IS_ENABLED(CONFIG_MLX5_EN_IPSEC))
		return false;

	if (filter != priv->netdev)
		return false;

	if (mlx5e_eswitch_vf_rep(priv->netdev))
		return false;

	return true;
}
static int mlx5e_tc_block_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!is_tc_ipsec_order_check_needed(filter, priv))
		return 0;

	if (mdev->num_block_tc)
		return -EBUSY;

	mdev->num_block_ipsec++;

	return 0;
}
static void mlx5e_tc_unblock_ipsec_offload(struct net_device *filter, struct mlx5e_priv *priv)
{
	if (!is_tc_ipsec_order_check_needed(filter, priv))
		return;

	priv->mdev->num_block_ipsec--;
}
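/* FLOW_CLS_REPLACE entry point: take a temporary hold on the eswitch mode and
 * block IPsec offload for the duration of the setup, reject duplicate cookies
 * (unless the rule is a permitted duplicate on a lag slave), then build the
 * flow and publish it in the cookie hashtable.
 */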
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	err = mlx5e_tc_block_ipsec_offload(dev, priv);
	if (err)
		goto esw_release;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5e_tc_unblock_ipsec_offload(dev, priv);
	mlx5_esw_put(priv->mdev);
esw_release:
	mlx5_esw_release(priv->mdev);
	return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
		flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5e_tc_unblock_ipsec_offload(dev, priv);
	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act)
{
	return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow);
			if (!counter)
				goto errout;

			mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		}
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	if (esw && !mlx5_devcom_for_each_peer_begin(esw->devcom))
		goto out;

	if (flow_flag_test(flow, DUP)) {
		struct mlx5e_tc_flow *peer_flow;

		list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
			u64 packets2;
			u64 lastuse2;
			u64 bytes2;

			if (!flow_flag_test(peer_flow, OFFLOADED))
				continue;
			if (flow_flag_test(flow, USE_ACT_STATS)) {
				f->use_act_stats = true;
				break;
			}

			counter = mlx5e_tc_get_counter(peer_flow);
			if (!counter)
				goto no_peer_counter;
			mlx5_fc_query_cached(counter, &bytes2, &packets2,
					     &lastuse2);

			bytes += bytes2;
			packets += packets2;
			lastuse = max_t(u64, lastuse, lastuse2);
		}
	}

no_peer_counter:
	if (esw)
		mlx5_devcom_for_each_peer_end(esw->devcom);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
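/* Convert the tc police rate (bytes/sec) to the Mbit/sec granularity the
 * eswitch QoS interface expects and apply it to the VF vport; rate 0 clears
 * the limit. E.g. a rate of 1562500 B/s is 12500000 b/s, which rounds to
 * (12500000 + 500000) / 1000000 = 13 Mbit/s.
 */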
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest Mbit/sec
	 * (one million bits per second). If rate is non-zero, configure a
	 * minimum of 1 Mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
static int
tc_matchall_police_validate(const struct flow_action *action,
			    const struct flow_action_entry *act,
			    struct netlink_ext_ack *extack)
{
	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = tc_matchall_police_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
						   &priv->stats.rep_stats);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
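/* matchall stats are reported as deltas: each call snapshots the representor
 * stats and reports the difference since the previous snapshot, taken either
 * here or when the police action was installed.
 */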
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
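/* When a peer device goes away, walk the hairpin table in two phases: first
 * grab references to all entries under the table lock, then, outside the
 * lock, wait for each entry's setup to complete before telling firmware to
 * clear the dead peer and dropping the reference.
 */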
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}
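/* NIC tc initialization: set up the flow hashtable, the chain mapping, the
 * miss table used to restore chain metadata on a miss, the chains object
 * itself and the post-action/CT/notifier/action-stats infrastructure,
 * unwinding in reverse order on failure.
 */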
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;
	attr.fs_base_prio = MLX5E_TC_PRIO;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}
	mlx5_chains_print_info(tc->chains);

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));

	tc->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(tc->action_stats_handle)) {
		err = PTR_ERR(tc->action_stats_handle);
		goto err_act_stats;
	}

	return 0;

err_act_stats:
	unregister_netdevice_notifier_dev_net(priv->netdev,
					      &tc->netdevice_nb,
					      &tc->netdevice_nn);
err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	debugfs_remove_recursive(tc->dfs_root);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
	mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	return 0;
}
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
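/* Eswitch (uplink representor) tc initialization: post-action, CT, internal
 * ports and sampling come first, then the tunnel and tunnel-options mappings
 * used to restore tunnel metadata on miss, the encap infrastructure, action
 * stats, and finally devcom pairing keyed by the port parent id.
 */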
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct netdev_phys_item_id ppid;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id, key;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Two last values are reserved for stack devices slow path table mark
	 * and bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(uplink_priv->action_stats_handle)) {
		err = PTR_ERR(uplink_priv->action_stats_handle);
		goto err_action_counter;
	}

	err = dev_get_port_parent_id(priv->netdev, &ppid, false);
	if (!err) {
		memcpy(&key, &ppid.id, sizeof(key));
		mlx5_esw_offloads_devcom_init(esw, key);
	}

	return 0;

err_action_counter:
	mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	mlx5_esw_offloads_devcom_cleanup(esw);

	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;
	int i;

	for (i = 0; i < MLX5_MAX_PORTS; i++) {
		if (i == mlx5_get_dev_index(esw->dev))
			continue;
		list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
			mlx5e_tc_del_fdb_peers_flow(flow);
	}
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
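/* tunnel_id packs two mapping ids: the low ENC_OPTS_BITS carry the id of the
 * tunnel options mapping and the remaining high bits carry the id of the
 * tunnel match key mapping, i.e.
 *	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
 *	tun_id      = tunnel_id >> ENC_OPTS_BITS;
 * A zero tun_id means there is no tunnel metadata to restore.
 */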
static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				    struct mlx5e_tc_update_priv *tc_priv,
				    u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct tunnel_match_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	switch (key.enc_control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
		break;
	default:
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}

	tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set fwd_dev so we do dev_put() after datapath */
	tc_priv->fwd_dev = dev;

	skb->dev = dev;

	return true;
}
static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
					 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
					 u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct tc_skb_ext *tc_skb_ext;
	u64 act_miss_cookie;
	u32 chain;

	chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
	act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
			  mapped_obj->act_miss_cookie : 0;
	if (chain || act_miss_cookie) {
		if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
			return false;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext) {
			WARN_ON(1);
			return false;
		}

		if (act_miss_cookie) {
			tc_skb_ext->act_miss_cookie = act_miss_cookie;
			tc_skb_ext->act_miss = 1;
		} else {
			tc_skb_ext->chain = chain;
		}
	}

	if (tc_priv)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	return true;
}
static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
					struct mlx5_mapped_obj *mapped_obj,
					struct mlx5e_tc_update_priv *tc_priv)
{
	if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
		netdev_dbg(priv->netdev,
			   "Failed to restore tunnel info for sampled packet\n");
		return;
	}
	mlx5e_tc_sample_skb(skb, mapped_obj);
}
static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
					  struct mlx5_mapped_obj *mapped_obj,
					  struct mlx5e_tc_update_priv *tc_priv,
					  u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	bool forward_tx = false;

	/* Tunnel restore takes precedence over int port restore */
	if (tunnel_id)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
				      mapped_obj->int_port_metadata, &forward_tx)) {
		/* Set fwd_dev for future dev_put */
		tc_priv->fwd_dev = skb->dev;
		tc_priv->forward_tx = forward_tx;

		return true;
	}

	return false;
}
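/* Restore skb metadata after a table miss: look up the object the metadata
 * register was mapped to and dispatch on its type - chain/act-miss metadata,
 * sampled packet, or internal port forwarding.
 */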
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
			 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
			 struct mlx5_tc_ct_priv *ct_priv,
			 u32 zone_restore_id, u32 tunnel_id,
			 struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	int err;

	err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
	if (err) {
		netdev_dbg(skb->dev,
			   "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
			   mapped_obj_id, err);
		return false;
	}

	switch (mapped_obj.type) {
	case MLX5_MAPPED_OBJ_CHAIN:
	case MLX5_MAPPED_OBJ_ACT_MISS:
		return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
						    tunnel_id, tc_priv);
	case MLX5_MAPPED_OBJ_SAMPLE:
		mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
		tc_priv->skb_done = true;
		return true;
	case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
		return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
	default:
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}

	return true;
}
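/* NIC rx variant: both the mapped object id and the zone restore id are
 * recovered from the flow-table metadata in the CQE, the former from the
 * chain-tag bits and the latter from the zone-restore register offset.
 * NIC flows carry no tunnel metadata, hence tunnel_id 0 and no tc_priv.
 */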
bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	u32 mapped_obj_id, reg_b, zone_restore_id;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mapping_ctx *mapping_ctx;
	struct mlx5e_tc_table *tc;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			  ESW_ZONE_ID_MASK;
	ct_priv = tc->ct;
	mapping_ctx = tc->mapping;

	return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
				   0, NULL);
}
static struct mapping_ctx *
mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc;
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		esw = priv->mdev->priv.eswitch;
		ctx = esw->offloads.reg_c0_obj_pool;
	} else {
		tc = mlx5e_fs_get_tc(priv->fs);
		ctx = tc->mapping;
	}

	return ctx;
}
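/* Map an action miss cookie to a hardware id (and, in switchdev mode, install
 * a restore rule for it); the _put counterpart undoes both. The id is carried
 * in the reg_c0 object mapping selected above, so the miss path can recover
 * the cookie via mlx5e_tc_update_skb().
 */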
int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping)
{
	struct mlx5_mapped_obj mapped_obj = {};
	struct mlx5_eswitch *esw;
	struct mapping_ctx *ctx;
	int err;

	ctx = mlx5e_get_priv_obj_mapping(priv);
	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
	mapped_obj.act_miss_cookie = act_miss_cookie;
	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
	if (err)
		return err;

	if (!is_mdev_switchdev_mode(priv->mdev))
		return 0;

	esw = priv->mdev->priv.eswitch;
	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
	if (IS_ERR(attr->act_id_restore_rule)) {
		err = PTR_ERR(attr->act_id_restore_rule);
		goto err_rule;
	}

	return 0;

err_rule:
	mapping_remove(ctx, *act_miss_mapping);
	return err;
}
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				      u32 act_miss_mapping)
{
	struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);

	if (is_mdev_switchdev_mode(priv->mdev))
		mlx5_del_flow_rules(attr->act_id_restore_rule);
	mapping_remove(ctx, act_miss_mapping);
}