/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>

#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *    container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};

struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	u64 cookie;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g. due to missing route) */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_efi_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */
	struct mlx5_flow_attr *attr;
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 1,
		.mlen = 3,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 2,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
	data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
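
/* Note (illustrative, not from the original source): the shift above packs
 * the match_len least-significant bytes of a host-order value into the
 * big-endian register layout. E.g. with match_len = 2 and data = 0x1234 on
 * little-endian, cpu_to_be32() reads back as 0x34120000, the >> 16 leaves
 * 0x00003412, and the 2-byte memcpy stores 0x12 0x34 - the value in network
 * byte order at the register's offset.
 */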
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *data,
				u32 *mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(mask, fmask, match_len);
	memcpy(data, fval, match_len);

	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}
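
/* Note (illustrative): this is the exact inverse of the packing done in
 * mlx5e_tc_match_to_reg_match() above - the low match_len bytes read from
 * the match param are shifted up to the most-significant bytes and swapped
 * back to host order, so get_match(set(x)) == x for any in-range value.
 */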
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has a 5-bit length field and 0 means 32 bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
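
/* Usage sketch (illustrative, not from the original file): callers build a
 * modify-header program one register write at a time, e.g. the CT code
 * stores a conntrack zone so it can be restored after a steering-domain
 * jump:
 *
 *	err = mlx5e_tc_match_to_reg_set(mdev, mod_hdr_acts,
 *					MLX5_FLOW_NAMESPACE_FDB,
 *					ZONE_TO_REG, zone);
 *
 * The matching side of the same register is then expressed with
 * mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, zone, zone_mask).
 */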
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,		\
						      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,		\
						    MLX5E_TC_FLOW_FLAG_##flag)

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}

struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));

		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
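
/* Note (illustrative, mirroring how the regular RX indirection table is
 * filled elsewhere in the driver): for ETH_RSS_HASH_XOR the index bits are
 * reversed, e.g. with sz = 256, slot i = 1 (0b00000001) reads
 * indirection_rqt[128] (0b10000000) instead of indirection_rqt[1], spreading
 * adjacent table slots across the channel list.
 */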
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}

	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);

	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
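
/* Note (illustrative): the hash key packs the peer vhca_id into the high
 * 16 bits and the matched PCP priority into the low bits, so e.g.
 * (peer_vhca_id = 5, prio = 3) hashes as 0x50003.
 */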
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
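	/* Example (illustrative): on a 100Gbps port, max_t() keeps link_speed
	 * at 100000, do_div() yields 2, so the hairpin pair is built with two
	 * channels; anything at or below 50Gbps gets a single channel.
	 */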

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = priv->fs.vlan.ft.t;
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
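
/* Note (illustrative): the destination selection above is ordered - an
 * explicit dest_ft first, then a hairpin RSS flow table, then a single
 * hairpin TIR, and only then the generic chain/VLAN table; a flow counter
 * may be appended as a second destination entry.
 */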
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter = NULL;
	int err;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kfree(flow->attr);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static int mlx5e_attach_decap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
		return;
	}

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

offload_rule_0:
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);

	kfree(slow_attr);
}
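
/* Note (illustrative): a "slow path" rule forwards packets to software
 * instead of applying the full action list; it is used while a flow's encap
 * destination has no resolved neighbour yet, and is swapped for the real
 * FDB rule in mlx5e_tc_encap_flows_add() once the neighbour appears.
 */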

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			return err;
	}

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	int out_index;

	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(flow->attr);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		attr = flow->attr;
		esw_attr = attr->esw_attr;
		spec = &attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_attr *attr;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		attr = flow->attr;
		esw_attr = attr->esw_attr;
		spec = &attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
		/* mark the flow's encap dest as non-valid */
		esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * the next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->encap_header);
	kfree(e);
}

static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
				struct mlx5e_decap_entry *d)
{
	WARN_ON(!list_empty(&d->flows));

	if (!d->compl_result)
		mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);

	kfree(d);
}

void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
		return;
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_entry *d = flow->decap_reformat;

	if (!d)
		return;

	mutex_lock(&esw->offloads.decap_tbl_lock);
	list_del(&flow->l3_to_l2_reformat);
	flow->decap_reformat = NULL;

	if (!refcount_dec_and_test(&d->refcnt)) {
		mutex_unlock(&esw->offloads.decap_tbl_lock);
		return;
	}
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
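
/* Note (illustrative): "full match" here means class 0xffff and type 0xff
 * in the mask, i.e. the user either doesn't care about geneve options at
 * all (all-zero mask) or matches the option header exactly; e.g. a mask
 * with only opt_data bits set is rejected as a partial match in chain > 0.
 */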

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
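	/* Note (illustrative): the register value packs the mapped tunnel id
	 * in the bits above ENC_OPTS_BITS and the mapped enc-opts id in the
	 * low ENC_OPTS_BITS; when no enc opts were mapped, the low bits are
	 * masked out so any enc-opts id matches.
	 */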

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
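
/* Note (illustrative, an assumption rather than from this file): when the
 * device can match on the parsed IP version field, an exact ETH_P_IP or
 * ETH_P_IPV6 ethertype match is translated to ip_version 4/6, a more robust
 * way to express the same L3 match in hardware; otherwise the raw ethertype
 * field is matched as given.
 */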
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->attr->chain;
	sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
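
/* Note (illustrative): in chain 0 the tunnel headers are still present, so
 * the rule matches them directly and (mpls-over-udp aside) adds a decap
 * action; in chain > 0 the packet is already decapsulated, so the tunnel is
 * recovered by matching the mapped tunnel id written to a register by
 * mlx5e_get_flow_tunnel_id() above.
 */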

static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}

static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -EOPNOTSUPP;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}

2219 static bool skip_key_basic(struct net_device *filter_dev,
2220 struct flow_cls_offload *f)
2222 /* When doing mpls over udp decap, the user needs to provide
2223 * MPLS_UC as the protocol in order to be able to match on mpls
2224 * label fields. However, the actual ethertype is IP so we want to
2225 * avoid matching on this, otherwise we'll fail the match.
2227 if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2233 static int __parse_cls_flower(struct mlx5e_priv *priv,
2234 struct mlx5e_tc_flow *flow,
2235 struct mlx5_flow_spec *spec,
2236 struct flow_cls_offload *f,
2237 struct net_device *filter_dev,
2238 u8 *inner_match_level, u8 *outer_match_level)
2240 struct netlink_ext_ack *extack = f->common.extack;
2241 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2243 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2245 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2247 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2249 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2250 struct flow_dissector *dissector = rule->match.dissector;
2256 match_level = outer_match_level;
2258 if (dissector->used_keys &
2259 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2260 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2261 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2262 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2263 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2264 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2265 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2266 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2267 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2268 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2269 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2270 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2271 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2272 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2273 BIT(FLOW_DISSECTOR_KEY_TCP) |
2274 BIT(FLOW_DISSECTOR_KEY_IP) |
2275 BIT(FLOW_DISSECTOR_KEY_CT) |
2276 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2277 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2278 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2279 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2280 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2281 dissector->used_keys);
2285 if (mlx5e_get_tc_tun(filter_dev)) {
2286 bool match_inner = false;
2288 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2289 outer_match_level, &match_inner);
2294 /* header pointers should point to the inner headers
2295 * if the packet was decapsulated already.
2296 * outer headers are set by parse_tunnel_attr.
2298 match_level = inner_match_level;
2299 headers_c = get_match_inner_headers_criteria(spec);
2300 headers_v = get_match_inner_headers_value(spec);
2304 err = mlx5e_flower_parse_meta(filter_dev, f);
2308 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2309 !skip_key_basic(filter_dev, f)) {
2310 struct flow_match_basic match;
2312 flow_rule_match_basic(rule, &match);
2313 mlx5e_tc_set_ethertype(priv->mdev, &match,
2314 match_level == outer_match_level,
2315 headers_c, headers_v);
2317 if (match.mask->n_proto)
2318 *match_level = MLX5_MATCH_L2;
2320 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2321 is_vlan_dev(filter_dev)) {
2322 struct flow_dissector_key_vlan filter_dev_mask;
2323 struct flow_dissector_key_vlan filter_dev_key;
2324 struct flow_match_vlan match;
2326 if (is_vlan_dev(filter_dev)) {
2327 match.key = &filter_dev_key;
2328 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2329 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2330 match.key->vlan_priority = 0;
2331 match.mask = &filter_dev_mask;
2332 memset(match.mask, 0xff, sizeof(*match.mask));
2333 match.mask->vlan_priority = 0;
2335 flow_rule_match_vlan(rule, &match);
2337 if (match.mask->vlan_id ||
2338 match.mask->vlan_priority ||
2339 match.mask->vlan_tpid) {
2340 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2341 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2342 svlan_tag, 1);
2343 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2344 svlan_tag, 1);
2345 } else {
2346 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2347 cvlan_tag, 1);
2348 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2349 cvlan_tag, 1);
2350 }
2352 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2353 match.mask->vlan_id);
2354 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2355 match.key->vlan_id);
2357 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2358 match.mask->vlan_priority);
2359 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2360 match.key->vlan_priority);
2362 *match_level = MLX5_MATCH_L2;
2364 } else if (*match_level != MLX5_MATCH_NONE) {
2365 /* cvlan_tag enabled in match criteria and
2366 * disabled in match value means both S & C tags
2367 * don't exist (untagged for both)
2368 */
2369 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2370 *match_level = MLX5_MATCH_L2;
2373 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2374 struct flow_match_vlan match;
2376 flow_rule_match_cvlan(rule, &match);
2377 if (match.mask->vlan_id ||
2378 match.mask->vlan_priority ||
2379 match.mask->vlan_tpid) {
2380 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2381 MLX5_SET(fte_match_set_misc, misc_c,
2382 outer_second_svlan_tag, 1);
2383 MLX5_SET(fte_match_set_misc, misc_v,
2384 outer_second_svlan_tag, 1);
2386 MLX5_SET(fte_match_set_misc, misc_c,
2387 outer_second_cvlan_tag, 1);
2388 MLX5_SET(fte_match_set_misc, misc_v,
2389 outer_second_cvlan_tag, 1);
2392 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2393 match.mask->vlan_id);
2394 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2395 match.key->vlan_id);
2396 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2397 match.mask->vlan_priority);
2398 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2399 match.key->vlan_priority);
2401 *match_level = MLX5_MATCH_L2;
2402 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2406 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2407 struct flow_match_eth_addrs match;
2409 flow_rule_match_eth_addrs(rule, &match);
2410 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2413 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2417 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2420 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2424 if (!is_zero_ether_addr(match.mask->src) ||
2425 !is_zero_ether_addr(match.mask->dst))
2426 *match_level = MLX5_MATCH_L2;
2429 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2430 struct flow_match_control match;
2432 flow_rule_match_control(rule, &match);
2433 addr_type = match.key->addr_type;
2435 /* the HW doesn't support frag first/later */
2436 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2439 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2440 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2441 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2442 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2444 /* the HW doesn't need L3 inline to match on frag=no */
2445 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2446 *match_level = MLX5_MATCH_L2;
2447 /* *** L2 attributes parsing up to here *** */
2449 *match_level = MLX5_MATCH_L3;
2453 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2454 struct flow_match_basic match;
2456 flow_rule_match_basic(rule, &match);
2457 ip_proto = match.key->ip_proto;
2459 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2460 match.mask->ip_proto);
2461 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2462 match.key->ip_proto);
2464 if (match.mask->ip_proto)
2465 *match_level = MLX5_MATCH_L3;
2468 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2469 struct flow_match_ipv4_addrs match;
2471 flow_rule_match_ipv4_addrs(rule, &match);
2472 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2473 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2474 &match.mask->src, sizeof(match.mask->src));
2475 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2476 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2477 &match.key->src, sizeof(match.key->src));
2478 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2479 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2480 &match.mask->dst, sizeof(match.mask->dst));
2481 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2482 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2483 &match.key->dst, sizeof(match.key->dst));
2485 if (match.mask->src || match.mask->dst)
2486 *match_level = MLX5_MATCH_L3;
2489 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2490 struct flow_match_ipv6_addrs match;
2492 flow_rule_match_ipv6_addrs(rule, &match);
2493 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2494 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2495 &match.mask->src, sizeof(match.mask->src));
2496 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2497 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2498 &match.key->src, sizeof(match.key->src));
2500 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2501 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2502 &match.mask->dst, sizeof(match.mask->dst));
2503 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2504 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2505 &match.key->dst, sizeof(match.key->dst));
2507 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2508 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2509 *match_level = MLX5_MATCH_L3;
2512 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2513 struct flow_match_ip match;
2515 flow_rule_match_ip(rule, &match);
2516 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2517 match.mask->tos & 0x3);
2518 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2519 match.key->tos & 0x3);
2521 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2522 match.mask->tos >> 2);
2523 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2524 match.key->tos >> 2);
2526 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2527 match.mask->ttl);
2528 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2529 match.key->ttl);
2531 if (match.mask->ttl &&
2532 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2533 ft_field_support.outer_ipv4_ttl)) {
2534 NL_SET_ERR_MSG_MOD(extack,
2535 "Matching on TTL is not supported");
2539 if (match.mask->tos || match.mask->ttl)
2540 *match_level = MLX5_MATCH_L3;
2543 /* *** L3 attributes parsing up to here *** */
2545 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2546 struct flow_match_ports match;
2548 flow_rule_match_ports(rule, &match);
2551 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2552 tcp_sport, ntohs(match.mask->src));
2553 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2554 tcp_sport, ntohs(match.key->src));
2556 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2557 tcp_dport, ntohs(match.mask->dst));
2558 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2559 tcp_dport, ntohs(match.key->dst));
2563 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2564 udp_sport, ntohs(match.mask->src));
2565 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2566 udp_sport, ntohs(match.key->src));
2568 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2569 udp_dport, ntohs(match.mask->dst));
2570 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2571 udp_dport, ntohs(match.key->dst));
2574 NL_SET_ERR_MSG_MOD(extack,
2575 "Only UDP and TCP transports are supported for L4 matching");
2576 netdev_err(priv->netdev,
2577 "Only UDP and TCP transport are supported\n");
2581 if (match.mask->src || match.mask->dst)
2582 *match_level = MLX5_MATCH_L4;
2585 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2586 struct flow_match_tcp match;
2588 flow_rule_match_tcp(rule, &match);
2589 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2590 ntohs(match.mask->flags));
2591 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2592 ntohs(match.key->flags));
2594 if (match.mask->flags)
2595 *match_level = MLX5_MATCH_L4;
2601 static int parse_cls_flower(struct mlx5e_priv *priv,
2602 struct mlx5e_tc_flow *flow,
2603 struct mlx5_flow_spec *spec,
2604 struct flow_cls_offload *f,
2605 struct net_device *filter_dev)
2607 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2608 struct netlink_ext_ack *extack = f->common.extack;
2609 struct mlx5_core_dev *dev = priv->mdev;
2610 struct mlx5_eswitch *esw = dev->priv.eswitch;
2611 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2612 struct mlx5_eswitch_rep *rep;
2613 bool is_eswitch_flow;
2616 inner_match_level = MLX5_MATCH_NONE;
2617 outer_match_level = MLX5_MATCH_NONE;
2619 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2620 &inner_match_level, &outer_match_level);
2621 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2622 outer_match_level : inner_match_level;
2624 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2625 if (!err && is_eswitch_flow) {
2627 if (rep->vport != MLX5_VPORT_UPLINK &&
2628 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2629 esw->offloads.inline_mode < non_tunnel_match_level)) {
2630 NL_SET_ERR_MSG_MOD(extack,
2631 "Flow is not offloaded due to min inline setting");
2632 netdev_warn(priv->netdev,
2633 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2634 non_tunnel_match_level, esw->offloads.inline_mode);
2639 flow->attr->inner_match_level = inner_match_level;
2640 flow->attr->outer_match_level = outer_match_level;
2646 struct pedit_headers {
2648 struct vlan_hdr vlan;
2655 struct pedit_headers_action {
2656 struct pedit_headers vals;
2657 struct pedit_headers masks;
2661 static int pedit_header_offsets[] = {
2662 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2663 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2664 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2665 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2666 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2669 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
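/* For example, pedit_header(&hdrs->vals, FLOW_ACT_MANGLE_HDR_TYPE_TCP)
 * resolves to (void *)&hdrs->vals + offsetof(struct pedit_headers, tcp),
 * i.e. the embedded struct tcphdr template, onto which set_pedit_val()
 * overlays 32-bit mask/value words at the pedit-provided offset.
 */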
2671 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2672 struct pedit_headers_action *hdrs)
2674 u32 *curr_pmask, *curr_pval;
2676 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2677 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2679 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2682 *curr_pmask |= mask;
2683 *curr_pval |= (val & mask);
2691 struct mlx5_fields {
2699 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2700 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2701 offsetof(struct pedit_headers, field) + (off), \
2702 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
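/* For example, OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype)
 * expands to { MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 16, U16_MAX,
 * offsetof(struct pedit_headers, eth.h_proto),
 * MLX5_BYTE_OFF(fte_match_set_lyr_2_4, ethertype) }, tying the pedit
 * offset of a field to its position in the FTE match parameters.
 */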
2704 /* masked values are the same and there are no rewrites that do not have a
2705 * match.
2706 */
2707 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2708 type matchmaskx = *(type *)(matchmaskp); \
2709 type matchvalx = *(type *)(matchvalp); \
2710 type maskx = *(type *)(maskp); \
2711 type valx = *(type *)(valp); \
2713 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2714 matchmaskx)); \
2715 })
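/* Worked example with u8 operands: match value/mask 0x05/0xff and
 * rewrite value/mask 0x05/0x0f give (0x05 & 0x0f) == (0x05 & 0xff), and
 * the rewrite mask 0x0f has no bit outside the match mask 0xff, so the
 * rewrite is provably a no-op for every matching packet and is skipped.
 */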
2717 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2718 void *matchmaskp, u8 bsize)
2724 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2727 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2730 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2737 static struct mlx5_fields fields[] = {
2738 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2739 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2740 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2741 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2742 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2743 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2745 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2746 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2747 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2748 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2750 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2751 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2752 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2753 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2754 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2755 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2756 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2757 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2758 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2759 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2760 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2761 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2762 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2763 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2764 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2765 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2766 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2767 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2769 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2770 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2771 /* in the linux tcphdr, tcp_flags is 8 bits long (byte 13, addressed here as ack_seq offset + 5) */
2772 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2774 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2775 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2778 static unsigned long mask_to_le(unsigned long mask, int size)
2779 {
2780 __be32 mask_be32;
2781 __be16 mask_be16;
2783 if (size == 32) {
2784 mask_be32 = (__force __be32)(mask);
2785 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2786 } else if (size == 16) {
2787 mask_be32 = (__force __be32)(mask);
2788 mask_be16 = *(__be16 *)&mask_be32;
2789 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2790 }
2792 return mask;
2793 }
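/* Worked example (assuming a little-endian CPU): a 32-bit field whose
 * mask bytes in the header template are ff ff 00 00 loads as 0x0000ffff;
 * be32_to_cpu() followed by cpu_to_le32() gives 0xffff0000, so the bit
 * scans in offload_pedit_fields() report first=16/last=31, in line with
 * the ntohl(value) >> first used there to build the action data.
 */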
2794 static int offload_pedit_fields(struct mlx5e_priv *priv,
2796 struct pedit_headers_action *hdrs,
2797 struct mlx5e_tc_flow_parse_attr *parse_attr,
2799 struct netlink_ext_ack *extack)
2801 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2802 int i, action_size, first, last, next_z;
2803 void *headers_c, *headers_v, *action, *vals_p;
2804 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2805 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2806 struct mlx5_fields *f;
2807 unsigned long mask, field_mask;
2811 mod_acts = &parse_attr->mod_hdr_acts;
2812 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2813 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2815 set_masks = &hdrs[0].masks;
2816 add_masks = &hdrs[1].masks;
2817 set_vals = &hdrs[0].vals;
2818 add_vals = &hdrs[1].vals;
2820 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2822 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2826 /* avoid seeing bits set from previous iterations */
2830 s_masks_p = (void *)set_masks + f->offset;
2831 a_masks_p = (void *)add_masks + f->offset;
2833 s_mask = *s_masks_p & f->field_mask;
2834 a_mask = *a_masks_p & f->field_mask;
2836 if (!s_mask && !a_mask) /* nothing to offload here */
2839 if (s_mask && a_mask) {
2840 NL_SET_ERR_MSG_MOD(extack,
2841 "can't set and add to the same HW field");
2842 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2848 void *match_mask = headers_c + f->match_offset;
2849 void *match_val = headers_v + f->match_offset;
2851 cmd = MLX5_ACTION_TYPE_SET;
2853 vals_p = (void *)set_vals + f->offset;
2854 /* don't rewrite if we have a match on the same value */
2855 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2856 match_mask, f->field_bsize))
2858 /* clear to denote we consumed this field */
2859 *s_masks_p &= ~f->field_mask;
2861 cmd = MLX5_ACTION_TYPE_ADD;
2863 vals_p = (void *)add_vals + f->offset;
2864 /* add 0 is no change */
2865 if ((*(u32 *)vals_p & f->field_mask) == 0)
2867 /* clear to denote we consumed this field */
2868 *a_masks_p &= ~f->field_mask;
2873 mask = mask_to_le(mask, f->field_bsize);
2875 first = find_first_bit(&mask, f->field_bsize);
2876 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2877 last = find_last_bit(&mask, f->field_bsize);
2878 if (first < next_z && next_z < last) {
2879 NL_SET_ERR_MSG_MOD(extack,
2880 "rewrite of few sub-fields isn't supported");
2881 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
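/* For example, mask 0x00fff000 is one contiguous run
 * (first=12, next_z=24, last=23) and is accepted, while 0x00f00f00
 * (first=8, next_z=12, last=23) has a hole and is rejected, since one
 * set_action_in entry can describe only a single offset/length run.
 */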
2886 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2888 NL_SET_ERR_MSG_MOD(extack,
2889 "too many pedit actions, can't offload");
2890 mlx5_core_warn(priv->mdev,
2891 "mlx5: parsed %d pedit actions, can't do more\n",
2892 mod_acts->num_actions);
2896 action = mod_acts->actions +
2897 (mod_acts->num_actions * action_size);
2898 MLX5_SET(set_action_in, action, action_type, cmd);
2899 MLX5_SET(set_action_in, action, field, f->field);
2901 if (cmd == MLX5_ACTION_TYPE_SET) {
2904 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2906 /* if the field is bit sized it may not start at the first bit */
2907 start = find_first_bit(&field_mask, f->field_bsize);
2909 MLX5_SET(set_action_in, action, offset, first - start);
2910 /* length is num of bits to be written, zero means length of 32 */
2911 MLX5_SET(set_action_in, action, length, (last - first + 1));
2914 if (f->field_bsize == 32)
2915 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2916 else if (f->field_bsize == 16)
2917 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2918 else if (f->field_bsize == 8)
2919 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
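/* For example, setting a fully masked 16-bit tcp_dport to 443 gives
 * first=0 and last=15, so the action is encoded with offset=0,
 * length=16 and data=443: the device rewrites all 16 bits of the field.
 */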
2921 ++mod_acts->num_actions;
2927 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2930 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2931 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2932 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2933 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2936 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2938 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2940 int action_size, new_num_actions, max_hw_actions;
2941 size_t new_sz, old_sz;
2944 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2947 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2949 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2951 new_num_actions = min(max_hw_actions,
2952 mod_hdr_acts->actions ?
2953 mod_hdr_acts->max_actions * 2 : 1);
2954 if (mod_hdr_acts->max_actions == new_num_actions)
2955 return -ENOSPC;
2957 new_sz = action_size * new_num_actions;
2958 old_sz = mod_hdr_acts->max_actions * action_size;
2959 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2960 if (!ret)
2961 return -ENOMEM;
2963 memset(ret + old_sz, 0, new_sz - old_sz);
2964 mod_hdr_acts->actions = ret;
2965 mod_hdr_acts->max_actions = new_num_actions;
2967 return 0;
2968 }
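/* The table therefore grows 1, 2, 4, 8, ... action slots per call,
 * clamped to the max_modify_header_actions capability; once the clamp
 * is reached, the next allocation request fails with -ENOSPC.
 */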
2970 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2972 kfree(mod_hdr_acts->actions);
2973 mod_hdr_acts->actions = NULL;
2974 mod_hdr_acts->num_actions = 0;
2975 mod_hdr_acts->max_actions = 0;
2978 static const struct pedit_headers zero_masks = {};
2981 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2982 const struct flow_action_entry *act, int namespace,
2983 struct mlx5e_tc_flow_parse_attr *parse_attr,
2984 struct pedit_headers_action *hdrs,
2985 struct netlink_ext_ack *extack)
2987 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2988 int err = -EOPNOTSUPP;
2989 u32 mask, val, offset;
2992 htype = act->mangle.htype;
2993 err = -EOPNOTSUPP; /* can't be all optimistic */
2995 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2996 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
3000 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
3001 NL_SET_ERR_MSG_MOD(extack,
3002 "The pedit offload action is not supported");
3006 mask = act->mangle.mask;
3007 val = act->mangle.val;
3008 offset = act->mangle.offset;
3010 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
3022 parse_pedit_to_reformat(struct mlx5e_priv *priv,
3023 const struct flow_action_entry *act,
3024 struct mlx5e_tc_flow_parse_attr *parse_attr,
3025 struct netlink_ext_ack *extack)
3027 u32 mask, val, offset;
3030 if (act->id != FLOW_ACTION_MANGLE)
3033 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
3034 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
3038 mask = ~act->mangle.mask;
3039 val = act->mangle.val;
3040 offset = act->mangle.offset;
3041 p = (u32 *)&parse_attr->eth;
3042 *(p + (offset >> 2)) |= (val & mask);
3047 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
3048 const struct flow_action_entry *act, int namespace,
3049 struct mlx5e_tc_flow_parse_attr *parse_attr,
3050 struct pedit_headers_action *hdrs,
3051 struct mlx5e_tc_flow *flow,
3052 struct netlink_ext_ack *extack)
3054 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
3055 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
3057 return parse_pedit_to_modify_hdr(priv, act, namespace,
3058 parse_attr, hdrs, extack);
3061 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3062 struct mlx5e_tc_flow_parse_attr *parse_attr,
3063 struct pedit_headers_action *hdrs,
3065 struct netlink_ext_ack *extack)
3067 struct pedit_headers *cmd_masks;
3071 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
3072 action_flags, extack);
3074 goto out_dealloc_parsed_actions;
3076 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3077 cmd_masks = &hdrs[cmd].masks;
3078 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3079 NL_SET_ERR_MSG_MOD(extack,
3080 "attempt to offload an unsupported field");
3081 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3082 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3083 16, 1, cmd_masks, sizeof(zero_masks), true);
3085 goto out_dealloc_parsed_actions;
3091 out_dealloc_parsed_actions:
3092 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3096 static bool csum_offload_supported(struct mlx5e_priv *priv,
3099 struct netlink_ext_ack *extack)
3101 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
3102 TCA_CSUM_UPDATE_FLAG_UDP;
3104 /* The HW recalculates checksums only when headers are rewritten */
3105 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
3106 NL_SET_ERR_MSG_MOD(extack,
3107 "TC csum action is only offloaded with pedit");
3108 netdev_warn(priv->netdev,
3109 "TC csum action is only offloaded with pedit\n");
3113 if (update_flags & ~prot_flags) {
3114 NL_SET_ERR_MSG_MOD(extack,
3115 "can't offload TC csum action for some header/s");
3116 netdev_warn(priv->netdev,
3117 "can't offload TC csum action for some header/s - flags %#x\n",
3125 struct ip_ttl_word {
3131 struct ipv6_hoplimit_word {
3137 static int is_action_keys_supported(const struct flow_action_entry *act,
3138 bool ct_flow, bool *modify_ip_header,
3140 struct netlink_ext_ack *extack)
3145 htype = act->mangle.htype;
3146 offset = act->mangle.offset;
3147 mask = ~act->mangle.mask;
3148 /* For the IPv4 and IPv6 headers, check the 4-byte word that
3149 * contains ttl/hop_limit to determine whether fields other
3150 * than ttl/hop_limit are being modified.
3151 */
3152 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3153 struct ip_ttl_word *ttl_word =
3154 (struct ip_ttl_word *)&mask;
3156 if (offset != offsetof(struct iphdr, ttl) ||
3157 ttl_word->protocol ||
3159 *modify_ip_header = true;
3162 if (offset >= offsetof(struct iphdr, saddr))
3163 *modify_tuple = true;
3165 if (ct_flow && *modify_tuple) {
3166 NL_SET_ERR_MSG_MOD(extack,
3167 "can't offload re-write of ipv4 address with action ct");
3170 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3171 struct ipv6_hoplimit_word *hoplimit_word =
3172 (struct ipv6_hoplimit_word *)&mask;
3174 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3175 hoplimit_word->payload_len ||
3176 hoplimit_word->nexthdr) {
3177 *modify_ip_header = true;
3180 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3181 *modify_tuple = true;
3183 if (ct_flow && *modify_tuple) {
3184 NL_SET_ERR_MSG_MOD(extack,
3185 "can't offload re-write of ipv6 address with action ct");
3188 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3189 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3190 *modify_tuple = true;
3192 NL_SET_ERR_MSG_MOD(extack,
3193 "can't offload re-write of transport header ports with action ct");
3201 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3202 struct mlx5_flow_spec *spec,
3203 struct flow_action *flow_action,
3204 u32 actions, bool ct_flow,
3206 struct netlink_ext_ack *extack)
3208 const struct flow_action_entry *act;
3209 bool modify_ip_header, modify_tuple;
3216 headers_c = get_match_headers_criteria(actions, spec);
3217 headers_v = get_match_headers_value(actions, spec);
3218 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3220 /* for non-IP we only re-write MACs, so we're okay */
3221 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3222 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3225 modify_ip_header = false;
3226 modify_tuple = false;
3227 flow_action_for_each(i, act, flow_action) {
3228 if (act->id != FLOW_ACTION_MANGLE &&
3229 act->id != FLOW_ACTION_ADD)
3232 err = is_action_keys_supported(act, ct_flow,
3234 &modify_tuple, extack);
3239 /* Add a ct_state=-trk match so the rule is offloaded for non-ct flows
3240 * (or after a ct clear action); otherwise, once the tuple is changed,
3241 * the ct state can't be restored.
3242 */
3243 if (!ct_clear && modify_tuple &&
3244 mlx5_tc_ct_add_no_trk_match(spec)) {
3245 NL_SET_ERR_MSG_MOD(extack,
3246 "can't offload tuple modify header with ct matches");
3247 netdev_info(priv->netdev,
3248 "can't offload tuple modify header with ct matches");
3252 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3253 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3254 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3255 NL_SET_ERR_MSG_MOD(extack,
3256 "can't offload re-write of non TCP/UDP");
3257 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3266 static bool actions_match_supported(struct mlx5e_priv *priv,
3267 struct flow_action *flow_action,
3268 struct mlx5e_tc_flow_parse_attr *parse_attr,
3269 struct mlx5e_tc_flow *flow,
3270 struct netlink_ext_ack *extack)
3272 bool ct_flow = false, ct_clear = false;
3275 ct_clear = flow->attr->ct_attr.ct_action &
3277 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3278 actions = flow->attr->action;
3280 if (mlx5e_is_eswitch_flow(flow)) {
3281 if (flow->attr->esw_attr->split_count && ct_flow) {
3282 /* All registers used by ct are cleared when using
3283 * split rules.
3284 */
3285 NL_SET_ERR_MSG_MOD(extack,
3286 "Can't offload mirroring with action ct");
3291 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3292 return modify_header_match_supported(priv, &parse_attr->spec,
3293 flow_action, actions,
3300 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3302 return priv->mdev == peer_priv->mdev;
3305 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3307 struct mlx5_core_dev *fmdev, *pmdev;
3308 u64 fsystem_guid, psystem_guid;
3311 pmdev = peer_priv->mdev;
3313 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3314 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3316 return (fsystem_guid == psystem_guid);
3319 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3320 const struct flow_action_entry *act,
3321 struct mlx5e_tc_flow_parse_attr *parse_attr,
3322 struct pedit_headers_action *hdrs,
3323 u32 *action, struct netlink_ext_ack *extack)
3325 u16 mask16 = VLAN_VID_MASK;
3326 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3327 const struct flow_action_entry pedit_act = {
3328 .id = FLOW_ACTION_MANGLE,
3329 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3330 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3331 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3332 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3334 u8 match_prio_mask, match_prio_val;
3335 void *headers_c, *headers_v;
3338 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3339 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3341 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3342 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3343 NL_SET_ERR_MSG_MOD(extack,
3344 "VLAN rewrite action must have VLAN protocol match");
3348 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3349 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3350 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3351 NL_SET_ERR_MSG_MOD(extack,
3352 "Changing VLAN prio is not supported");
3356 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3357 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3363 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3364 struct mlx5e_tc_flow_parse_attr *parse_attr,
3365 struct pedit_headers_action *hdrs,
3366 u32 *action, struct netlink_ext_ack *extack)
3368 const struct flow_action_entry prio_tag_act = {
3371 MLX5_GET(fte_match_set_lyr_2_4,
3372 get_match_headers_value(*action,
3375 MLX5_GET(fte_match_set_lyr_2_4,
3376 get_match_headers_criteria(*action,
3381 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3382 &prio_tag_act, parse_attr, hdrs, action,
3386 static int validate_goto_chain(struct mlx5e_priv *priv,
3387 struct mlx5e_tc_flow *flow,
3388 const struct flow_action_entry *act,
3390 struct netlink_ext_ack *extack)
3392 bool is_esw = mlx5e_is_eswitch_flow(flow);
3393 struct mlx5_flow_attr *attr = flow->attr;
3394 bool ft_flow = mlx5e_is_ft_flow(flow);
3395 u32 dest_chain = act->chain_index;
3396 struct mlx5_fs_chains *chains;
3397 struct mlx5_eswitch *esw;
3398 u32 reformat_and_fwd;
3401 esw = priv->mdev->priv.eswitch;
3402 chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3403 max_chain = mlx5_chains_get_chain_range(chains);
3404 reformat_and_fwd = is_esw ?
3405 MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3406 MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3409 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3413 if (!mlx5_chains_backwards_supported(chains) &&
3414 dest_chain <= attr->chain) {
3415 NL_SET_ERR_MSG_MOD(extack,
3416 "Goto lower numbered chain isn't supported");
3420 if (dest_chain > max_chain) {
3421 NL_SET_ERR_MSG_MOD(extack,
3422 "Requested destination chain is out of supported range");
3426 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3427 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3428 !reformat_and_fwd) {
3429 NL_SET_ERR_MSG_MOD(extack,
3430 "Goto chain is not allowed if action has reformat or decap");
3437 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3438 struct flow_action *flow_action,
3439 struct mlx5e_tc_flow_parse_attr *parse_attr,
3440 struct mlx5e_tc_flow *flow,
3441 struct netlink_ext_ack *extack)
3443 struct mlx5_flow_attr *attr = flow->attr;
3444 struct pedit_headers_action hdrs[2] = {};
3445 const struct flow_action_entry *act;
3446 struct mlx5_nic_flow_attr *nic_attr;
3450 if (!flow_action_has_entries(flow_action))
3453 if (!flow_action_hw_stats_check(flow_action, extack,
3454 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3457 nic_attr = attr->nic_attr;
3459 nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3461 flow_action_for_each(i, act, flow_action) {
3463 case FLOW_ACTION_ACCEPT:
3464 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3465 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3467 case FLOW_ACTION_DROP:
3468 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3469 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3470 flow_table_properties_nic_receive.flow_counter))
3471 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3473 case FLOW_ACTION_MANGLE:
3474 case FLOW_ACTION_ADD:
3475 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3476 parse_attr, hdrs, NULL, extack);
3480 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3482 case FLOW_ACTION_VLAN_MANGLE:
3483 err = add_vlan_rewrite_action(priv,
3484 MLX5_FLOW_NAMESPACE_KERNEL,
3485 act, parse_attr, hdrs,
3491 case FLOW_ACTION_CSUM:
3492 if (csum_offload_supported(priv, action,
3498 case FLOW_ACTION_REDIRECT: {
3499 struct net_device *peer_dev = act->dev;
3501 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3502 same_hw_devs(priv, netdev_priv(peer_dev))) {
3503 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3504 flow_flag_set(flow, HAIRPIN);
3505 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3506 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3508 NL_SET_ERR_MSG_MOD(extack,
3509 "device is not on same HW, can't offload");
3510 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3516 case FLOW_ACTION_MARK: {
3517 u32 mark = act->mark;
3519 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3520 NL_SET_ERR_MSG_MOD(extack,
3521 "Bad flow mark - only 16 bit is supported");
3525 nic_attr->flow_tag = mark;
3526 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3529 case FLOW_ACTION_GOTO:
3530 err = validate_goto_chain(priv, flow, act, action,
3535 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3536 attr->dest_chain = act->chain_index;
3538 case FLOW_ACTION_CT:
3539 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3543 flow_flag_set(flow, CT);
3546 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3551 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3552 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3553 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3554 parse_attr, hdrs, &action, extack);
3557 /* in case all pedit actions are skipped, remove the MOD_HDR
3558 * flag.
3559 */
3560 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3561 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3562 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3566 attr->action = action;
3568 if (attr->dest_chain) {
3569 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3570 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3573 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3576 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3577 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3579 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3586 const struct ip_tunnel_key *ip_tun_key;
3587 struct mlx5e_tc_tunnel *tc_tunnel;
3590 static inline int cmp_encap_info(struct encap_key *a,
3591 struct encap_key *b)
3593 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3594 a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3597 static inline int cmp_decap_info(struct mlx5e_decap_key *a,
3598 struct mlx5e_decap_key *b)
3600 return memcmp(&a->key, &b->key, sizeof(b->key));
3603 static inline int hash_encap_info(struct encap_key *key)
3605 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3606 key->tc_tunnel->tunnel_type);
3609 static inline int hash_decap_info(struct mlx5e_decap_key *key)
3611 return jhash(&key->key, sizeof(key->key), 0);
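/* Both lookups hash the full key -- the ip_tunnel_key seeded with the
 * tunnel type for encap, the rebuilt Ethernet header for decap -- so
 * flows with identical tunnel parameters reuse one reformat object.
 */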
3614 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3615 struct net_device *peer_netdev)
3617 struct mlx5e_priv *peer_priv;
3619 peer_priv = netdev_priv(peer_netdev);
3621 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3622 mlx5e_eswitch_vf_rep(priv->netdev) &&
3623 mlx5e_eswitch_vf_rep(peer_netdev) &&
3624 same_hw_devs(priv, peer_priv));
3627 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3629 return refcount_inc_not_zero(&e->refcnt);
3632 static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
3634 return refcount_inc_not_zero(&e->refcnt);
3637 static struct mlx5e_encap_entry *
3638 mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3641 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3642 struct mlx5e_encap_entry *e;
3643 struct encap_key e_key;
3645 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3646 encap_hlist, hash_key) {
3647 e_key.ip_tun_key = &e->tun_info->key;
3648 e_key.tc_tunnel = e->tunnel;
3649 if (!cmp_encap_info(&e_key, key) &&
3650 mlx5e_encap_take(e))
3657 static struct mlx5e_decap_entry *
3658 mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
3661 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3662 struct mlx5e_decap_key r_key;
3663 struct mlx5e_decap_entry *e;
3665 hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
3668 if (!cmp_decap_info(&r_key, key) &&
3669 mlx5e_decap_take(e))
3675 static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
3677 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
3679 return kmemdup(tun_info, tun_size, GFP_KERNEL);
3682 static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3683 struct mlx5e_tc_flow *flow,
3685 struct mlx5e_encap_entry *e,
3686 struct netlink_ext_ack *extack)
3690 for (i = 0; i < out_index; i++) {
3691 if (flow->encaps[i].e != e)
3693 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3694 netdev_err(priv->netdev, "can't duplicate encap action\n");
3701 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3702 struct mlx5e_tc_flow *flow,
3703 struct net_device *mirred_dev,
3705 struct netlink_ext_ack *extack,
3706 struct net_device **encap_dev,
3709 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3710 struct mlx5e_tc_flow_parse_attr *parse_attr;
3711 struct mlx5_flow_attr *attr = flow->attr;
3712 const struct ip_tunnel_info *tun_info;
3713 struct encap_key key;
3714 struct mlx5e_encap_entry *e;
3715 unsigned short family;
3719 parse_attr = attr->parse_attr;
3720 tun_info = parse_attr->tun_info[out_index];
3721 family = ip_tunnel_info_af(tun_info);
3722 key.ip_tun_key = &tun_info->key;
3723 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3724 if (!key.tc_tunnel) {
3725 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3729 hash_key = hash_encap_info(&key);
3731 mutex_lock(&esw->offloads.encap_tbl_lock);
3732 e = mlx5e_encap_get(priv, &key, hash_key);
3734 /* must verify if encap is valid or not */
3736 /* Check that entry was not already attached to this flow */
3737 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3742 mutex_unlock(&esw->offloads.encap_tbl_lock);
3743 wait_for_completion(&e->res_ready);
3745 /* Protect against concurrent neigh update. */
3746 mutex_lock(&esw->offloads.encap_tbl_lock);
3747 if (e->compl_result < 0) {
3754 e = kzalloc(sizeof(*e), GFP_KERNEL);
3760 refcount_set(&e->refcnt, 1);
3761 init_completion(&e->res_ready);
3763 tun_info = dup_tun_info(tun_info);
3768 e->tun_info = tun_info;
3769 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3773 INIT_LIST_HEAD(&e->flows);
3774 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3775 mutex_unlock(&esw->offloads.encap_tbl_lock);
3777 if (family == AF_INET)
3778 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3779 else if (family == AF_INET6)
3780 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3782 /* Protect against concurrent neigh update. */
3783 mutex_lock(&esw->offloads.encap_tbl_lock);
3784 complete_all(&e->res_ready);
3786 e->compl_result = err;
3789 e->compl_result = 1;
3792 flow->encaps[out_index].e = e;
3793 list_add(&flow->encaps[out_index].list, &e->flows);
3794 flow->encaps[out_index].index = out_index;
3795 *encap_dev = e->out_dev;
3796 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3797 attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3798 attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3799 *encap_valid = true;
3801 *encap_valid = false;
3803 mutex_unlock(&esw->offloads.encap_tbl_lock);
3808 mutex_unlock(&esw->offloads.encap_tbl_lock);
3810 mlx5e_encap_put(priv, e);
3814 mutex_unlock(&esw->offloads.encap_tbl_lock);
3820 static int mlx5e_attach_decap(struct mlx5e_priv *priv,
3821 struct mlx5e_tc_flow *flow,
3822 struct netlink_ext_ack *extack)
3824 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3825 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3826 struct mlx5e_tc_flow_parse_attr *parse_attr;
3827 struct mlx5e_decap_entry *d;
3828 struct mlx5e_decap_key key;
3832 parse_attr = flow->attr->parse_attr;
3833 if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
3834 NL_SET_ERR_MSG_MOD(extack,
3835 "encap header larger than max supported");
3839 key.key = parse_attr->eth;
3840 hash_key = hash_decap_info(&key);
3841 mutex_lock(&esw->offloads.decap_tbl_lock);
3842 d = mlx5e_decap_get(priv, &key, hash_key);
3844 mutex_unlock(&esw->offloads.decap_tbl_lock);
3845 wait_for_completion(&d->res_ready);
3846 mutex_lock(&esw->offloads.decap_tbl_lock);
3847 if (d->compl_result) {
3854 d = kzalloc(sizeof(*d), GFP_KERNEL);
3861 refcount_set(&d->refcnt, 1);
3862 init_completion(&d->res_ready);
3863 INIT_LIST_HEAD(&d->flows);
3864 hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
3865 mutex_unlock(&esw->offloads.decap_tbl_lock);
3867 d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
3868 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
3869 sizeof(parse_attr->eth),
3871 MLX5_FLOW_NAMESPACE_FDB);
3872 if (IS_ERR(d->pkt_reformat)) {
3873 err = PTR_ERR(d->pkt_reformat);
3874 d->compl_result = err;
3876 mutex_lock(&esw->offloads.decap_tbl_lock);
3877 complete_all(&d->res_ready);
3882 flow->decap_reformat = d;
3883 attr->decap_pkt_reformat = d->pkt_reformat;
3884 list_add(&flow->l3_to_l2_reformat, &d->flows);
3885 mutex_unlock(&esw->offloads.decap_tbl_lock);
3889 mutex_unlock(&esw->offloads.decap_tbl_lock);
3890 mlx5e_decap_put(priv, d);
3894 mutex_unlock(&esw->offloads.decap_tbl_lock);
3898 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3899 const struct flow_action_entry *act,
3900 struct mlx5_esw_flow_attr *attr,
3903 u8 vlan_idx = attr->total_vlan;
3905 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3909 case FLOW_ACTION_VLAN_POP:
3911 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3912 MLX5_FS_VLAN_DEPTH))
3915 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3917 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3920 case FLOW_ACTION_VLAN_PUSH:
3921 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3922 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3923 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3924 if (!attr->vlan_proto[vlan_idx])
3925 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3928 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3929 MLX5_FS_VLAN_DEPTH))
3932 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3934 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3935 (act->vlan.proto != htons(ETH_P_8021Q) ||
3939 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3946 attr->total_vlan = vlan_idx + 1;
3951 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3952 struct net_device *out_dev)
3954 struct net_device *fdb_out_dev = out_dev;
3955 struct net_device *uplink_upper;
3958 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3959 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3960 uplink_upper == out_dev) {
3961 fdb_out_dev = uplink_dev;
3962 } else if (netif_is_lag_master(out_dev)) {
3963 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3965 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3966 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3973 static int add_vlan_push_action(struct mlx5e_priv *priv,
3974 struct mlx5_flow_attr *attr,
3975 struct net_device **out_dev,
3978 struct net_device *vlan_dev = *out_dev;
3979 struct flow_action_entry vlan_act = {
3980 .id = FLOW_ACTION_VLAN_PUSH,
3981 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3982 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3987 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3991 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3992 dev_get_iflink(vlan_dev));
3993 if (is_vlan_dev(*out_dev))
3994 err = add_vlan_push_action(priv, attr, out_dev, action);
3999 static int add_vlan_pop_action(struct mlx5e_priv *priv,
4000 struct mlx5_flow_attr *attr,
4003 struct flow_action_entry vlan_act = {
4004 .id = FLOW_ACTION_VLAN_POP,
4006 int nest_level, err = 0;
4008 nest_level = attr->parse_attr->filter_dev->lower_level -
4009 priv->netdev->lower_level;
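/* For example, a filter on a VLAN device stacked on another VLAN device
 * over this rep has a lower_level two above the rep's, so nest_level is
 * 2 and two FLOW_ACTION_VLAN_POP entries are parsed below -- one per
 * encapsulating tag.
 */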
4010 while (nest_level--) {
4011 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
4019 static bool same_hw_reps(struct mlx5e_priv *priv,
4020 struct net_device *peer_netdev)
4022 struct mlx5e_priv *peer_priv;
4024 peer_priv = netdev_priv(peer_netdev);
4026 return mlx5e_eswitch_rep(priv->netdev) &&
4027 mlx5e_eswitch_rep(peer_netdev) &&
4028 same_hw_devs(priv, peer_priv);
4031 static bool is_lag_dev(struct mlx5e_priv *priv,
4032 struct net_device *peer_netdev)
4034 return ((mlx5_lag_is_sriov(priv->mdev) ||
4035 mlx5_lag_is_multipath(priv->mdev)) &&
4036 same_hw_reps(priv, peer_netdev));
4039 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4040 struct net_device *out_dev)
4042 if (is_merged_eswitch_vfs(priv, out_dev))
4045 if (is_lag_dev(priv, out_dev))
4048 return mlx5e_eswitch_rep(out_dev) &&
4049 same_port_devs(priv, netdev_priv(out_dev));
4052 static bool is_duplicated_output_device(struct net_device *dev,
4053 struct net_device *out_dev,
4054 int *ifindexes, int if_count,
4055 struct netlink_ext_ack *extack)
4059 for (i = 0; i < if_count; i++) {
4060 if (ifindexes[i] == out_dev->ifindex) {
4061 NL_SET_ERR_MSG_MOD(extack,
4062 "can't duplicate output to same device");
4063 netdev_err(dev, "can't duplicate output to same device: %s\n",
4072 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
4073 struct mlx5e_tc_flow *flow,
4074 struct net_device *out_dev,
4075 struct netlink_ext_ack *extack)
4077 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4078 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4079 struct mlx5e_rep_priv *rep_priv;
4081 /* Forwarding non encapsulated traffic between
4082 * uplink ports is allowed only if
4083 * termination_table_raw_traffic cap is set.
4085 * The input vport was stored in attr->in_rep.
4086 * In the LAG case, *priv* is the private data of the
4087 * uplink, which may not be the input vport.
4089 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
4091 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
4092 mlx5e_eswitch_uplink_rep(out_dev)))
4095 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
4096 termination_table_raw_traffic)) {
4097 NL_SET_ERR_MSG_MOD(extack,
4098 "devices are both uplink, can't offload forwarding");
4099 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
4100 priv->netdev->name, out_dev->name);
4102 } else if (out_dev != rep_priv->netdev) {
4103 NL_SET_ERR_MSG_MOD(extack,
4104 "devices are not the same uplink, can't offload forwarding");
4105 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
4106 priv->netdev->name, out_dev->name);
4112 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
4113 struct flow_action *flow_action,
4114 struct mlx5e_tc_flow *flow,
4115 struct netlink_ext_ack *extack,
4116 struct net_device *filter_dev)
4118 struct pedit_headers_action hdrs[2] = {};
4119 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4120 struct mlx5e_tc_flow_parse_attr *parse_attr;
4121 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4122 const struct ip_tunnel_info *info = NULL;
4123 struct mlx5_flow_attr *attr = flow->attr;
4124 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
4125 bool ft_flow = mlx5e_is_ft_flow(flow);
4126 const struct flow_action_entry *act;
4127 struct mlx5_esw_flow_attr *esw_attr;
4128 bool encap = false, decap = false;
4129 u32 action = attr->action;
4130 int err, i, if_count = 0;
4131 bool mpls_push = false;
4133 if (!flow_action_has_entries(flow_action))
4136 if (!flow_action_hw_stats_check(flow_action, extack,
4137 FLOW_ACTION_HW_STATS_DELAYED_BIT))
4140 esw_attr = attr->esw_attr;
4141 parse_attr = attr->parse_attr;
4143 flow_action_for_each(i, act, flow_action) {
4145 case FLOW_ACTION_DROP:
4146 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
4147 MLX5_FLOW_CONTEXT_ACTION_COUNT;
4149 case FLOW_ACTION_TRAP:
4150 if (!flow_offload_has_one_action(flow_action)) {
4151 NL_SET_ERR_MSG_MOD(extack,
4152 "action trap is supported as a sole action only");
4155 action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4156 MLX5_FLOW_CONTEXT_ACTION_COUNT);
4157 attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
4159 case FLOW_ACTION_MPLS_PUSH:
4160 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
4161 reformat_l2_to_l3_tunnel) ||
4162 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
4163 NL_SET_ERR_MSG_MOD(extack,
4164 "mpls push is supported only for mpls_uc protocol");
4169 case FLOW_ACTION_MPLS_POP:
4170 /* we only support mpls pop if it is the first action
4171 * and the filter net device is bareudp. Subsequent
4172 * actions can be pedit and the last can be mirred
4173 * egress redirect.
4174 */
4176 NL_SET_ERR_MSG_MOD(extack,
4177 "mpls pop supported only as first action");
4180 if (!netif_is_bareudp(filter_dev)) {
4181 NL_SET_ERR_MSG_MOD(extack,
4182 "mpls pop supported only on bareudp devices");
4186 parse_attr->eth.h_proto = act->mpls_pop.proto;
4187 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
4188 flow_flag_set(flow, L3_TO_L2_DECAP);
4190 case FLOW_ACTION_MANGLE:
4191 case FLOW_ACTION_ADD:
4192 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
4193 parse_attr, hdrs, flow, extack);
4197 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
4198 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4199 esw_attr->split_count = esw_attr->out_count;
4202 case FLOW_ACTION_CSUM:
4203 if (csum_offload_supported(priv, action,
4204 act->csum_flags, extack))
4208 case FLOW_ACTION_REDIRECT:
4209 case FLOW_ACTION_MIRRED: {
4210 struct mlx5e_priv *out_priv;
4211 struct net_device *out_dev;
4215 /* out_dev is NULL when filters with a
4216 * non-existent mirred device are replayed to
4217 * the switch
4218 */
4222 if (mpls_push && !netif_is_bareudp(out_dev)) {
4223 NL_SET_ERR_MSG_MOD(extack,
4224 "mpls is supported only through a bareudp device");
4228 if (ft_flow && out_dev == priv->netdev) {
4229 /* Ignore forward to self rules generated
4230 * by adding both mlx5 devs to the flow table
4231 * block on a normal nft offload setup.
4236 if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
4237 NL_SET_ERR_MSG_MOD(extack,
4238 "can't support more output ports, can't offload forwarding");
4239 netdev_warn(priv->netdev,
4240 "can't support more than %d output ports, can't offload forwarding\n",
4241 esw_attr->out_count);
4245 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4246 MLX5_FLOW_CONTEXT_ACTION_COUNT;
4248 parse_attr->mirred_ifindex[esw_attr->out_count] =
4250 parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
4251 if (!parse_attr->tun_info[esw_attr->out_count])
4254 esw_attr->dests[esw_attr->out_count].flags |=
4255 MLX5_ESW_DEST_ENCAP;
4256 esw_attr->out_count++;
4257 /* attr->dests[].rep is resolved when we
4258 * handle encap
4259 */
4260 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
4261 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4262 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
4264 if (is_duplicated_output_device(priv->netdev,
4271 ifindexes[if_count] = out_dev->ifindex;
4274 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
4278 if (is_vlan_dev(out_dev)) {
4279 err = add_vlan_push_action(priv, attr,
4286 if (is_vlan_dev(parse_attr->filter_dev)) {
4287 err = add_vlan_pop_action(priv, attr,
4293 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
4297 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
4298 NL_SET_ERR_MSG_MOD(extack,
4299 "devices are not on same switch HW, can't offload forwarding");
4303 out_priv = netdev_priv(out_dev);
4304 rpriv = out_priv->ppriv;
4305 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
4306 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
4307 esw_attr->out_count++;
4308 } else if (parse_attr->filter_dev != priv->netdev) {
4309 /* All mlx5 devices are called to configure
4310 * high level device filters. Therefore, the
4311 * *attempt* to install a filter on invalid
4312 * eswitch should not trigger an explicit error
4316 NL_SET_ERR_MSG_MOD(extack,
4317 "devices are not on same switch HW, can't offload forwarding");
4318 netdev_warn(priv->netdev,
4319 "devices %s %s not on same switch HW, can't offload forwarding\n",
4326 case FLOW_ACTION_TUNNEL_ENCAP:
4334 case FLOW_ACTION_VLAN_PUSH:
4335 case FLOW_ACTION_VLAN_POP:
4336 if (act->id == FLOW_ACTION_VLAN_PUSH &&
4337 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
4338 /* Replace vlan pop+push with vlan modify */
4339 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4340 err = add_vlan_rewrite_action(priv,
4341 MLX5_FLOW_NAMESPACE_FDB,
4342 act, parse_attr, hdrs,
4345 err = parse_tc_vlan_action(priv, act, esw_attr, &action);
4350 esw_attr->split_count = esw_attr->out_count;
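			/* split_count records how many destinations precede
			 * this VLAN action; a non-zero value later forces the
			 * rule to be split for mirroring (see the
			 * mlx5_esw_has_fwd_fdb() check below).
			 */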
4352 case FLOW_ACTION_VLAN_MANGLE:
4353 err = add_vlan_rewrite_action(priv,
4354 MLX5_FLOW_NAMESPACE_FDB,
4355 act, parse_attr, hdrs,
4360 esw_attr->split_count = esw_attr->out_count;
4362 case FLOW_ACTION_TUNNEL_DECAP:
4365 case FLOW_ACTION_GOTO:
4366 err = validate_goto_chain(priv, flow, act, action,
4371 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4372 attr->dest_chain = act->chain_index;
4374 case FLOW_ACTION_CT:
4375 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
4379 flow_flag_set(flow, CT);
4382 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
4387 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
4388 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with a vlan prio tag
		 * rewrite.
		 */
4392 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4393 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
4399 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
4400 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
4401 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
4402 parse_attr, hdrs, &action, extack);
		/* In case all pedit actions are skipped, remove the MOD_HDR
		 * flag. We might have set split_count either by pedit or
		 * pop/push; if there is no pop/push either, reset it too.
		 */
4409 if (parse_attr->mod_hdr_acts.num_actions == 0) {
4410 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4411 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4412 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
4413 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
4414 esw_attr->split_count = 0;
4418 attr->action = action;
4419 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
4422 if (attr->dest_chain) {
			/* It can be supported if we'll create a mapping for
			 * the tunnel device only (without tunnel), and set
			 * this tunnel id with this decap flow.
			 *
			 * On restore (miss), we'll just set this saved tunnel
			 * device.
			 */
4432 NL_SET_ERR_MSG(extack,
4433 "Decap with goto isn't supported");
4434 netdev_warn(priv->netdev,
4435 "Decap with goto isn't supported");
4439 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
4440 NL_SET_ERR_MSG_MOD(extack,
4441 "Mirroring goto chain rules isn't supported");
4444 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4447 if (!(attr->action &
4448 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4449 NL_SET_ERR_MSG_MOD(extack,
4450 "Rule must have at least one forward/drop action");
4454 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4455 NL_SET_ERR_MSG_MOD(extack,
4456 "current firmware doesn't support split rule for port mirroring");
4457 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4464 static void get_flags(int flags, unsigned long *flow_flags)
4466 unsigned long __flow_flags = 0;
4468 if (flags & MLX5_TC_FLAG(INGRESS))
4469 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4470 if (flags & MLX5_TC_FLAG(EGRESS))
4471 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4473 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4474 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4475 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4476 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4477 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4478 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4480 *flow_flags = __flow_flags;
4483 static const struct rhashtable_params tc_ht_params = {
4484 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4485 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4486 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4487 .automatic_shrinking = true,
4490 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4491 unsigned long flags)
4493 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4494 struct mlx5e_rep_priv *uplink_rpriv;
4496 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4497 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4498 return &uplink_rpriv->uplink_priv.tc_ht;
4499 } else /* NIC offload */
4500 return &priv->fs.tc.ht;
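/* Flows are keyed by the TC filter cookie, so the replace/destroy/stats
 * callbacks can resolve their flow with one hash lookup, e.g.:
 *	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
 * as done in mlx5e_configure_flower() below.
 */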
4503 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4505 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4506 struct mlx5_flow_attr *attr = flow->attr;
4507 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4508 flow_flag_test(flow, INGRESS);
4509 bool act_is_encap = !!(attr->action &
4510 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4511 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4512 MLX5_DEVCOM_ESW_OFFLOADS);
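	/* A peer ("dup") flow is needed only when the two eswitches are
	 * paired (SR-IOV LAG or multipath) and the rule either matches on a
	 * VF rep ingress port or performs encap, since such traffic may use
	 * either uplink.
	 */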
4517 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4518 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4519 (is_rep_ingress || act_is_encap))
4525 struct mlx5_flow_attr *
4526 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4528 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4529 sizeof(struct mlx5_esw_flow_attr) :
4530 sizeof(struct mlx5_nic_flow_attr);
4531 struct mlx5_flow_attr *attr;
4533 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
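/* The allocation above places the namespace-specific attribute right after
 * struct mlx5_flow_attr; callers reach that tail area through the
 * attr->esw_attr / attr->nic_attr members.
 */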
4537 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4538 struct flow_cls_offload *f, unsigned long flow_flags,
4539 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4540 struct mlx5e_tc_flow **__flow)
4542 struct mlx5e_tc_flow_parse_attr *parse_attr;
4543 struct mlx5_flow_attr *attr;
4544 struct mlx5e_tc_flow *flow;
4548 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4549 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4550 if (!parse_attr || !flow)
4553 flow->flags = flow_flags;
4554 flow->cookie = f->cookie;
4557 attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4563 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4564 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4565 INIT_LIST_HEAD(&flow->hairpin);
4566 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4567 refcount_set(&flow->refcnt, 1);
4568 init_completion(&flow->init_done);
4571 *__parse_attr = parse_attr;
4582 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4583 struct mlx5e_tc_flow_parse_attr *parse_attr,
4584 struct flow_cls_offload *f)
4586 attr->parse_attr = parse_attr;
4587 attr->chain = f->common.chain_index;
4588 attr->prio = f->common.prio;
4592 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4593 struct mlx5e_priv *priv,
4594 struct mlx5e_tc_flow_parse_attr *parse_attr,
4595 struct flow_cls_offload *f,
4596 struct mlx5_eswitch_rep *in_rep,
4597 struct mlx5_core_dev *in_mdev)
4599 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4600 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4602 mlx5e_flow_attr_init(attr, parse_attr, f);
4604 esw_attr->in_rep = in_rep;
4605 esw_attr->in_mdev = in_mdev;
	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
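	/* With eswitch counter affinity the counter is taken from the
	 * ingress mdev, presumably so stats follow the eswitch the packet
	 * arrived on; otherwise it stays on the local mdev.
	 */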
4614 static struct mlx5e_tc_flow *
4615 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4616 struct flow_cls_offload *f,
4617 unsigned long flow_flags,
4618 struct net_device *filter_dev,
4619 struct mlx5_eswitch_rep *in_rep,
4620 struct mlx5_core_dev *in_mdev)
4622 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4623 struct netlink_ext_ack *extack = f->common.extack;
4624 struct mlx5e_tc_flow_parse_attr *parse_attr;
4625 struct mlx5e_tc_flow *flow;
4628 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4629 attr_size = sizeof(struct mlx5_esw_flow_attr);
4630 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4631 &parse_attr, &flow);
4635 parse_attr->filter_dev = filter_dev;
4636 mlx5e_flow_esw_attr_init(flow->attr,
4638 f, in_rep, in_mdev);
4640 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4645 /* actions validation depends on parsing the ct matches first */
4646 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4647 &flow->attr->ct_attr, extack);
4651 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
4655 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4656 complete_all(&flow->init_done);
4658 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4661 add_unready_flow(flow);
4667 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4668 mlx5e_flow_put(priv, flow);
4670 return ERR_PTR(err);
4673 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4674 struct mlx5e_tc_flow *flow,
4675 unsigned long flow_flags)
4677 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4678 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4679 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4680 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4681 struct mlx5e_tc_flow_parse_attr *parse_attr;
4682 struct mlx5e_rep_priv *peer_urpriv;
4683 struct mlx5e_tc_flow *peer_flow;
4684 struct mlx5_core_dev *in_mdev;
4687 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4691 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4692 peer_priv = netdev_priv(peer_urpriv->netdev);
	/* in_mdev is the mdev the packet originated from.
	 * Packets redirected to the uplink use the same mdev as the
	 * original flow, while packets redirected from the uplink use the
	 * peer mdev.
	 */
4699 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4700 in_mdev = peer_priv->mdev;
4702 in_mdev = priv->mdev;
4704 parse_attr = flow->attr->parse_attr;
4705 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4706 parse_attr->filter_dev,
4707 attr->in_rep, in_mdev);
4708 if (IS_ERR(peer_flow)) {
4709 err = PTR_ERR(peer_flow);
4713 flow->peer_flow = peer_flow;
4714 flow_flag_set(flow, DUP);
4715 mutex_lock(&esw->offloads.peer_mutex);
4716 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4717 mutex_unlock(&esw->offloads.peer_mutex);
4720 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4725 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4726 struct flow_cls_offload *f,
4727 unsigned long flow_flags,
4728 struct net_device *filter_dev,
4729 struct mlx5e_tc_flow **__flow)
4731 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4732 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4733 struct mlx5_core_dev *in_mdev = priv->mdev;
4734 struct mlx5e_tc_flow *flow;
4737 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4740 return PTR_ERR(flow);
4742 if (is_peer_flow_needed(flow)) {
4743 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4745 mlx5e_tc_del_fdb_flow(priv, flow);
4759 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4760 struct flow_cls_offload *f,
4761 unsigned long flow_flags,
4762 struct net_device *filter_dev,
4763 struct mlx5e_tc_flow **__flow)
4765 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4766 struct netlink_ext_ack *extack = f->common.extack;
4767 struct mlx5e_tc_flow_parse_attr *parse_attr;
4768 struct mlx5e_tc_flow *flow;
4771 if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4772 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4774 } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4778 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4779 attr_size = sizeof(struct mlx5_nic_flow_attr);
4780 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4781 &parse_attr, &flow);
4785 parse_attr->filter_dev = filter_dev;
4786 mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4788 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4793 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4794 &flow->attr->ct_attr, extack);
4798 err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4802 err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4806 flow_flag_set(flow, OFFLOADED);
4812 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4813 mlx5e_flow_put(priv, flow);
4819 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4820 struct flow_cls_offload *f,
4821 unsigned long flags,
4822 struct net_device *filter_dev,
4823 struct mlx5e_tc_flow **flow)
4825 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4826 unsigned long flow_flags;
4829 get_flags(flags, &flow_flags);
4831 if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4834 if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4835 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4838 err = mlx5e_add_nic_flow(priv, f, flow_flags,
4844 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4845 struct mlx5e_rep_priv *rpriv)
	/* An offloaded flow rule is allowed to be duplicated on a non-uplink
	 * representor sharing a tc block with other slaves of a lag device.
	 * rpriv can be NULL if this function is called from NIC mode.
	 */
4851 return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
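/* Example: representors of two bond slaves share one tc block, so the same
 * filter is replayed to both; the second replay is treated as an expected
 * duplicate rather than an error (see mlx5e_configure_flower() below).
 */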
4854 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4855 struct flow_cls_offload *f, unsigned long flags)
4857 struct netlink_ext_ack *extack = f->common.extack;
4858 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4859 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4860 struct mlx5e_tc_flow *flow;
4864 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
		/* Same flow rule offloaded to non-uplink representor sharing
		 * tc block; when duplication is allowed, skip the rule
		 * without raising an error.
		 */
4869 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4872 NL_SET_ERR_MSG_MOD(extack,
4873 "flow cookie already exists, ignoring");
4874 netdev_warn_once(priv->netdev,
4875 "flow cookie %lx already exists, ignoring\n",
4885 trace_mlx5e_configure_flower(f);
4886 err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
4893 if (is_flow_rule_duplicate_allowed(dev, rpriv))
4894 flow->orig_dev = dev;
4896 err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4903 mlx5e_flow_put(priv, flow);
4908 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4910 bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4911 bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
4913 return flow_flag_test(flow, INGRESS) == dir_ingress &&
4914 flow_flag_test(flow, EGRESS) == dir_egress;
4917 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4918 struct flow_cls_offload *f, unsigned long flags)
4920 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4921 struct mlx5e_tc_flow *flow;
4925 flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4926 if (!flow || !same_flow_direction(flow, flags)) {
	/* Only delete the flow if it doesn't have the MLX5E_TC_FLOW_DELETED
	 * flag set.
	 */
4934 if (flow_flag_test_and_set(flow, DELETED)) {
4938 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4941 trace_mlx5e_delete_flower(f);
4942 mlx5e_flow_put(priv, flow);
4951 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4952 struct flow_cls_offload *f, unsigned long flags)
4954 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4955 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4956 struct mlx5_eswitch *peer_esw;
4957 struct mlx5e_tc_flow *flow;
4958 struct mlx5_fc *counter;
4965 flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4969 return PTR_ERR(flow);
4971 if (!same_flow_direction(flow, flags)) {
4976 if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4977 counter = mlx5e_tc_get_counter(flow);
4981 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
4987 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4991 if (flow_flag_test(flow, DUP) &&
4992 flow_flag_test(flow->peer_flow, OFFLOADED)) {
4997 counter = mlx5e_tc_get_counter(flow->peer_flow);
4999 goto no_peer_counter;
5000 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
5003 packets += packets2;
5004 lastuse = max_t(u64, lastuse, lastuse2);
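		/* Under LAG the rule is duplicated on the peer eswitch (see
		 * mlx5e_tc_add_fdb_peer_flow()), so both counters are summed
		 * and the most recent lastuse is reported upward.
		 */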
5008 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
5010 flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
5011 FLOW_ACTION_HW_STATS_DELAYED);
5012 trace_mlx5e_stats_flower(f);
5014 mlx5e_flow_put(priv, flow);
5018 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
5019 struct netlink_ext_ack *extack)
5021 struct mlx5e_rep_priv *rpriv = priv->ppriv;
5022 struct mlx5_eswitch *esw;
5027 vport_num = rpriv->rep->vport;
5028 if (vport_num >= MLX5_VPORT_ECPF) {
5029 NL_SET_ERR_MSG_MOD(extack,
5030 "Ingress rate limit is supported only for Eswitch ports connected to VFs");
5034 esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest Mbit/sec,
	 * where Mbit means one million bits.
	 * Moreover, if rate is non-zero we choose to configure a minimum of
	 * 1 Mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		/* do_div() divides in place and returns the remainder, so
		 * read the quotient back from rate itself.
		 */
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}
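	/* Worked example: rate = 1562500 B/s -> 12500000 bits/s; adding
	 * 500000 before the divide rounds to the nearest whole Mbit, giving
	 * 13 Mbit/s. Any non-zero rate below 0.5 Mbit/s is clamped up to
	 * 1 Mbit/s by the max_t() above.
	 */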
5046 err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
5048 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
5053 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
5054 struct flow_action *flow_action,
5055 struct netlink_ext_ack *extack)
5057 struct mlx5e_rep_priv *rpriv = priv->ppriv;
5058 const struct flow_action_entry *act;
5062 if (!flow_action_has_entries(flow_action)) {
5063 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
5067 if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
5072 if (!flow_action_basic_hw_stats_check(flow_action, extack))
5075 flow_action_for_each(i, act, flow_action) {
5077 case FLOW_ACTION_POLICE:
5078 err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
5082 rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
5085 NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
5093 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
5094 struct tc_cls_matchall_offload *ma)
5096 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5097 struct netlink_ext_ack *extack = ma->common.extack;
5099 if (!mlx5_esw_qos_enabled(esw)) {
5100 NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
5104 if (ma->common.prio != 1) {
5105 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
5109 return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
5112 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
5113 struct tc_cls_matchall_offload *ma)
5115 struct netlink_ext_ack *extack = ma->common.extack;
5117 return apply_police_params(priv, 0, extack);
5120 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
5121 struct tc_cls_matchall_offload *ma)
5123 struct mlx5e_rep_priv *rpriv = priv->ppriv;
5124 struct rtnl_link_stats64 cur_stats;
5128 cur_stats = priv->stats.vf_vport;
5129 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
5130 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
5131 rpriv->prev_vf_vport_stats = cur_stats;
5132 flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
5133 FLOW_ACTION_HW_STATS_DELAYED);
5136 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
5137 struct mlx5e_priv *peer_priv)
5139 struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
5140 struct mlx5e_hairpin_entry *hpe, *tmp;
5141 LIST_HEAD(init_wait_list);
5145 if (!same_hw_devs(priv, peer_priv))
5148 peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
5150 mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
5151 hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
5152 if (refcount_inc_not_zero(&hpe->refcnt))
5153 list_add(&hpe->dead_peer_wait_list, &init_wait_list);
5154 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
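	/* The walk below runs without hairpin_tbl_lock held because
	 * wait_for_completion() may sleep; the refcounts taken above keep
	 * the entries alive until mlx5e_hairpin_put() releases them.
	 */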
5156 list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
5157 wait_for_completion(&hpe->res_ready);
5158 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
5159 hpe->hp->pair->peer_gone = true;
5161 mlx5e_hairpin_put(priv, hpe);
5165 static int mlx5e_tc_netdev_event(struct notifier_block *this,
5166 unsigned long event, void *ptr)
5168 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5169 struct mlx5e_flow_steering *fs;
5170 struct mlx5e_priv *peer_priv;
5171 struct mlx5e_tc_table *tc;
5172 struct mlx5e_priv *priv;
5174 if (ndev->netdev_ops != &mlx5e_netdev_ops ||
5175 event != NETDEV_UNREGISTER ||
5176 ndev->reg_state == NETREG_REGISTERED)
5179 tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
5180 fs = container_of(tc, struct mlx5e_flow_steering, tc);
5181 priv = container_of(fs, struct mlx5e_priv, fs);
5182 peer_priv = netdev_priv(ndev);
5183 if (priv == peer_priv ||
5184 !(priv->netdev->features & NETIF_F_HW_TC))
5187 mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
5192 static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
5194 int tc_grp_size, tc_tbl_size;
5195 u32 max_flow_counter;
5197 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
5198 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
5200 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
5202 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
5203 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
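	/* Worked example (hypothetical caps): max_flow_counter = 0x8000 and
	 * log_max_ft_size = 16 give tc_grp_size = min(0x8000,
	 * MLX5E_TC_TABLE_MAX_GROUP_SIZE) and cap the table at
	 * BIT(16) = 64K entries.
	 */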
5208 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
5210 struct mlx5e_tc_table *tc = &priv->fs.tc;
5211 struct mlx5_core_dev *dev = priv->mdev;
5212 struct mlx5_chains_attr attr = {};
5215 mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
5216 mutex_init(&tc->t_lock);
5217 mutex_init(&tc->hairpin_tbl_lock);
5218 hash_init(tc->hairpin_tbl);
5220 err = rhashtable_init(&tc->ht, &tc_ht_params);
5224 if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
5225 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
5226 MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
5227 attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
5229 attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
5230 attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
5231 attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
5232 attr.default_ft = priv->fs.vlan.ft.t;
5234 tc->chains = mlx5_chains_create(dev, &attr);
5235 if (IS_ERR(tc->chains)) {
5236 err = PTR_ERR(tc->chains);
5240 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
5241 MLX5_FLOW_NAMESPACE_KERNEL);
5242 if (IS_ERR(tc->ct)) {
5243 err = PTR_ERR(tc->ct);
5247 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
5248 err = register_netdevice_notifier_dev_net(priv->netdev,
5252 tc->netdevice_nb.notifier_call = NULL;
5253 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
5260 mlx5_tc_ct_clean(tc->ct);
5262 mlx5_chains_destroy(tc->chains);
5264 rhashtable_destroy(&tc->ht);
5268 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
5270 struct mlx5e_tc_flow *flow = ptr;
5271 struct mlx5e_priv *priv = flow->priv;
5273 mlx5e_tc_del_flow(priv, flow);
5277 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
5279 struct mlx5e_tc_table *tc = &priv->fs.tc;
5281 if (tc->netdevice_nb.notifier_call)
5282 unregister_netdevice_notifier_dev_net(priv->netdev,
5286 mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
5287 mutex_destroy(&tc->hairpin_tbl_lock);
5289 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
5291 if (!IS_ERR_OR_NULL(tc->t)) {
5292 mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
5295 mutex_destroy(&tc->t_lock);
5297 mlx5_tc_ct_clean(tc->ct);
5298 mlx5_chains_destroy(tc->chains);
5301 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
5303 const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
5304 struct mlx5_rep_uplink_priv *uplink_priv;
5305 struct mlx5e_rep_priv *rpriv;
5306 struct mapping_ctx *mapping;
5307 struct mlx5_eswitch *esw;
5308 struct mlx5e_priv *priv;
5311 uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5312 rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
5313 priv = netdev_priv(rpriv->netdev);
5314 esw = priv->mdev->priv.eswitch;
5316 uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
5318 &esw->offloads.mod_hdr,
5319 MLX5_FLOW_NAMESPACE_FDB);
5320 if (IS_ERR(uplink_priv->ct_priv))
5323 mapping = mapping_create(sizeof(struct tunnel_match_key),
5324 TUNNEL_INFO_BITS_MASK, true);
5325 if (IS_ERR(mapping)) {
5326 err = PTR_ERR(mapping);
5327 goto err_tun_mapping;
5329 uplink_priv->tunnel_mapping = mapping;
5331 mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
5332 if (IS_ERR(mapping)) {
5333 err = PTR_ERR(mapping);
5334 goto err_enc_opts_mapping;
5336 uplink_priv->tunnel_enc_opts_mapping = mapping;
5338 err = rhashtable_init(tc_ht, &tc_ht_params);
5345 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5346 err_enc_opts_mapping:
5347 mapping_destroy(uplink_priv->tunnel_mapping);
5349 mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d\n", err);
5356 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
5358 struct mlx5_rep_uplink_priv *uplink_priv;
5360 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
5362 uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5364 mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5365 mapping_destroy(uplink_priv->tunnel_mapping);
5367 mlx5_tc_ct_clean(uplink_priv->ct_priv);
5370 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
5372 struct rhashtable *tc_ht = get_tc_ht(priv, flags);
5374 return atomic_read(&tc_ht->nelems);
5377 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
5379 struct mlx5e_tc_flow *flow, *tmp;
5381 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
5382 __mlx5e_tc_del_fdb_peer_flow(flow);
5385 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
5387 struct mlx5_rep_uplink_priv *rpriv =
5388 container_of(work, struct mlx5_rep_uplink_priv,
5389 reoffload_flows_work);
5390 struct mlx5e_tc_flow *flow, *tmp;
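	/* Retry flows parked on the unready list (e.g. those that failed
	 * with -ENETUNREACH under multipath in __mlx5e_add_fdb_flow());
	 * flows that offload successfully are removed from the list.
	 */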
5392 mutex_lock(&rpriv->unready_flows_lock);
5393 list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
5394 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
5395 unready_flow_del(flow);
5397 mutex_unlock(&rpriv->unready_flows_lock);
5400 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
5401 struct flow_cls_offload *cls_flower,
5402 unsigned long flags)
5404 switch (cls_flower->command) {
5405 case FLOW_CLS_REPLACE:
5406 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
5408 case FLOW_CLS_DESTROY:
5409 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
5411 case FLOW_CLS_STATS:
5412 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
5419 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5422 unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
5423 struct mlx5e_priv *priv = cb_priv;
5426 case TC_SETUP_CLSFLOWER:
5427 return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
5433 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
5434 struct sk_buff *skb)
5436 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5437 u32 chain = 0, chain_tag, reg_b, zone_restore_id;
5438 struct mlx5e_priv *priv = netdev_priv(skb->dev);
5439 struct mlx5e_tc_table *tc = &priv->fs.tc;
5440 struct tc_skb_ext *tc_skb_ext;
5443 reg_b = be32_to_cpu(cqe->ft_metadata);
5445 chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
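	/* reg_b packs two restore values: the bits covered by
	 * MLX5E_TC_TABLE_CHAIN_TAG_MASK hold the chain tag extracted above,
	 * while the bits at REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG) hold
	 * the CT zone restore id recovered below.
	 */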
5447 err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
5449 netdev_dbg(priv->netdev,
5450 "Couldn't find chain for chain tag: %d, err: %d\n",
5456 tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
5457 if (WARN_ON(!tc_skb_ext))
5460 tc_skb_ext->chain = chain;
5462 zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
5465 if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
5469 #endif /* CONFIG_NET_TC_SKB_EXT */