/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

struct mlx5_nic_flow_attr {
    u32 action;
    u32 flow_tag;
    struct mlx5_modify_hdr *modify_hdr;
    u32 hairpin_tirn;
    u8 match_level;
    struct mlx5_flow_table *hairpin_ft;
    struct mlx5_fc *counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)

enum {
    MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
    MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
    MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
    MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
    MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
    MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
    MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
    MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
    MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
    MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
    MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
    MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
    MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
    MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};

#define MLX5E_TC_MAX_SPLITS 1
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *       container_of(helper item, containing struct type, helper field[index])
 */
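/* Example with the encap helpers below: given a list_head embedded in
 * mlx5e_tc_flow.encaps[], first recover the encap_flow_item, then the flow:
 *
 *   efi = container_of(item, struct encap_flow_item, list);
 *   flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * This is the pattern used by mlx5e_take_all_encap_flows() further down.
 */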
struct encap_flow_item {
    struct mlx5e_encap_entry *e; /* attached encap instance */
    struct list_head list;
    int index;
};
struct mlx5e_tc_flow {
    struct rhash_head node;
    struct mlx5e_priv *priv;
    u64 cookie;
    unsigned long flags;
    struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

    /* flows sharing the same reformat object - currently mpls decap */
    struct list_head l3_to_l2_reformat;
    struct mlx5e_decap_entry *decap_reformat;

    /* Flow can be associated with multiple encap IDs.
     * The number of encaps is bounded by the number of supported
     * destinations.
     */
    struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
    struct mlx5e_tc_flow *peer_flow;
    struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
    struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
    struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
    struct list_head hairpin; /* flows sharing the same hairpin */
    struct list_head peer;    /* flows with peer flow */
    struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
    struct net_device *orig_dev; /* netdev adding flow first */
    int tmp_efi_index;
    struct list_head tmp_list; /* temporary flow list used by neigh update */
    refcount_t refcnt;
    struct rcu_head rcu_head;
    struct completion init_done;
    int tunnel_id; /* the mapped tunnel id of this flow */

    union {
        struct mlx5_esw_flow_attr esw_attr[0];
        struct mlx5_nic_flow_attr nic_attr[0];
    };
};
struct mlx5e_tc_flow_parse_attr {
    const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
    struct net_device *filter_dev;
    struct mlx5_flow_spec spec;
    struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
    int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
    struct ethhdr eth;
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
    [CHAIN_TO_REG] = {
        .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
    },
    [TUNNEL_TO_REG] = {
        .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
        .soffset = MLX5_BYTE_OFF(fte_match_param,
                                 misc_parameters_2.metadata_reg_c_1),
    },
    [ZONE_TO_REG] = zone_to_reg_ct,
    [CTSTATE_TO_REG] = ctstate_to_reg_ct,
    [MARK_TO_REG] = mark_to_reg_ct,
    [LABELS_TO_REG] = labels_to_reg_ct,
    [FTEID_TO_REG] = fteid_to_reg_ct,
    [TUPLEID_TO_REG] = tupleid_to_reg_ct,
};

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
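/* Add a match on the metadata register described by @type to @spec:
 * @data/@mask are written into the misc_parameters_2 register field at the
 * mapping's spec offset.
 */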
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
                            enum mlx5e_tc_attr_to_reg type,
                            u32 data,
                            u32 mask)
{
    int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
    int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
    void *headers_c = spec->match_criteria;
    void *headers_v = spec->match_value;
    void *fmask, *fval;

    fmask = headers_c + soffset;
    fval = headers_v + soffset;

    mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
    data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

    memcpy(fmask, &mask, match_len);
    memcpy(fval, &data, match_len);

    spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
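/* Append a set_action_in modify-header action that loads @data into the
 * metadata register described by @type, growing @mod_hdr_acts as needed.
 */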
int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
                              struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
                              enum mlx5e_tc_attr_to_reg type,
                              u32 data)
{
    int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
    int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
    int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
    char *modact;
    int err;

    err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
                                mod_hdr_acts);
    if (err)
        return err;

    modact = mod_hdr_acts->actions +
             (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

    /* Firmware has 5bit length field and 0 means 32bits */
    if (mlen == 4)
        mlen = 0;

    MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
    MLX5_SET(set_action_in, modact, field, mfield);
    MLX5_SET(set_action_in, modact, offset, moffset * 8);
    MLX5_SET(set_action_in, modact, length, mlen * 8);
    MLX5_SET(set_action_in, modact, data, data);
    mod_hdr_acts->num_actions++;

    return 0;
}
struct mlx5e_hairpin {
    struct mlx5_hairpin *pair;

    struct mlx5_core_dev *func_mdev;
    struct mlx5e_priv *func_priv;
    u32 tdn;
    u32 tirn;

    int num_channels;
    struct mlx5e_rqt indir_rqt;
    u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
    struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
    /* a node of a hash table which keeps all the hairpin entries */
    struct hlist_node hairpin_hlist;

    /* protects flows list */
    spinlock_t flows_lock;
    /* flows sharing the same hairpin */
    struct list_head flows;
    /* hpe's that were not fully initialized when dead peer update event
     * function traversed them.
     */
    struct list_head dead_peer_wait_list;

    u16 peer_vhca_id;
    u8 prio;
    struct mlx5e_hairpin *hp;
    refcount_t refcnt;
    struct completion res_ready;
};
struct mod_hdr_key {
    int num_actions;
    void *actions;
};

struct mlx5e_mod_hdr_entry {
    /* a node of a hash table which keeps all the mod_hdr entries */
    struct hlist_node mod_hdr_hlist;

    /* protects flows list */
    spinlock_t flows_lock;
    /* flows sharing the same mod_hdr entry */
    struct list_head flows;

    struct mod_hdr_key key;

    struct mlx5_modify_hdr *modify_hdr;

    refcount_t refcnt;
    struct completion res_ready;
    int compl_result;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
    if (!flow || !refcount_inc_not_zero(&flow->refcnt))
        return ERR_PTR(-EINVAL);
    return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
                           struct mlx5e_tc_flow *flow)
{
    if (refcount_dec_and_test(&flow->refcnt)) {
        mlx5e_tc_del_flow(priv, flow);
        kfree_rcu(flow, rcu_head);
    }
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
    /* Complete all memory stores before setting bit. */
    smp_mb__before_atomic();
    set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
                                     unsigned long flag)
{
    /* test_and_set_bit() provides all necessary barriers */
    return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)                      \
    __flow_flag_test_and_set(flow,                              \
                             MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
    /* Complete all memory stores before clearing bit. */
    smp_mb__before_atomic();
    clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,     \
                                                      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
    bool ret = test_bit(flag, &flow->flags);

    /* Read fields of flow structure only after checking flags. */
    smp_mb__after_atomic();
    return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,       \
                                                    MLX5E_TC_FLOW_FLAG_##flag)
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
    return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
    return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
    return flow_flag_test(flow, OFFLOADED);
}
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
    return jhash(key->actions,
                 key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
                                   struct mod_hdr_key *b)
{
    if (a->num_actions != b->num_actions)
        return 1;

    return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
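/* FDB (eswitch) flows keep their mod_hdr entries on the eswitch offloads
 * table; NIC flows use the per-interface tc table.
 */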
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

    return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
        &priv->fs.tc.mod_hdr;
}
static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
    struct mlx5e_mod_hdr_entry *mh, *found = NULL;

    hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
        if (!cmp_mod_hdr_info(&mh->key, key)) {
            refcount_inc(&mh->refcnt);
            found = mh;
            break;
        }
    }

    return found;
}
static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
                              struct mlx5e_mod_hdr_entry *mh,
                              int namespace)
{
    struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

    if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
        return;
    hash_del(&mh->mod_hdr_hlist);
    mutex_unlock(&tbl->lock);

    WARN_ON(!list_empty(&mh->flows));
    if (mh->compl_result > 0)
        mlx5_modify_header_dealloc(priv->mdev, mh->modify_hdr);

    kfree(mh);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
    return mlx5e_is_eswitch_flow(flow) ?
        MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
    int num_actions, actions_size, namespace, err;
    struct mlx5e_mod_hdr_entry *mh;
    struct mod_hdr_tbl *tbl;
    struct mod_hdr_key key;
    u32 hash_key;

    num_actions = parse_attr->mod_hdr_acts.num_actions;
    actions_size = MLX5_MH_ACT_SZ * num_actions;

    key.actions = parse_attr->mod_hdr_acts.actions;
    key.num_actions = num_actions;

    hash_key = hash_mod_hdr_info(&key);

    namespace = get_flow_name_space(flow);
    tbl = get_mod_hdr_table(priv, namespace);

    mutex_lock(&tbl->lock);
    mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
    if (mh) {
        mutex_unlock(&tbl->lock);
        wait_for_completion(&mh->res_ready);

        if (mh->compl_result < 0) {
            err = -EREMOTEIO;
            goto attach_header_err;
        }
        goto attach_flow;
    }

    mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
    if (!mh) {
        mutex_unlock(&tbl->lock);
        return -ENOMEM;
    }

    mh->key.actions = (void *)mh + sizeof(*mh);
    memcpy(mh->key.actions, key.actions, actions_size);
    mh->key.num_actions = num_actions;
    spin_lock_init(&mh->flows_lock);
    INIT_LIST_HEAD(&mh->flows);
    refcount_set(&mh->refcnt, 1);
    init_completion(&mh->res_ready);

    hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
    mutex_unlock(&tbl->lock);

    mh->modify_hdr = mlx5_modify_header_alloc(priv->mdev, namespace,
                                              mh->key.num_actions,
                                              mh->key.actions);
    if (IS_ERR(mh->modify_hdr)) {
        err = PTR_ERR(mh->modify_hdr);
        mh->compl_result = err;
        goto alloc_header_err;
    }
    mh->compl_result = 1;
    complete_all(&mh->res_ready);

attach_flow:
    flow->mh = mh;
    spin_lock(&mh->flows_lock);
    list_add(&flow->mod_hdr, &mh->flows);
    spin_unlock(&mh->flows_lock);
    if (mlx5e_is_eswitch_flow(flow))
        flow->esw_attr->modify_hdr = mh->modify_hdr;
    else
        flow->nic_attr->modify_hdr = mh->modify_hdr;

    return 0;

alloc_header_err:
    complete_all(&mh->res_ready);
attach_header_err:
    mlx5e_mod_hdr_put(priv, mh, namespace);
    return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
                                 struct mlx5e_tc_flow *flow)
{
    /* flow wasn't fully initialized */
    if (!flow->mh)
        return;

    spin_lock(&flow->mh->flows_lock);
    list_del(&flow->mod_hdr);
    spin_unlock(&flow->mh->flows_lock);

    mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
    flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
    struct net_device *netdev;
    struct mlx5e_priv *priv;

    netdev = __dev_get_by_index(net, ifindex);
    priv = netdev_priv(netdev);
    return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
    u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
    void *tirc;
    int err;

    err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
    if (err)
        goto alloc_tdn_err;

    tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

    MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
    MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
    MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

    err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
    if (err)
        goto create_tir_err;

    return 0;

create_tir_err:
    mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
    return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
    mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
    mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
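/* Spread the hairpin pair's RQs over the RQ table, honoring the configured
 * RSS hash function (the XOR hash requires bit-inverted indices).
 */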
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
    u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
    struct mlx5e_priv *priv = hp->func_priv;
    int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

    mlx5e_build_default_indir_rqt(indirection_rqt, sz,
                                  hp->num_channels);

    for (i = 0; i < sz; i++) {
        ix = i;
        if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
            ix = mlx5e_bits_invert(i, ilog2(sz));
        ix = indirection_rqt[ix];
        rqn = hp->pair->rqn[ix];
        MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
    }
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
    int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
    struct mlx5e_priv *priv = hp->func_priv;
    struct mlx5_core_dev *mdev = priv->mdev;
    void *rqtc;
    u32 *in;

    inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
    in = kvzalloc(inlen, GFP_KERNEL);
    if (!in)
        return -ENOMEM;

    rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

    MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
    MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

    mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

    err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
    if (!err)
        hp->indir_rqt.enabled = true;

    kvfree(in);
    return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
    struct mlx5e_priv *priv = hp->func_priv;
    u32 in[MLX5_ST_SZ_DW(create_tir_in)];
    int tt, i, err;
    void *tirc;

    for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
        struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

        memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
        mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

        err = mlx5_core_create_tir(hp->func_mdev, in,
                                   &hp->indir_tirn[tt]);
        if (err) {
            mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
            goto err_destroy_tirs;
        }
    }
    return 0;

err_destroy_tirs:
    for (i = 0; i < tt; i++)
        mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
    return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
    int tt;

    for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
        mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
                                         struct ttc_params *ttc_params)
{
    struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
    int tt;

    memset(ttc_params, 0, sizeof(*ttc_params));

    ttc_params->any_tt_tirn = hp->tirn;

    for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
        ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

    ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
    ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
    ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
    struct mlx5e_priv *priv = hp->func_priv;
    struct ttc_params ttc_params;
    int err;

    err = mlx5e_hairpin_create_indirect_rqt(hp);
    if (err)
        return err;

    err = mlx5e_hairpin_create_indirect_tirs(hp);
    if (err)
        goto err_create_indirect_tirs;

    mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
    err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
    if (err)
        goto err_create_ttc_table;

    netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
               hp->num_channels, hp->ttc.ft.t->id);

    return 0;

err_create_ttc_table:
    mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
    mlx5e_destroy_rqt(priv, &hp->indir_rqt);

    return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
    struct mlx5e_priv *priv = hp->func_priv;

    mlx5e_destroy_ttc_table(priv, &hp->ttc);
    mlx5e_hairpin_destroy_indirect_tirs(hp);
    mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
                     int peer_ifindex)
{
    struct mlx5_core_dev *func_mdev, *peer_mdev;
    struct mlx5e_hairpin *hp;
    struct mlx5_hairpin *pair;
    int err;

    hp = kzalloc(sizeof(*hp), GFP_KERNEL);
    if (!hp)
        return ERR_PTR(-ENOMEM);

    func_mdev = priv->mdev;
    peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

    pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
    if (IS_ERR(pair)) {
        err = PTR_ERR(pair);
        goto create_pair_err;
    }
    hp->pair = pair;
    hp->func_mdev = func_mdev;
    hp->func_priv = priv;
    hp->num_channels = params->num_channels;

    err = mlx5e_hairpin_create_transport(hp);
    if (err)
        goto create_transport_err;

    if (hp->num_channels > 1) {
        err = mlx5e_hairpin_rss_init(hp);
        if (err)
            goto rss_init_err;
    }

    return hp;

rss_init_err:
    mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
    mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
    kfree(hp);
    return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
    if (hp->num_channels > 1)
        mlx5e_hairpin_rss_cleanup(hp);
    mlx5e_hairpin_destroy_transport(hp);
    mlx5_core_hairpin_destroy(hp->pair);
    kfree(hp);
}
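/* Hairpin entries are hashed by the peer device vhca id and the PCP prio
 * they serve, so flows towards the same peer with the same prio share one
 * hairpin pair.
 */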
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
    return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
                                                     u16 peer_vhca_id, u8 prio)
{
    struct mlx5e_hairpin_entry *hpe;
    u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

    hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
                           hairpin_hlist, hash_key) {
        if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
            refcount_inc(&hpe->refcnt);
            return hpe;
        }
    }

    return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
                              struct mlx5e_hairpin_entry *hpe)
{
    /* no more hairpin flows for us, release the hairpin pair */
    if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
        return;
    hash_del(&hpe->hairpin_hlist);
    mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

    if (!IS_ERR_OR_NULL(hpe->hp)) {
        netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
                   dev_name(hpe->hp->pair->peer_mdev->device));

        mlx5e_hairpin_destroy(hpe->hp);
    }

    WARN_ON(!list_empty(&hpe->flows));
    kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
                                  struct mlx5_flow_spec *spec, u8 *match_prio,
                                  struct netlink_ext_ack *extack)
{
    void *headers_c, *headers_v;
    u8 prio_val, prio_mask = 0;
    bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
    if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
        NL_SET_ERR_MSG_MOD(extack,
                           "only PCP trust state supported for hairpin");
        return -EOPNOTSUPP;
    }
#endif
    headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
    headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

    vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
    if (vlan_present) {
        prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
        prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
    }

    if (!vlan_present || !prio_mask) {
        prio_val = UNKNOWN_MATCH_PRIO;
    } else if (prio_mask != 0x7) {
        NL_SET_ERR_MSG_MOD(extack,
                           "masked priority match not supported for hairpin");
        return -EOPNOTSUPP;
    }

    *match_prio = prio_val;
    return 0;
}
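/* Look up or allocate the hairpin entry for the mirred peer and attach the
 * flow to it; one hairpin channel is created per 50Gbps share of the link
 * speed (see the params computation below).
 */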
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
                                  struct netlink_ext_ack *extack)
{
    int peer_ifindex = parse_attr->mirred_ifindex[0];
    struct mlx5_hairpin_params params;
    struct mlx5_core_dev *peer_mdev;
    struct mlx5e_hairpin_entry *hpe;
    struct mlx5e_hairpin *hp;
    u64 link_speed64;
    u32 link_speed;
    u8 match_prio;
    u16 peer_id;
    int err;

    peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
    if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
        NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
        return -EOPNOTSUPP;
    }

    peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
    err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
                                 extack);
    if (err)
        return err;

    mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
    hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
    if (hpe) {
        mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
        wait_for_completion(&hpe->res_ready);

        if (IS_ERR(hpe->hp)) {
            err = -EREMOTEIO;
            goto out_err;
        }
        goto attach_flow;
    }

    hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
    if (!hpe) {
        mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
        return -ENOMEM;
    }

    spin_lock_init(&hpe->flows_lock);
    INIT_LIST_HEAD(&hpe->flows);
    INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
    hpe->peer_vhca_id = peer_id;
    hpe->prio = match_prio;
    refcount_set(&hpe->refcnt, 1);
    init_completion(&hpe->res_ready);

    hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
             hash_hairpin_info(peer_id, match_prio));
    mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

    params.log_data_size = 15;
    params.log_data_size = min_t(u8, params.log_data_size,
                                 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
    params.log_data_size = max_t(u8, params.log_data_size,
                                 MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

    params.log_num_packets = params.log_data_size -
                             MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
    params.log_num_packets = min_t(u8, params.log_num_packets,
                                   MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

    params.q_counter = priv->q_counter;
    /* set hairpin pair per each 50Gbs share of the link */
    mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
    link_speed = max_t(u32, link_speed, 50000);
    link_speed64 = link_speed;
    do_div(link_speed64, 50000);
    params.num_channels = link_speed64;

    hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
    hpe->hp = hp;
    complete_all(&hpe->res_ready);
    if (IS_ERR(hp)) {
        err = PTR_ERR(hp);
        goto out_err;
    }

    netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
               hp->tirn, hp->pair->rqn[0],
               dev_name(hp->pair->peer_mdev->device),
               hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
    if (hpe->hp->num_channels > 1) {
        flow_flag_set(flow, HAIRPIN_RSS);
        flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
    } else {
        flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
    }

    flow->hpe = hpe;
    spin_lock(&hpe->flows_lock);
    list_add(&flow->hairpin, &hpe->flows);
    spin_unlock(&hpe->flows_lock);

    return 0;

out_err:
    mlx5e_hairpin_put(priv, hpe);
    return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
                                   struct mlx5e_tc_flow *flow)
{
    /* flow wasn't fully initialized */
    if (!flow->hpe)
        return;

    spin_lock(&flow->hpe->flows_lock);
    list_del(&flow->hairpin);
    spin_unlock(&flow->hpe->flows_lock);

    mlx5e_hairpin_put(priv, flow->hpe);
    flow->hpe = NULL;
}
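/* Set up a NIC (non-eswitch) offload: build the destination list (hairpin
 * TIR/table or the vlan table), an optional counter and mod_hdr, create the
 * tc flow table on first use, then add the rule.
 */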
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
    struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
    struct mlx5_nic_flow_attr *attr = flow->nic_attr;
    struct mlx5_core_dev *dev = priv->mdev;
    struct mlx5_flow_destination dest[2] = {};
    struct mlx5_flow_act flow_act = {
        .action = attr->action,
        .flags = FLOW_ACT_NO_APPEND,
    };
    struct mlx5_fc *counter = NULL;
    int err, dest_ix = 0;

    flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
    flow_context->flow_tag = attr->flow_tag;

    if (flow_flag_test(flow, HAIRPIN)) {
        err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
        if (err)
            return err;

        if (flow_flag_test(flow, HAIRPIN_RSS)) {
            dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
            dest[dest_ix].ft = attr->hairpin_ft;
        } else {
            dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
            dest[dest_ix].tir_num = attr->hairpin_tirn;
        }
        dest_ix++;
    } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
        dest[dest_ix].ft = priv->fs.vlan.ft.t;
        dest_ix++;
    }

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
        counter = mlx5_fc_create(dev, true);
        if (IS_ERR(counter))
            return PTR_ERR(counter);

        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
        dest[dest_ix].counter_id = mlx5_fc_id(counter);
        dest_ix++;
        attr->counter = counter;
    }

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
        err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
        flow_act.modify_hdr = attr->modify_hdr;
        dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
        if (err)
            return err;
    }

    mutex_lock(&priv->fs.tc.t_lock);
    if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
        struct mlx5_flow_table_attr ft_attr = {};
        int tc_grp_size, tc_tbl_size, tc_num_grps;
        u32 max_flow_counter;

        max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                            MLX5_CAP_GEN(dev, max_flow_counter_15_0);

        tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

        tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
                            BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
        tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

        ft_attr.prio = MLX5E_TC_PRIO;
        ft_attr.max_fte = tc_tbl_size;
        ft_attr.level = MLX5E_TC_FT_LEVEL;
        ft_attr.autogroup.max_num_groups = tc_num_grps;
        priv->fs.tc.t =
            mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                &ft_attr);
        if (IS_ERR(priv->fs.tc.t)) {
            mutex_unlock(&priv->fs.tc.t_lock);
            NL_SET_ERR_MSG_MOD(extack,
                               "Failed to create tc offload table");
            netdev_err(priv->netdev,
                       "Failed to create tc offload table\n");
            return PTR_ERR(priv->fs.tc.t);
        }
    }

    if (attr->match_level != MLX5_MATCH_NONE)
        parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

    flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
                                        &flow_act, dest, dest_ix);
    mutex_unlock(&priv->fs.tc.t_lock);

    return PTR_ERR_OR_ZERO(flow->rule[0]);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
    struct mlx5_nic_flow_attr *attr = flow->nic_attr;
    struct mlx5_fc *counter = NULL;

    counter = attr->counter;
    if (!IS_ERR_OR_NULL(flow->rule[0]))
        mlx5_del_flow_rules(flow->rule[0]);
    mlx5_fc_destroy(priv->mdev, counter);

    mutex_lock(&priv->fs.tc.t_lock);
    if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
        mlx5_destroy_flow_table(priv->fs.tc.t);
        priv->fs.tc.t = NULL;
    }
    mutex_unlock(&priv->fs.tc.t_lock);

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
        mlx5e_detach_mod_hdr(priv, flow);

    if (flow_flag_test(flow, HAIRPIN))
        mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
                              int out_index,
                              struct netlink_ext_ack *extack,
                              struct net_device **encap_dev,
                              bool *encap_valid);
static int mlx5e_attach_decap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct netlink_ext_ack *extack);
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
                           struct mlx5e_tc_flow *flow,
                           struct mlx5_flow_spec *spec,
                           struct mlx5_esw_flow_attr *attr)
{
    struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
    struct mlx5_flow_handle *rule;

    if (flow_flag_test(flow, CT)) {
        mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

        return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
                                       mod_hdr_acts);
    }

    rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
    if (IS_ERR(rule))
        return rule;

    if (attr->split_count) {
        flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
        if (IS_ERR(flow->rule[1])) {
            mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
            return flow->rule[1];
        }
    }

    return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
                             struct mlx5e_tc_flow *flow,
                             struct mlx5_esw_flow_attr *attr)
{
    flow_flag_clear(flow, OFFLOADED);

    if (flow_flag_test(flow, CT)) {
        mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
        return;
    }

    if (attr->split_count)
        mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

    mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
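/* Install a temporary "slow path" rule that only forwards to the uplink,
 * used while an encap destination's neighbour is not yet resolved.
 */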
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_spec *spec)
{
    struct mlx5_esw_flow_attr slow_attr;
    struct mlx5_flow_handle *rule;

    memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
    slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
    slow_attr.split_count = 0;
    slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

    rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
    if (!IS_ERR(rule))
        flow_flag_set(flow, SLOW);

    return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
                                  struct mlx5e_tc_flow *flow)
{
    struct mlx5_esw_flow_attr slow_attr;

    memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
    slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
    slow_attr.split_count = 0;
    slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
    mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
    flow_flag_clear(flow, SLOW);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
                             struct list_head *unready_flows)
{
    flow_flag_set(flow, NOT_READY);
    list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
    list_del(&flow->unready);
    flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
    struct mlx5_rep_uplink_priv *uplink_priv;
    struct mlx5e_rep_priv *rpriv;
    struct mlx5_eswitch *esw;

    esw = flow->priv->mdev->priv.eswitch;
    rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
    uplink_priv = &rpriv->uplink_priv;

    mutex_lock(&uplink_priv->unready_flows_lock);
    unready_flow_add(flow, &uplink_priv->unready_flows);
    mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
    struct mlx5_rep_uplink_priv *uplink_priv;
    struct mlx5e_rep_priv *rpriv;
    struct mlx5_eswitch *esw;

    esw = flow->priv->mdev->priv.eswitch;
    rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
    uplink_priv = &rpriv->uplink_priv;

    mutex_lock(&uplink_priv->unready_flows_lock);
    unready_flow_del(flow);
    mutex_unlock(&uplink_priv->unready_flows_lock);
}
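/* Validate chain/prio against device limits, resolve decap/encap
 * destinations and vlan/mod_hdr/counter actions, then install the FDB rule
 * (or a slow path rule while some encap neighbour is still invalid).
 */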
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5_esw_flow_attr *attr = flow->esw_attr;
    struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
    struct net_device *out_dev, *encap_dev = NULL;
    struct mlx5_fc *counter = NULL;
    struct mlx5e_rep_priv *rpriv;
    struct mlx5e_priv *out_priv;
    bool encap_valid = true;
    u32 max_prio, max_chain;
    int err = 0;
    int out_index;

    if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
        NL_SET_ERR_MSG_MOD(extack,
                           "E-switch priorities unsupported, upgrade FW");
        return -EOPNOTSUPP;
    }

    /* We check chain range only for tc flows.
     * For ft flows, we checked attr->chain was originally 0 and set it to
     * FDB_FT_CHAIN which is outside tc range.
     * See mlx5e_rep_setup_ft_cb().
     */
    max_chain = mlx5_esw_chains_get_chain_range(esw);
    if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Requested chain is out of supported range");
        return -EOPNOTSUPP;
    }

    max_prio = mlx5_esw_chains_get_prio_range(esw);
    if (attr->prio > max_prio) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Requested priority is out of supported range");
        return -EOPNOTSUPP;
    }

    if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
        err = mlx5e_attach_decap(priv, flow, extack);
        if (err)
            return err;
    }

    for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
        int mirred_ifindex;

        if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
            continue;

        mirred_ifindex = parse_attr->mirred_ifindex[out_index];
        out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                     mirred_ifindex);
        err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
                                 extack, &encap_dev, &encap_valid);
        if (err)
            return err;

        out_priv = netdev_priv(encap_dev);
        rpriv = out_priv->ppriv;
        attr->dests[out_index].rep = rpriv->rep;
        attr->dests[out_index].mdev = out_priv->mdev;
    }

    err = mlx5_eswitch_add_vlan_action(esw, attr);
    if (err)
        return err;

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
        !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
        err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
        dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
        if (err)
            return err;
    }

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
        counter = mlx5_fc_create(attr->counter_dev, true);
        if (IS_ERR(counter))
            return PTR_ERR(counter);

        attr->counter = counter;
    }

    /* we get here if one of the following takes place:
     * (1) there's no error
     * (2) there's an encap action and we don't have valid neigh
     */
    if (!encap_valid)
        flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
    else
        flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

    if (IS_ERR(flow->rule[0]))
        return PTR_ERR(flow->rule[0]);
    else
        flow_flag_set(flow, OFFLOADED);

    return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
    struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
    void *headers_v = MLX5_ADDR_OF(fte_match_param,
                                   spec->match_value,
                                   misc_parameters_3);
    u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
                                         headers_v,
                                         geneve_tlv_option_0_data);

    return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5_esw_flow_attr *attr = flow->esw_attr;
    int out_index;

    mlx5e_put_flow_tunnel_id(flow);

    if (flow_flag_test(flow, NOT_READY)) {
        remove_unready_flow(flow);
        kvfree(attr->parse_attr);
        return;
    }

    if (mlx5e_is_offloaded_flow(flow)) {
        if (flow_flag_test(flow, SLOW))
            mlx5e_tc_unoffload_from_slow_path(esw, flow);
        else
            mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
    }

    if (mlx5_flow_has_geneve_opt(flow))
        mlx5_geneve_tlv_option_del(priv->mdev->geneve);

    mlx5_eswitch_del_vlan_action(esw, attr);

    for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
        if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
            mlx5e_detach_encap(priv, flow, out_index);
            kfree(attr->parse_attr->tun_info[out_index]);
        }
    kvfree(attr->parse_attr);

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
        mlx5e_detach_mod_hdr(priv, flow);

    if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
        mlx5_fc_destroy(attr->counter_dev, attr->counter);

    if (flow_flag_test(flow, L3_TO_L2_DECAP))
        mlx5e_detach_decap(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e,
                              struct list_head *flow_list)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5_esw_flow_attr *esw_attr;
    struct mlx5_flow_handle *rule;
    struct mlx5_flow_spec *spec;
    struct mlx5e_tc_flow *flow;
    int err;

    e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
                                                 e->reformat_type,
                                                 e->encap_size, e->encap_header,
                                                 MLX5_FLOW_NAMESPACE_FDB);
    if (IS_ERR(e->pkt_reformat)) {
        mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
                       PTR_ERR(e->pkt_reformat));
        return;
    }
    e->flags |= MLX5_ENCAP_ENTRY_VALID;
    mlx5e_rep_queue_neigh_stats_work(priv);

    list_for_each_entry(flow, flow_list, tmp_list) {
        bool all_flow_encaps_valid = true;
        int i;

        if (!mlx5e_is_offloaded_flow(flow))
            continue;
        esw_attr = flow->esw_attr;
        spec = &esw_attr->parse_attr->spec;

        esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
        esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
        /* Flow can be associated with multiple encap entries.
         * Before offloading the flow verify that all of them have
         * a valid neighbour.
         */
        for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
            if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
                continue;
            if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
                all_flow_encaps_valid = false;
                break;
            }
        }
        /* Do not offload flows with unresolved neighbors */
        if (!all_flow_encaps_valid)
            continue;
        /* update from slow path rule to encap rule */
        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
        if (IS_ERR(rule)) {
            err = PTR_ERR(rule);
            mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                           err);
            continue;
        }

        mlx5e_tc_unoffload_from_slow_path(esw, flow);
        flow->rule[0] = rule;
        /* was unset when slow path rule removed */
        flow_flag_set(flow, OFFLOADED);
    }
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e,
                              struct list_head *flow_list)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5_flow_handle *rule;
    struct mlx5_flow_spec *spec;
    struct mlx5e_tc_flow *flow;
    int err;

    list_for_each_entry(flow, flow_list, tmp_list) {
        if (!mlx5e_is_offloaded_flow(flow))
            continue;
        spec = &flow->esw_attr->parse_attr->spec;

        /* update from encap rule to slow path rule */
        rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
        /* mark the flow's encap dest as non-valid */
        flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

        if (IS_ERR(rule)) {
            err = PTR_ERR(rule);
            mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
                           err);
            continue;
        }

        mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
        flow->rule[0] = rule;
        /* was unset when fast path rule removed */
        flow_flag_set(flow, OFFLOADED);
    }

    /* we know that the encap is valid */
    e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
    mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
    if (mlx5e_is_eswitch_flow(flow))
        return flow->esw_attr->counter;
    else
        return flow->nic_attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
    struct encap_flow_item *efi;
    struct mlx5e_tc_flow *flow;

    list_for_each_entry(efi, &e->flows, list) {
        flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
        if (IS_ERR(mlx5e_flow_get(flow)))
            continue;
        wait_for_completion(&flow->init_done);

        flow->tmp_efi_index = efi->index;
        list_add(&flow->tmp_list, flow_list);
    }
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
    struct mlx5e_tc_flow *flow, *tmp;

    list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
        mlx5e_flow_put(priv, flow);
}
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
                           struct mlx5e_encap_entry *e)
{
    struct mlx5e_encap_entry *next = NULL;

retry:
    rcu_read_lock();

    /* find encap with non-zero reference counter value */
    for (next = e ?
                list_next_or_null_rcu(&nhe->encap_list,
                                      &e->encap_list,
                                      struct mlx5e_encap_entry,
                                      encap_list) :
                list_first_or_null_rcu(&nhe->encap_list,
                                       struct mlx5e_encap_entry,
                                       encap_list);
         next;
         next = list_next_or_null_rcu(&nhe->encap_list,
                                      &next->encap_list,
                                      struct mlx5e_encap_entry,
                                      encap_list))
        if (mlx5e_encap_take(next))
            break;

    rcu_read_unlock();

    /* release starting encap */
    if (e)
        mlx5e_encap_put(netdev_priv(e->out_dev), e);
    if (!next)
        return next;

    /* wait for encap to be fully initialized */
    wait_for_completion(&next->res_ready);
    /* continue searching if encap entry is not in valid state after completion */
    if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
        e = next;
        goto retry;
    }

    return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
    struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
    struct mlx5e_encap_entry *e = NULL;
    struct mlx5e_tc_flow *flow;
    struct mlx5_fc *counter;
    struct neigh_table *tbl;
    bool neigh_used = false;
    struct neighbour *n;
    u64 lastuse;

    if (m_neigh->family == AF_INET)
        tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
    else if (m_neigh->family == AF_INET6)
        tbl = ipv6_stub->nd_tbl;
#endif
    else
        return;

    /* mlx5e_get_next_valid_encap() releases previous encap before returning
     * the next one.
     */
    while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
        struct mlx5e_priv *priv = netdev_priv(e->out_dev);
        struct encap_flow_item *efi, *tmp;
        struct mlx5_eswitch *esw;
        LIST_HEAD(flow_list);

        esw = priv->mdev->priv.eswitch;
        mutex_lock(&esw->offloads.encap_tbl_lock);
        list_for_each_entry_safe(efi, tmp, &e->flows, list) {
            flow = container_of(efi, struct mlx5e_tc_flow,
                                encaps[efi->index]);
            if (IS_ERR(mlx5e_flow_get(flow)))
                continue;
            list_add(&flow->tmp_list, &flow_list);

            if (mlx5e_is_offloaded_flow(flow)) {
                counter = mlx5e_tc_get_counter(flow);
                lastuse = mlx5_fc_query_lastuse(counter);
                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                    neigh_used = true;
                    break;
                }
            }
        }
        mutex_unlock(&esw->offloads.encap_tbl_lock);

        mlx5e_put_encap_flow_list(priv, &flow_list);
        if (neigh_used) {
            /* release current encap before breaking the loop */
            mlx5e_encap_put(priv, e);
            break;
        }
    }

    trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

    if (neigh_used) {
        nhe->reported_lastuse = jiffies;

        /* find the relevant neigh according to the cached device and
         * dst ip pair
         */
        n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
        if (!n)
            return;

        neigh_event_send(n, NULL);
        neigh_release(n);
    }
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
    WARN_ON(!list_empty(&e->flows));

    if (e->compl_result > 0) {
        mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

        if (e->flags & MLX5_ENCAP_ENTRY_VALID)
            mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
    }

    kfree(e->tun_info);
    kfree(e->encap_header);
    kfree_rcu(e, rcu);
}

static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
                                struct mlx5e_decap_entry *d)
{
    WARN_ON(!list_empty(&d->flows));

    if (!d->compl_result)
        mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);

    kfree_rcu(d, rcu);
}
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

    if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
        return;
    hash_del_rcu(&e->encap_hlist);
    mutex_unlock(&esw->offloads.encap_tbl_lock);

    mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

    if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
        return;
    hash_del_rcu(&d->hlist);
    mutex_unlock(&esw->offloads.decap_tbl_lock);

    mlx5e_decap_dealloc(priv, d);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index)
{
    struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

    /* flow wasn't fully initialized */
    if (!e)
        return;

    mutex_lock(&esw->offloads.encap_tbl_lock);
    list_del(&flow->encaps[out_index].list);
    flow->encaps[out_index].e = NULL;
    if (!refcount_dec_and_test(&e->refcnt)) {
        mutex_unlock(&esw->offloads.encap_tbl_lock);
        return;
    }
    hash_del_rcu(&e->encap_hlist);
    mutex_unlock(&esw->offloads.encap_tbl_lock);

    mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_detach_decap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct mlx5e_decap_entry *d = flow->decap_reformat;

    if (!d)
        return;

    mutex_lock(&esw->offloads.decap_tbl_lock);
    list_del(&flow->l3_to_l2_reformat);
    flow->decap_reformat = NULL;

    if (!refcount_dec_and_test(&d->refcnt)) {
        mutex_unlock(&esw->offloads.decap_tbl_lock);
        return;
    }
    hash_del_rcu(&d->hlist);
    mutex_unlock(&esw->offloads.decap_tbl_lock);

    mlx5e_decap_dealloc(priv, d);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
    struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

    if (!flow_flag_test(flow, ESWITCH) ||
        !flow_flag_test(flow, DUP))
        return;

    mutex_lock(&esw->offloads.peer_mutex);
    list_del(&flow->peer);
    mutex_unlock(&esw->offloads.peer_mutex);

    flow_flag_clear(flow, DUP);

    if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
        mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
        kfree(flow->peer_flow);
    }

    flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
    struct mlx5_core_dev *dev = flow->priv->mdev;
    struct mlx5_devcom *devcom = dev->priv.devcom;
    struct mlx5_eswitch *peer_esw;

    peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
    if (!peer_esw)
        return;

    __mlx5e_tc_del_fdb_peer_flow(flow);
    mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
    if (mlx5e_is_eswitch_flow(flow)) {
        mlx5e_tc_del_fdb_peer_flow(flow);
        mlx5e_tc_del_fdb_flow(priv, flow);
    } else {
        mlx5e_tc_del_nic_flow(priv, flow);
    }
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(f);
    struct flow_action *flow_action = &rule->action;
    const struct flow_action_entry *act;
    int i;

    flow_action_for_each(i, act, flow_action) {
        switch (act->id) {
        case FLOW_ACTION_GOTO:
            return true;
        default:
            continue;
        }
    }

    return false;
}
static bool
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
                                    struct flow_dissector_key_enc_opts *opts,
                                    struct netlink_ext_ack *extack,
                                    bool *dont_care)
{
    struct geneve_opt *opt;
    int off = 0;

    *dont_care = true;

    while (opts->len > off) {
        opt = (struct geneve_opt *)&opts->data[off];

        if (!(*dont_care) || opt->opt_class || opt->type ||
            memchr_inv(opt->opt_data, 0, opt->length * 4)) {
            *dont_care = false;

            if (opt->opt_class != htons(U16_MAX) ||
                opt->type != U8_MAX) {
                NL_SET_ERR_MSG(extack,
                               "Partial match of tunnel options in chain > 0 isn't supported");
                netdev_warn(priv->netdev,
                            "Partial match of tunnel options in chain > 0 isn't supported");
                return false;
            }
        }

        off += sizeof(struct geneve_opt) + opt->length * 4;
    }

    return true;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
    struct flow_rule *__rule = (rule);\
    typeof(dst) __dst = dst;\
\
    memcpy(__dst,\
           skb_flow_dissector_target(__rule->match.dissector,\
                                     diss_key,\
                                     __rule->match.key),\
           sizeof(*__dst));\
})
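/* Allocate a tunnel id for this flow's tunnel match and (in chain 0) load it
 * into a metadata register; the id packs the tunnel mapping and the enc_opts
 * mapping as (tun_id << ENC_OPTS_BITS | enc_opts_id).
 */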
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
                                    struct mlx5e_tc_flow *flow,
                                    struct flow_cls_offload *f,
                                    struct net_device *filter_dev)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(f);
    struct netlink_ext_ack *extack = f->common.extack;
    struct mlx5_esw_flow_attr *attr = flow->esw_attr;
    struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
    struct flow_match_enc_opts enc_opts_match;
    struct tunnel_match_enc_opts tun_enc_opts;
    struct mlx5_rep_uplink_priv *uplink_priv;
    struct mlx5e_rep_priv *uplink_rpriv;
    struct tunnel_match_key tunnel_key;
    bool enc_opts_is_dont_care = true;
    u32 tun_id, enc_opts_id = 0;
    struct mlx5_eswitch *esw;
    u32 value, mask;
    int err;

    esw = priv->mdev->priv.eswitch;
    uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
    uplink_priv = &uplink_rpriv->uplink_priv;

    memset(&tunnel_key, 0, sizeof(tunnel_key));
    COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
                   &tunnel_key.enc_control);
    if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
        COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                       &tunnel_key.enc_ipv4);
    else
        COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                       &tunnel_key.enc_ipv6);
    COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
    COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
                   &tunnel_key.enc_tp);
    COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
                   &tunnel_key.enc_key_id);
    tunnel_key.filter_ifindex = filter_dev->ifindex;

    err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
    if (err)
        return err;

    flow_rule_match_enc_opts(rule, &enc_opts_match);
    err = enc_opts_is_dont_care_or_full_match(priv,
                                              enc_opts_match.mask,
                                              extack,
                                              &enc_opts_is_dont_care);
    if (err)
        goto err_enc_opts;

    if (!enc_opts_is_dont_care) {
        memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
        memcpy(&tun_enc_opts.key, enc_opts_match.key,
               sizeof(*enc_opts_match.key));
        memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
               sizeof(*enc_opts_match.mask));

        err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
                          &tun_enc_opts, &enc_opts_id);
        if (err)
            goto err_enc_opts;
    }

    value = tun_id << ENC_OPTS_BITS | enc_opts_id;
    mask = enc_opts_id ? TUNNEL_ID_MASK :
                         (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

    if (attr->chain) {
        mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
                                    TUNNEL_TO_REG, value, mask);
    } else {
        mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
        err = mlx5e_tc_match_to_reg_set(priv->mdev,
                                        mod_hdr_acts,
                                        TUNNEL_TO_REG, value);
        if (err)
            goto err_set;

        attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
    }

    flow->tunnel_id = value;
    return 0;

err_set:
    if (enc_opts_id)
        mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
                       enc_opts_id);
err_enc_opts:
    mapping_remove(uplink_priv->tunnel_mapping, tun_id);
    return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
    u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
    u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
    struct mlx5_rep_uplink_priv *uplink_priv;
    struct mlx5e_rep_priv *uplink_rpriv;
    struct mlx5_eswitch *esw;

    esw = flow->priv->mdev->priv.eswitch;
    uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
    uplink_priv = &uplink_rpriv->uplink_priv;

    if (tun_id)
        mapping_remove(uplink_priv->tunnel_mapping, tun_id);
    if (enc_opts_id)
        mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
                       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
    return flow->tunnel_id;
}
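/* Prefer matching on ip_version over ethertype when the device supports it
 * and the filter fully masks n_proto with an IPv4/IPv6 value.
 */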
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
                            struct flow_match_basic *match, bool outer,
                            void *headers_c, void *headers_v)
{
    bool ip_version_cap;

    ip_version_cap = outer ?
        MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                  ft_field_support.outer_ip_version) :
        MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                  ft_field_support.inner_ip_version);

    if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
        (match->key->n_proto == htons(ETH_P_IP) ||
         match->key->n_proto == htons(ETH_P_IPV6))) {
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
                 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
    } else {
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                 ntohs(match->mask->n_proto));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                 ntohs(match->key->n_proto));
    }
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5e_tc_flow *flow,
                             struct mlx5_flow_spec *spec,
                             struct flow_cls_offload *f,
                             struct net_device *filter_dev,
                             u8 *match_level,
                             bool *match_inner)
{
    struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
    struct netlink_ext_ack *extack = f->common.extack;
    bool needs_mapping, sets_mapping;
    int err;

    if (!mlx5e_is_eswitch_flow(flow))
        return -EOPNOTSUPP;

    needs_mapping = !!flow->esw_attr->chain;
    sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
    *match_inner = !needs_mapping;

    if ((needs_mapping || sets_mapping) &&
        !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
        NL_SET_ERR_MSG(extack,
                       "Chains on tunnel devices isn't supported without register loopback support");
        netdev_warn(priv->netdev,
                    "Chains on tunnel devices isn't supported without register loopback support");
        return -EOPNOTSUPP;
    }

    if (!flow->esw_attr->chain) {
        err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
                                 match_level);
        if (err) {
            NL_SET_ERR_MSG_MOD(extack,
                               "Failed to parse tunnel attributes");
            netdev_warn(priv->netdev,
                        "Failed to parse tunnel attributes");
            return err;
        }

        /* With mpls over udp we decapsulate using packet reformat
         * instead of setting the mac and mpls label.
         */
        if (!netif_is_bareudp(filter_dev))
            flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
    }

    if (!needs_mapping && !sets_mapping)
        return 0;

    return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
    return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                        inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
    return MLX5_ADDR_OF(fte_match_param, spec->match_value,
                        inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
    return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                        outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
    return MLX5_ADDR_OF(fte_match_param, spec->match_value,
                        outer_headers);
}

static void *get_match_headers_value(u32 flags,
                                     struct mlx5_flow_spec *spec)
{
    return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
        get_match_inner_headers_value(spec) :
        get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
                                        struct mlx5_flow_spec *spec)
{
    return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
        get_match_inner_headers_criteria(spec) :
        get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
                                   struct flow_cls_offload *f)
{
    struct flow_rule *rule = flow_cls_offload_flow_rule(f);
    struct netlink_ext_ack *extack = f->common.extack;
    struct net_device *ingress_dev;
    struct flow_match_meta match;

    if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
        return 0;

    flow_rule_match_meta(rule, &match);
    if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
        NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
        return -EOPNOTSUPP;
    }

    ingress_dev = __dev_get_by_index(dev_net(filter_dev),
                                     match.key->ingress_ifindex);
    if (!ingress_dev) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Can't find the ingress port to match on");
        return -EOPNOTSUPP;
    }

    if (ingress_dev != filter_dev) {
        NL_SET_ERR_MSG_MOD(extack,
                           "Can't match on the ingress filter port");
        return -EOPNOTSUPP;
    }

    return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
                           struct flow_cls_offload *f)
{
    /* When doing mpls over udp decap, the user needs to provide
     * MPLS_UC as the protocol in order to be able to match on mpls
     * label fields. However, the actual ethertype is IP so we want to
     * avoid matching on this, otherwise we'll fail the match.
     */
    if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
        return true;

    return false;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);

		/* header pointers should point to the inner headers
		 * if the packet was decapsulated already.
		 * outer headers are set by parse_tunnel_attr.
		 */
		match_level = inner_match_level;
		headers_c = get_match_inner_headers_criteria(spec);
		headers_v = get_match_inner_headers_value(spec);

	err = mlx5e_flower_parse_meta(filter_dev, f);

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;

			flow_rule_match_vlan(rule, &match);

		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C VLAN tags
		 * are absent (untagged on both levels)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
				*match_level = MLX5_MATCH_L3;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	/* ***  L3 attributes parsing up to here *** */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));

		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
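
/* Note, for illustration (not from the original source): the inner/outer
 * match levels only ever move up the ladder NONE < L2 < L3 < L4 as keys are
 * parsed. E.g. a rule matching dst_mac + ip_proto + tcp_dport finishes with
 * *match_level == MLX5_MATCH_L4, which parse_cls_flower() below compares
 * against the eswitch min-inline capability.
 */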
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
				 &inner_match_level, &outer_match_level);
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
				 outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);

	if (is_eswitch_flow) {
		flow->esw_attr->inner_match_level = inner_match_level;
		flow->esw_attr->outer_match_level = outer_match_level;

		flow->nic_attr->match_level = non_tunnel_match_level;
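
/* A worked example of the min-inline check above (values assumed for
 * illustration): a VF representor rule matching TCP ports yields
 * non_tunnel_match_level == MLX5_MATCH_L4; if the eswitch was configured
 * with inline_mode == MLX5_INLINE_MODE_L2, then L2 < L4 and the flow is
 * rejected with "min inline setting". Uplink-vport rules and
 * MLX5_INLINE_MODE_NONE skip the check entirely.
 */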
struct pedit_headers {
	struct ethhdr  eth;
	struct vlan_hdr vlan;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

struct pedit_headers_action {
	struct pedit_headers vals;
	struct pedit_headers masks;
	u32 pedits;
};

static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers_action *hdrs)
	u32 *curr_pmask, *curr_pval;

	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
	curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */

	*curr_pmask |= mask;
	*curr_pval |= (val & mask);
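
/* An illustrative trace of set_pedit_val() (not from the original source):
 * the masks accumulate per header type, so two pedit keys that touch the
 * same bits of the same 32-bit word collide. A first call records its bits
 * in hdrs->masks; a second call with an overlapping mask at the same offset
 * sees *curr_pmask & mask != 0 and bails out, because rewriting the same
 * location twice is ambiguous.
 */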
struct mlx5_fields {

#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}

/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, u8 bsize)
	switch (bsize) {
	case 8:
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case 16:
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case 32:
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}
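
/* A worked SAME_VAL_MASK() example (values assumed for illustration): a
 * rule matches tcp_dport == 80 with a full mask (matchval 80, matchmask
 * 0xffff) and also carries a pedit that sets dport to 80 (val 80, mask
 * 0xffff). Then (valx & maskx) == (matchvalx & matchmaskx) and
 * maskx ^ matchmaskx == 0, so the rewrite is a no-op and
 * offload_pedit_fields() below skips it. If the rewrite mask covered bits
 * the match does not pin down, the second term
 * !(maskx & (maskx ^ matchmaskx)) fails and the rewrite is kept.
 */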
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,   0, tcp_dport),
	/* in the linux tcphdr the flags are bitfields, so address the flags
	 * byte (offset 13 of the TCP header) as tcp.ack_seq + 5
	 */
	OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
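
/* Note on the OFFLOAD() table above, for illustration: each entry ties a
 * firmware rewrite field to (a) the offset of the corresponding bytes in
 * struct pedit_headers, where parsed pedit values accumulate, and (b) the
 * byte offset of the same field inside fte_match_set_lyr_2_4, so a rewrite
 * can be compared against the rule's own match (the cmp_val_mask() skip in
 * offload_pedit_fields() below).
 */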
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, first, last, next_z;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	struct mlx5_fields *f;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		/* avoid seeing bits set from previous iterations */

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);

		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd = MLX5_ACTION_TYPE_SET;

			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd = MLX5_ACTION_TYPE_ADD;

			vals_p = (void *)add_vals + f->offset;
			/* adding 0 is a no-op, nothing to offload */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}

		if (f->field_bsize == 32) {
			mask_be32 = (__force __be32)(mask);
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (f->field_bsize == 16) {
			mask_be32 = (__force __be32)(mask);
			mask_be16 = *(__be16 *)&mask_be32;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of non-contiguous sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",

		err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);

		action = mod_acts->actions +
			 (mod_acts->num_actions * action_size);
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			/* a field narrower than its container may not start
			 * at the container's first bit
			 */
			start = find_first_bit((unsigned long *)&f->field_mask,
					       f->field_bsize);
			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));

		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
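
/* A worked example of the contiguity check above: mask 0x00F0 gives
 * first=4, next_z=8, last=7, so "first < next_z && next_z < last" is false
 * and the rewrite is accepted as one 4-bit run (offset 4, length 4). Mask
 * 0x0F0F gives first=0, next_z=4, last=11; the condition holds and the
 * rewrite is rejected, since a single modify-header action can only write
 * one contiguous run of bits.
 */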
static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
						  int namespace)
	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);

int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
			  int namespace,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
	int action_size, new_num_actions, max_hw_actions;
	size_t new_sz, old_sz;

	if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)

	action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);

	max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
	new_num_actions = min(max_hw_actions,
			      mod_hdr_acts->actions ?
			      mod_hdr_acts->max_actions * 2 : 1);
	if (mod_hdr_acts->max_actions == new_num_actions)

	new_sz = action_size * new_num_actions;
	old_sz = mod_hdr_acts->max_actions * action_size;
	ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);

	memset(ret + old_sz, 0, new_sz - old_sz);
	mod_hdr_acts->actions = ret;
	mod_hdr_acts->max_actions = new_num_actions;
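
/* Illustrative growth sequence for the action array above: max_actions goes
 * 0 -> 1 -> 2 -> 4 -> 8 ... via krealloc, clamped at the per-namespace
 * max_modify_header_actions capability. Once the clamp makes
 * new_num_actions equal to the current max_actions, the allocator reports
 * that no more actions fit - the "too many pedit actions" path in
 * offload_pedit_fields().
 */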
void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
	kfree(mod_hdr_acts->actions);
	mod_hdr_acts->actions = NULL;
	mod_hdr_acts->num_actions = 0;
	mod_hdr_acts->max_actions = 0;

static const struct pedit_headers zero_masks = {};

static int
parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
			  const struct flow_action_entry *act, int namespace,
			  struct mlx5e_tc_flow_parse_attr *parse_attr,
			  struct pedit_headers_action *hdrs,
			  struct netlink_ext_ack *extack)
	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
	int err = -EOPNOTSUPP;
	u32 mask, val, offset;

	htype = act->mangle.htype;
	err = -EOPNOTSUPP; /* can't be all optimistic */

	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");

	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "The pedit offload action is not supported");

	mask = act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;

	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
static int
parse_pedit_to_reformat(struct mlx5e_priv *priv,
			const struct flow_action_entry *act,
			struct mlx5e_tc_flow_parse_attr *parse_attr,
			struct netlink_ext_ack *extack)
	u32 mask, val, offset;

	if (act->id != FLOW_ACTION_MANGLE)

	if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
		NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");

	mask = ~act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;
	p = (u32 *)&parse_attr->eth;
	*(p + (offset >> 2)) |= (val & mask);
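
/* Note on the accumulation above, for illustration: on the L3-to-L2 decap
 * (mpls pop) path the pedit values are not turned into modify-header
 * actions at all; they are OR-ed straight into parse_attr->eth, rebuilding
 * the Ethernet header that the packet-reformat object will prepend. E.g. a
 * mangle at offset 0 with an all-ones effective mask writes the first four
 * destination-MAC bytes of that template (offset >> 2 selects word 0).
 */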
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct flow_action_entry *act, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct mlx5e_tc_flow *flow,
				 struct netlink_ext_ack *extack)
	if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
		return parse_pedit_to_reformat(priv, act, parse_attr, extack);

	return parse_pedit_to_modify_hdr(priv, act, namespace,
					 parse_attr, hdrs, extack);

static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
	struct pedit_headers *cmd_masks;

	err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
				   action_flags, extack);
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			goto out_dealloc_parsed_actions;

out_dealloc_parsed_actions:
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action, u32 update_flags,
				   struct netlink_ext_ack *extack)
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalculates checksums only when headers are rewritten */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some of the requested headers");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some of the requested headers - flags %#x\n",

struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};
static int is_action_keys_supported(const struct flow_action_entry *act,
				    bool ct_flow, bool *modify_ip_header,
				    struct netlink_ext_ack *extack)
	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For the IPv4 and IPv6 headers, inspect the whole 4-byte word that
	 * holds ttl/hop_limit to determine whether anything other than
	 * ttl/hop_limit is being modified.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
			*modify_ip_header = true;

		if (ct_flow && offset >= offsetof(struct iphdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			*modify_ip_header = true;

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
	} else if (ct_flow && (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
			       htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of transport header ports with action ct");
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions, bool ct_flow,
					  struct netlink_ext_ack *extack)
	const struct flow_action_entry *act;
	bool modify_ip_header;

	headers_c = get_match_headers_criteria(actions, spec);
	headers_v = get_match_headers_value(actions, spec);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
	    ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)

	modify_ip_header = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)

		err = is_action_keys_supported(act, ct_flow,
					       &modify_ip_header, extack);

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non-TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
	ct_flow = flow_flag_test(flow, CT);
	if (mlx5e_is_eswitch_flow(flow)) {
		actions = flow->esw_attr->action;

		if (flow->esw_attr->split_count && ct_flow) {
			/* All registers used by ct are cleared when using
			 * split rules.
			 */
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't offload mirroring with action ct");

		actions = flow->nic_attr->action;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action, actions,

static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
	return priv->mdev == peer_priv->mdev;

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
				   const struct flow_action_entry *act,
				   struct mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;

	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = get_match_headers_value(*action, &parse_attr->spec);

	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN rewrite action must have VLAN protocol match");

	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing VLAN prio is not supported");

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
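
/* Illustrative usage: a "vlan modify id 100" action is lowered to the
 * synthetic pedit above - an ETH-header mangle of h_vlan_TCI with mask
 * VLAN_VID_MASK (0x0fff) and the new vid as the value - so it reuses the
 * modify-header machinery instead of a dedicated VLAN rewrite primitive.
 * The priority bits are deliberately excluded, hence the prio checks right
 * before.
 */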
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action, struct netlink_ext_ack *extack)
	const struct flow_action_entry prio_tag_act = {
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_value(*action,
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_criteria(*action,

	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
				       &prio_tag_act, parse_attr, hdrs, action,
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;

	if (!flow_action_has_entries(flow_action))

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, NULL, extack);

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);

		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;
	struct mlx5e_tc_tunnel *tc_tunnel;
};

static inline int cmp_encap_info(struct encap_key *a,
				 struct encap_key *b)
	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;

static inline int cmp_decap_info(struct mlx5e_decap_key *a,
				 struct mlx5e_decap_key *b)
	return memcmp(&a->key, &b->key, sizeof(b->key));

static inline int hash_encap_info(struct encap_key *key)
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);

static inline int hash_decap_info(struct mlx5e_decap_key *key)
	return jhash(&key->key, sizeof(key->key), 0);
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_vf_rep(priv->netdev) &&
		mlx5e_eswitch_vf_rep(peer_netdev) &&
		same_hw_devs(priv, peer_priv));

bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
	return refcount_inc_not_zero(&e->refcnt);

static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
	return refcount_inc_not_zero(&e->refcnt);

static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
		uintptr_t hash_key)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_encap_entry *e;
	struct encap_key e_key;

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		if (!cmp_encap_info(&e_key, key) &&
		    mlx5e_encap_take(e))

static struct mlx5e_decap_entry *
mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
		uintptr_t hash_key)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_key r_key;
	struct mlx5e_decap_entry *e;

	hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
		if (!cmp_decap_info(&r_key, key) &&
		    mlx5e_decap_take(e))

static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
	size_t tun_size = sizeof(*tun_info) + tun_info->options_len;

	return kmemdup(tun_info, tun_size, GFP_KERNEL);
static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
				      struct mlx5e_tc_flow *flow,
				      int out_index,
				      struct mlx5e_encap_entry *e,
				      struct netlink_ext_ack *extack)
	for (i = 0; i < out_index; i++) {
		if (flow->encaps[i].e != e)
		NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
		netdev_err(priv->netdev, "can't duplicate encap action\n");
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key;
	struct mlx5e_encap_entry *e;
	unsigned short family;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");

	hash_key = hash_encap_info(&key);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	e = mlx5e_encap_get(priv, &key, hash_key);

	/* must verify if encap is valid or not */
		/* Check that entry was not already attached to this flow */
		if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {

		mutex_unlock(&esw->offloads.encap_tbl_lock);
		wait_for_completion(&e->res_ready);

		/* Protect against concurrent neigh update. */
		mutex_lock(&esw->offloads.encap_tbl_lock);
		if (e->compl_result < 0) {

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	refcount_set(&e->refcnt, 1);
	init_completion(&e->res_ready);

	tun_info = dup_tun_info(tun_info);

	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);

	INIT_LIST_HEAD(&e->flows);
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	/* Protect against concurrent neigh update. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	complete_all(&e->res_ready);
		e->compl_result = err;

	e->compl_result = 1;

	flow->encaps[out_index].e = e;
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].pkt_reformat = e->pkt_reformat;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;

		*encap_valid = false;

	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_put(priv, e);

	mutex_unlock(&esw->offloads.encap_tbl_lock);
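
/* Note on the synchronization protocol above, as visible in this function:
 * encap entries are published in encap_tbl under encap_tbl_lock before
 * their hardware state exists; res_ready is a completion that later lookups
 * wait on with the lock dropped, and compl_result encodes the outcome
 * (negative errno on failure, positive once resolution finished).
 * MLX5_ENCAP_ENTRY_VALID then decides whether the flow gets the
 * pkt_reformat immediately or is handed back as a not-yet-valid encap
 * (*encap_valid == false).
 */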
static int mlx5e_attach_decap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_decap_entry *d;
	struct mlx5e_decap_key key;

	parse_attr = attr->parse_attr;
	if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "encap header larger than max supported");

	key.key = parse_attr->eth;
	hash_key = hash_decap_info(&key);
	mutex_lock(&esw->offloads.decap_tbl_lock);
	d = mlx5e_decap_get(priv, &key, hash_key);
		mutex_unlock(&esw->offloads.decap_tbl_lock);
		wait_for_completion(&d->res_ready);
		mutex_lock(&esw->offloads.decap_tbl_lock);
		if (d->compl_result) {

	d = kzalloc(sizeof(*d), GFP_KERNEL);

	refcount_set(&d->refcnt, 1);
	init_completion(&d->res_ready);
	INIT_LIST_HEAD(&d->flows);
	hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
						     sizeof(parse_attr->eth),
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(d->pkt_reformat)) {
		err = PTR_ERR(d->pkt_reformat);
		d->compl_result = err;

	mutex_lock(&esw->offloads.decap_tbl_lock);
	complete_all(&d->res_ready);

	flow->decap_reformat = d;
	attr->decap_pkt_reformat = d->pkt_reformat;
	list_add(&flow->l3_to_l2_reformat, &d->flows);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mutex_unlock(&esw->offloads.decap_tbl_lock);
	mlx5e_decap_put(priv, d);

	mutex_unlock(&esw->offloads.decap_tbl_lock);
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)

	case FLOW_ACTION_VLAN_POP:
		if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
							 MLX5_FS_VLAN_DEPTH))
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;

			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;

	attr->total_vlan = vlan_idx + 1;
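
/* Illustrative usage: vlan_idx selects the slot, so a rule with two push
 * actions fills attr->vlan_vid/prio/proto[0] and [1] and ends up with
 * VLAN_PUSH plus VLAN_PUSH_2 set, provided the eswitch advertises support
 * for MLX5_FS_VLAN_DEPTH vlan actions; a third push fails the
 * vlan_idx >= MLX5_FS_VLAN_DEPTH check at the top.
 */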
static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
					  struct net_device *out_dev)
	struct net_device *fdb_out_dev = out_dev;
	struct net_device *uplink_upper;

	uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
	if (uplink_upper && netif_is_lag_master(uplink_upper) &&
	    uplink_upper == out_dev) {
		fdb_out_dev = uplink_dev;
	} else if (netif_is_lag_master(out_dev)) {
		fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
		    (!mlx5e_eswitch_rep(fdb_out_dev) ||
		     !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))

static int add_vlan_push_action(struct mlx5e_priv *priv,
				struct mlx5_esw_flow_attr *attr,
				struct net_device **out_dev,
				u32 *action)
	struct net_device *vlan_dev = *out_dev;
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
	};

	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);

	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
					dev_get_iflink(vlan_dev));
	if (is_vlan_dev(*out_dev))
		err = add_vlan_push_action(priv, attr, out_dev, action);
static int add_vlan_pop_action(struct mlx5e_priv *priv,
			       struct mlx5_esw_flow_attr *attr,
			       u32 *action)
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_POP,
	};
	int nest_level, err = 0;

	nest_level = attr->parse_attr->filter_dev->lower_level -
		     priv->netdev->lower_level;
	while (nest_level--) {
		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
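
/* A worked example of the nest_level arithmetic above (device names
 * assumed for illustration): lower_level counts a device's depth in the
 * stacking hierarchy, so for a rule on eth0.100.200 (a vlan on a vlan on
 * the uplink) filter_dev's lower_level exceeds the uplink's by 2, and the
 * loop emits two FLOW_ACTION_VLAN_POP actions - one per vlan header to
 * strip.
 */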
static bool same_hw_reps(struct mlx5e_priv *priv,
			 struct net_device *peer_netdev)
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return mlx5e_eswitch_rep(priv->netdev) &&
	       mlx5e_eswitch_rep(peer_netdev) &&
	       same_hw_devs(priv, peer_priv);

static bool is_lag_dev(struct mlx5e_priv *priv,
		       struct net_device *peer_netdev)
	return ((mlx5_lag_is_sriov(priv->mdev) ||
		 mlx5_lag_is_multipath(priv->mdev)) &&
		same_hw_reps(priv, peer_netdev));

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev)
	if (is_merged_eswitch_vfs(priv, out_dev))

	if (is_lag_dev(priv, out_dev))

	return mlx5e_eswitch_rep(out_dev) &&
	       same_port_devs(priv, netdev_priv(out_dev));

static bool is_duplicated_output_device(struct net_device *dev,
					struct net_device *out_dev,
					int *ifindexes, int if_count,
					struct netlink_ext_ack *extack)
	for (i = 0; i < if_count; i++) {
		if (ifindexes[i] == out_dev->ifindex) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't duplicate output to same device");
			netdev_err(dev, "can't duplicate output to same device: %s\n",
static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
				    struct mlx5e_tc_flow *flow,
				    const struct flow_action_entry *act,
				    u32 actions,
				    struct netlink_ext_ack *extack)
	u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool ft_flow = mlx5e_is_ft_flow(flow);
	u32 dest_chain = act->chain_index;

		NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");

	if (!mlx5_esw_chains_backwards_supported(esw) &&
	    dest_chain <= attr->chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Goto lower numbered chain isn't supported");

	if (dest_chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested destination chain is out of supported range");

	if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
		       MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Goto chain is not allowed if action has reformat or decap");
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct net_device *out_dev,
				    struct netlink_ext_ack *extack)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rep_priv;

	/* Forwarding non-encapsulated traffic between uplink ports is
	 * allowed only if the termination_table_raw_traffic cap is set.
	 *
	 * The input vport was stored in esw_attr->in_rep. In the LAG case,
	 * *priv* is the private data of the uplink, which may not be the
	 * input vport.
	 */
	rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);

	if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
	      mlx5e_eswitch_uplink_rep(out_dev)))

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
					termination_table_raw_traffic)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "devices are both uplink, can't offload forwarding");
		pr_err("devices %s %s are both uplink, can't offload forwarding\n",
		       priv->netdev->name, out_dev->name);
	} else if (out_dev != rep_priv->netdev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "devices are not the same uplink, can't offload forwarding");
		pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
		       priv->netdev->name, out_dev->name);
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack,
				struct net_device *filter_dev)
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
	bool ft_flow = mlx5e_is_ft_flow(flow);
	const struct flow_action_entry *act;
	bool encap = false, decap = false;
	u32 action = attr->action;
	int err, i, if_count = 0;
	bool mpls_push = false;

	if (!flow_action_has_entries(flow_action))

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT))

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
		case FLOW_ACTION_MPLS_PUSH:
			if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
							reformat_l2_to_l3_tunnel) ||
			    act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls push is supported only for mpls_uc protocol");
		case FLOW_ACTION_MPLS_POP:
			/* we only support mpls pop if it is the first action
			 * and the filter net device is bareudp. Subsequent
			 * actions can be pedit and the last can be mirred
			 * egress redirect action.
			 */
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only as first action");
			if (!netif_is_bareudp(filter_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls pop supported only on bareudp devices");

			parse_attr->eth.h_proto = act->mpls_pop.proto;
			action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_flag_set(flow, L3_TO_L2_DECAP);
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, flow, extack);

			if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
				action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
				attr->split_count = attr->out_count;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			/* out_dev is NULL when filters with
			 * non-existing mirred device are replayed to
			 * the driver.
			 */

			if (mpls_push && !netif_is_bareudp(out_dev)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "mpls is supported only through a bareudp device");

			if (ft_flow && out_dev == priv->netdev) {
				/* Ignore forward to self rules generated
				 * by adding both mlx5 devs to the flow table
				 * block on a normal nft offload setup.
				 */

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "can't support more than %d output ports, can't offload forwarding\n",

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;

				parse_attr->mirred_ifindex[attr->out_count] =
				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
				if (!parse_attr->tun_info[attr->out_count])

				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;

				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);

				if (is_duplicated_output_device(priv->netdev,

				ifindexes[if_count] = out_dev->ifindex;

				out_dev = get_fdb_out_dev(uplink_dev, out_dev);

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,

				err = verify_uplink_forwarding(priv, flow, out_dev, extack);

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				netdev_warn(priv->netdev,
					    "devices %s %s not on same switch HW, can't offload forwarding\n",
		case FLOW_ACTION_TUNNEL_ENCAP:

		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,

				err = parse_tc_vlan_action(priv, act, attr, &action);

			attr->split_count = attr->out_count;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,

			attr->split_count = attr->out_count;
		case FLOW_ACTION_TUNNEL_DECAP:

		case FLOW_ACTION_GOTO:
			err = mlx5_validate_goto_chain(esw, flow, act, action,

			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = act->chain_index;
		case FLOW_ACTION_CT:
			err = mlx5_tc_ct_parse_action(priv, attr, act, extack);

			flow_flag_set(flow, CT);
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with rewrite vlan prio
		 * tag rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);

		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->mod_hdr_acts.num_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))

	if (attr->dest_chain) {
			/* It can be supported if we'll create a mapping for
			 * the tunnel device only (without tunnel), and set
			 * this tunnel id with this decap flow.
			 *
			 * On restore (miss), we'll just set this saved tunnel
			 * device.
			 */
			NL_SET_ERR_MSG(extack,
				       "Decap with goto isn't supported");
			netdev_warn(priv->netdev,
				    "Decap with goto isn't supported");

		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Mirroring goto chain rules isn't supported");

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (!(attr->action &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rule must have at least one forward/drop action");

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
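
/* An illustrative end-to-end walk of the parser above (tc invocation
 * assumed, not taken from this file):
 *
 *   tc filter add dev rep0 ingress ... \
 *      action vlan pop action vlan push id 20 \
 *      action mirred egress redirect dev rep1
 *
 * The pop+push pair collapses into a single vlan-modify pedit (see the
 * FLOW_ACTION_VLAN_PUSH case), so attr->action ends up carrying
 * FWD_DEST | COUNT | MOD_HDR instead of VLAN_POP | VLAN_PUSH, and
 * split_count records whether the rewrite must happen before the mirror
 * destinations.
 */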

static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
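
/* With these params the rhashtable is keyed directly on the tc cookie, so
 * a lookup is just (sketch, as used throughout this file):
 *
 *   flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
 *
 * No custom hashfn/obj_cmpfn is supplied; rhashtable hashes the raw cookie
 * bytes.
 */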

static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}

static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}
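
/* Example scenario: under VF LAG (mlx5_lag_is_sriov()) or multipath, a rule
 * added on a VF representor's ingress, or one that encapsulates, may need a
 * mirror copy in the paired eswitch so it still matches after traffic fails
 * over to the other uplink; that is the case this helper detects.
 */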

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->mod_hdr);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
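
/* Note on the allocations above: the flow struct plus its attr area is
 * relatively small, so plain kzalloc; parse_attr embeds a large match spec,
 * so kvzalloc is used and may fall back to vmalloc when contiguous pages
 * are scarce.
 */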

static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = f->common.prio;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the mdev the packet originated from.
	 * Packets redirected to the uplink use the same mdev as the
	 * original flow; packets redirected from the uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f,
		   unsigned long flow_flags, struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;
	return 0;

out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f,
		   unsigned long flow_flags, struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	kvfree(parse_attr);
	*__flow = flow;
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv, struct flow_cls_offload *f,
		  unsigned long flags, struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule may be duplicated on a non-uplink
	 * representor that shares a tc block with the other slaves of a lag
	 * device. rpriv can be NULL when this function is called from NIC
	 * mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
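
/* Illustrative topology: two representors enslaved to the same bond share
 * one tc block, so the same rule (same cookie) is offered once per slave;
 * the duplicate is accepted in mlx5e_configure_flower() below as long as it
 * arrives from a different owner dev.
 */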

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
		flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2, packets2, lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest Mbit/sec
	 * (Mbit means million bits). Moreover, if rate is non-zero we choose
	 * to configure to a minimum of 1 Mbit/sec.
	 */
	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
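
/* Worked examples for the rounding above (illustrative values):
 *   rate = 187500 bytes/s -> 1500000 bits/s;
 *       (1500000 + 500000) / 1000000 = 2  -> 2 Mbps (nearest)
 *   rate = 12500 bytes/s  -> 100000 bits/s;
 *       (100000 + 500000) / 1000000 = 0   -> clamped to the 1 Mbps minimum
 */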

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
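
/* Illustrative userspace trigger for the path above (assumes $REP is a VF
 * representor; prio 1 is required by mlx5e_tc_configure_matchall() below):
 *
 *   tc filter add dev $REP ingress prio 1 protocol all matchall \
 *           action police rate 10mbit burst 64k drop
 *
 * The police rate arrives here in bytes/s via act->police.rate_bytes_ps.
 */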

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EOPNOTSUPP;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
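
/* Example: if the VF vport counters moved from 1000 to 1500 rx packets
 * between two stats calls, dpkts = 500 is reported for this interval and
 * the snapshot is advanced, so each call returns only the delta since the
 * previous one rather than a running total.
 */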

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
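
/* Example flow (illustrative): when the mlx5e netdev on the other port of
 * the same HW is being unregistered (e.g. driver unload), the notifier
 * above marks any hairpin pairs towards it as peer_gone, so this side can
 * avoid touching the dead peer's resources.
 */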

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mutex_init(&tc->t_lock);
	mutex_init(&tc->mod_hdr.lock);
	hash_init(tc->mod_hdr.hlist);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
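
/* Sketch of the dispatch: a FLOW_CLS_REPLACE delivered through the NIC
 * block callback below reaches mlx5e_configure_flower() with
 * MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD) already set in flags.
 */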

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}