/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "esw/chains.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "diag/en_tc_tracepoint.h"
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
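/* One modify-header action in the FW command layout is the union of the
 * set/add/copy action formats, so MLX5_MH_ACT_SZ is the per-action stride
 * used to index the packed action array (see mlx5e_tc_match_to_reg_set()).
 */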
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	struct mlx5_modify_hdr *modify_hdr;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table *hairpin_ft;
	struct mlx5_fc *counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_FT		= MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
	MLX5E_TC_FLOW_FLAG_CT		= MLX5E_TC_FLOW_BASE + 7,
	MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};
#define MLX5E_TC_MAX_SPLITS 1
/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *       |- helper array
 *           [0] helper item 0
 *               |- list_head item 0
 *           [1] helper item 1
 *               |- list_head item 1
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *            container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *            container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
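/* For example, struct mlx5e_tc_flow below embeds encap_flow_item encaps[];
 * given an encap_flow_item *efi taken off an encap entry's flow list, the
 * owning flow is recovered with:
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */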
struct mlx5e_tc_flow {
	struct rhash_head node;
	struct mlx5e_priv *priv;
	unsigned long flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];

	/* flows sharing the same reformat object - currently mpls decap */
	struct list_head l3_to_l2_reformat;
	struct mlx5e_decap_entry *decap_reformat;

	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head hairpin; /* flows sharing the same hairpin */
	struct list_head peer;    /* flows with peer flow */
	struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
	struct net_device *orig_dev; /* netdev adding flow first */
	int tmp_efi_index;
	struct list_head tmp_list; /* temporary flow list used by neigh update */
	refcount_t refcnt;
	struct rcu_head rcu_head;
	struct completion init_done;
	int tunnel_id; /* the mapped tunnel id of this flow */

	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
	data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
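/* Callers pass one of the mappings above; e.g. mlx5e_get_flow_tunnel_id()
 * below matches the mapped tunnel id on REG_C_1 with:
 *	mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
 *				    TUNNEL_TO_REG, value, mask);
 */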
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *data,
				u32 *mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(mask, fmask, match_len);
	memcpy(data, fval, match_len);

	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
				    mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	mod_hdr_acts->num_actions++;

	return 0;
}
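/* Used below to write a mapped value into a metadata register, e.g.:
 *	err = mlx5e_tc_match_to_reg_set(priv->mdev, mod_hdr_acts,
 *					TUNNEL_TO_REG, value);
 */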
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);
static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}

#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
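/* e.g. flow_flag_set(flow, OFFLOADED) sets MLX5E_TC_FLOW_FLAG_OFFLOADED in
 * flow->flags; the wrappers below follow the same token-pasting convention.
 */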
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow,	\
						      MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow,	\
						    MLX5E_TC_FLOW_FLAG_##flag)
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->modify_hdr = modify_hdr;
	else
		flow->nic_attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		return err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));

		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}

	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);

	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
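/* e.g. peer_vhca_id 0x0005 and prio 3 yield the key 0x00050003, bucketing
 * hairpin entries per (peer device, PCP priority) pair.
 */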
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
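	/* e.g. a 100Gb/s port gives link_speed64 = 100000 / 50000 = 2, i.e.
	 * two hairpin channels; links at or below 50Gb/s use one channel.
	 */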
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags  = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_hdr = attr->modify_hdr;
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		struct mlx5_flow_table_attr ft_attr = {};
		int tc_grp_size, tc_tbl_size, tc_num_grps;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
		tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;

		ft_attr.prio = MLX5E_TC_PRIO;
		ft_attr.max_fte = tc_tbl_size;
		ft_attr.level = MLX5E_TC_FT_LEVEL;
		ft_attr.autogroup.max_num_groups = tc_num_grps;
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    &ft_attr);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);

static int mlx5e_attach_decap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
					       mod_hdr_acts);
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
		return;
	}

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr slow_attr;

	memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
	slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr.split_count = 0;
	slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
	flow_flag_clear(flow, SLOW);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}
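/* add_unready_flow() and remove_unready_flow() below wrap these helpers
 * with uplink_priv->unready_flows_lock held, so most callers use those.
 */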
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_esw_chains_get_chain_range(esw);
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	max_prio = mlx5_esw_chains_get_prio_range(esw);
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			return err;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	int out_index;

	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
						     e->reformat_type,
						     e->encap_size, e->encap_header,
						     MLX5_FLOW_NAMESPACE_FDB);
	if (IS_ERR(e->pkt_reformat)) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
			       PTR_ERR(e->pkt_reformat));
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, flow_list, tmp_list) {
		bool all_flow_encaps_valid = true;
		int i;

		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
		esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e,
			      struct list_head *flow_list)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, flow_list, tmp_list) {
		if (!mlx5e_is_offloaded_flow(flow))
			continue;
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
 * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
 */
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;
		wait_for_completion(&flow->init_done);

		flow->tmp_efi_index = efi->index;
		list_add(&flow->tmp_list, flow_list);
	}
}
/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
			   struct mlx5e_encap_entry *e)
{
	struct mlx5e_encap_entry *next = NULL;

retry:
	rcu_read_lock();

	/* find encap with non-zero reference counter value */
	for (next = e ?
		     list_next_or_null_rcu(&nhe->encap_list,
					   &e->encap_list,
					   struct mlx5e_encap_entry,
					   encap_list) :
		     list_first_or_null_rcu(&nhe->encap_list,
					    struct mlx5e_encap_entry,
					    encap_list);
	     next;
	     next = list_next_or_null_rcu(&nhe->encap_list,
					  &next->encap_list,
					  struct mlx5e_encap_entry,
					  encap_list))
		if (mlx5e_encap_take(next))
			break;

	rcu_read_unlock();

	/* release starting encap */
	if (e)
		mlx5e_encap_put(netdev_priv(e->out_dev), e);
	if (!next)
		return next;

	/* wait for encap to be fully initialized */
	wait_for_completion(&next->res_ready);
	/* continue searching if encap entry is not in valid state after completion */
	if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
		e = next;
		goto retry;
	}

	return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_encap_entry *e = NULL;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	/* mlx5e_get_next_valid_encap() releases previous encap before returning
	 * the next one.
	 */
	while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
		struct mlx5e_priv *priv = netdev_priv(e->out_dev);
		struct encap_flow_item *efi, *tmp;
		struct mlx5_eswitch *esw;
		LIST_HEAD(flow_list);

		esw = priv->mdev->priv.eswitch;
		mutex_lock(&esw->offloads.encap_tbl_lock);
		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;
			list_add(&flow->tmp_list, &flow_list);

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		mutex_unlock(&esw->offloads.encap_tbl_lock);

		mlx5e_put_encap_flow_list(priv, &flow_list);
		if (neigh_used) {
			/* release current encap before breaking the loop */
			mlx5e_encap_put(priv, e);
			break;
		}
	}

	trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));

	if (e->compl_result > 0) {
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
	}

	kfree(e->encap_header);
	kfree(e);
}
static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
				struct mlx5e_decap_entry *d)
{
	WARN_ON(!list_empty(&d->flows));

	if (!d->compl_result)
		mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);

	kfree(d);
}
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
		return;
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_decap_entry *d = flow->decap_reformat;

	if (!d)
		return;

	mutex_lock(&esw->offloads.decap_tbl_lock);
	list_del(&flow->l3_to_l2_reformat);
	flow->decap_reformat = NULL;

	if (!refcount_dec_and_test(&d->refcnt)) {
		mutex_unlock(&esw->offloads.decap_tbl_lock);
		return;
	}
	hash_del_rcu(&d->hlist);
	mutex_unlock(&esw->offloads.decap_tbl_lock);

	mlx5e_decap_dealloc(priv, d);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}
static bool
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return false;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return true;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;
	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
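	/* When the options are don't care (enc_opts_id == 0), the low
	 * ENC_OPTS_BITS are excluded from the mask, so the match keys only
	 * on the mapped tunnel id.
	 */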

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
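/* i.e. an exact match on n_proto == ETH_P_IP/ETH_P_IPV6 is translated to
 * the HW ip_version field (4 or 6) when the device supports it; otherwise
 * the raw ethertype is matched.
 */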
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->esw_attr->chain;
	sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->esw_attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * instead of setting the decap action in the flow.
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -EOPNOTSUPP;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields. However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u8 *match_level;
	int err;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

2168 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2169 is_vlan_dev(filter_dev)) {
2170 struct flow_dissector_key_vlan filter_dev_mask;
2171 struct flow_dissector_key_vlan filter_dev_key;
2172 struct flow_match_vlan match;
2174 if (is_vlan_dev(filter_dev)) {
2175 match.key = &filter_dev_key;
2176 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2177 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2178 match.key->vlan_priority = 0;
2179 match.mask = &filter_dev_mask;
2180 memset(match.mask, 0xff, sizeof(*match.mask));
2181 match.mask->vlan_priority = 0;
2183 flow_rule_match_vlan(rule, &match);
2185 if (match.mask->vlan_id ||
2186 match.mask->vlan_priority ||
2187 match.mask->vlan_tpid) {
2188 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2189 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2191 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2194 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2196 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2200 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2201 match.mask->vlan_id);
2202 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2203 match.key->vlan_id);
2205 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2206 match.mask->vlan_priority);
2207 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2208 match.key->vlan_priority);
2210 *match_level = MLX5_MATCH_L2;
2212 } else if (*match_level != MLX5_MATCH_NONE) {
2213 /* cvlan_tag enabled in match criteria and
2214 * disabled in match value means both S & C tags
2215 * don't exist (i.e. the packet carries neither tag)
2217 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2218 *match_level = MLX5_MATCH_L2;
2221 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2222 struct flow_match_vlan match;
2224 flow_rule_match_cvlan(rule, &match);
2225 if (match.mask->vlan_id ||
2226 match.mask->vlan_priority ||
2227 match.mask->vlan_tpid) {
2228 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2229 MLX5_SET(fte_match_set_misc, misc_c,
2230 outer_second_svlan_tag, 1);
2231 MLX5_SET(fte_match_set_misc, misc_v,
2232 outer_second_svlan_tag, 1);
2234 MLX5_SET(fte_match_set_misc, misc_c,
2235 outer_second_cvlan_tag, 1);
2236 MLX5_SET(fte_match_set_misc, misc_v,
2237 outer_second_cvlan_tag, 1);
2240 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2241 match.mask->vlan_id);
2242 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2243 match.key->vlan_id);
2244 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2245 match.mask->vlan_priority);
2246 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2247 match.key->vlan_priority);
2249 *match_level = MLX5_MATCH_L2;
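/* Illustrative QinQ match driving both blocks above (outer S-tag via
 * the VLAN key, inner C-tag via the CVLAN key; syntax may vary by
 * iproute2 version):
 *
 *   tc filter add dev $REP ingress protocol 802.1ad flower \
 *           vlan_id 100 vlan_ethtype 802.1q cvlan_id 200 \
 *           action drop
 *
 * The outer tag lands in first_vid/first_prio and the inner tag in the
 * misc outer_second_* fields.
 */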
2253 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2254 struct flow_match_eth_addrs match;
2256 flow_rule_match_eth_addrs(rule, &match);
2257 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2260 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2264 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2267 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2271 if (!is_zero_ether_addr(match.mask->src) ||
2272 !is_zero_ether_addr(match.mask->dst))
2273 *match_level = MLX5_MATCH_L2;
2276 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2277 struct flow_match_control match;
2279 flow_rule_match_control(rule, &match);
2280 addr_type = match.key->addr_type;
2282 /* the HW doesn't support frag first/later */
2283 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2286 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2287 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2288 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2289 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2291 /* the HW doesn't need L3 inline to match on frag=no */
2292 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2293 *match_level = MLX5_MATCH_L2;
2294 /* *** L2 attributes parsing up to here *** */
2296 *match_level = MLX5_MATCH_L3;
2300 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2301 struct flow_match_basic match;
2303 flow_rule_match_basic(rule, &match);
2304 ip_proto = match.key->ip_proto;
2306 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2307 match.mask->ip_proto);
2308 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2309 match.key->ip_proto);
2311 if (match.mask->ip_proto)
2312 *match_level = MLX5_MATCH_L3;
2315 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2316 struct flow_match_ipv4_addrs match;
2318 flow_rule_match_ipv4_addrs(rule, &match);
2319 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2320 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2321 &match.mask->src, sizeof(match.mask->src));
2322 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2323 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2324 &match.key->src, sizeof(match.key->src));
2325 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2326 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2327 &match.mask->dst, sizeof(match.mask->dst));
2328 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2329 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2330 &match.key->dst, sizeof(match.key->dst));
2332 if (match.mask->src || match.mask->dst)
2333 *match_level = MLX5_MATCH_L3;
2336 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2337 struct flow_match_ipv6_addrs match;
2339 flow_rule_match_ipv6_addrs(rule, &match);
2340 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2341 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2342 &match.mask->src, sizeof(match.mask->src));
2343 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2344 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2345 &match.key->src, sizeof(match.key->src));
2347 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2348 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2349 &match.mask->dst, sizeof(match.mask->dst));
2350 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2351 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2352 &match.key->dst, sizeof(match.key->dst));
2354 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2355 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2356 *match_level = MLX5_MATCH_L3;
2359 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2360 struct flow_match_ip match;
2362 flow_rule_match_ip(rule, &match);
2363 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2364 match.mask->tos & 0x3);
2365 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2366 match.key->tos & 0x3);
2368 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2369 match.mask->tos >> 2);
2370 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2371 match.key->tos >> 2);
2373 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2375 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2378 if (match.mask->ttl &&
2379 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2380 ft_field_support.outer_ipv4_ttl)) {
2381 NL_SET_ERR_MSG_MOD(extack,
2382 "Matching on TTL is not supported");
2386 if (match.mask->tos || match.mask->ttl)
2387 *match_level = MLX5_MATCH_L3;
2390 /* *** L3 attributes parsing up to here *** */
2392 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2393 struct flow_match_ports match;
2395 flow_rule_match_ports(rule, &match);
2398 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2399 tcp_sport, ntohs(match.mask->src));
2400 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2401 tcp_sport, ntohs(match.key->src));
2403 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2404 tcp_dport, ntohs(match.mask->dst));
2405 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2406 tcp_dport, ntohs(match.key->dst));
2410 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2411 udp_sport, ntohs(match.mask->src));
2412 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2413 udp_sport, ntohs(match.key->src));
2415 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2416 udp_dport, ntohs(match.mask->dst));
2417 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2418 udp_dport, ntohs(match.key->dst));
2421 NL_SET_ERR_MSG_MOD(extack,
2422 "Only UDP and TCP transports are supported for L4 matching");
2423 netdev_err(priv->netdev,
2424 "Only UDP and TCP transport are supported\n");
2428 if (match.mask->src || match.mask->dst)
2429 *match_level = MLX5_MATCH_L4;
2432 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2433 struct flow_match_tcp match;
2435 flow_rule_match_tcp(rule, &match);
2436 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2437 ntohs(match.mask->flags));
2438 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2439 ntohs(match.key->flags));
2441 if (match.mask->flags)
2442 *match_level = MLX5_MATCH_L4;
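/* Example rule reaching MLX5_MATCH_L4 (illustrative tc(8) syntax):
 *
 *   tc filter add dev $REP ingress protocol ip flower ip_proto tcp \
 *           dst_port 80 tcp_flags 0x2/0x17 action drop
 *
 * The PORTS and TCP keys both raise the match level to L4, which feeds
 * the min-inline check in parse_cls_flower() below.
 */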
2448 static int parse_cls_flower(struct mlx5e_priv *priv,
2449 struct mlx5e_tc_flow *flow,
2450 struct mlx5_flow_spec *spec,
2451 struct flow_cls_offload *f,
2452 struct net_device *filter_dev)
2454 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2455 struct netlink_ext_ack *extack = f->common.extack;
2456 struct mlx5_core_dev *dev = priv->mdev;
2457 struct mlx5_eswitch *esw = dev->priv.eswitch;
2458 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2459 struct mlx5_eswitch_rep *rep;
2460 bool is_eswitch_flow;
2463 inner_match_level = MLX5_MATCH_NONE;
2464 outer_match_level = MLX5_MATCH_NONE;
2466 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2467 &inner_match_level, &outer_match_level);
2468 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2469 outer_match_level : inner_match_level;
2471 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2472 if (!err && is_eswitch_flow) {
2474 if (rep->vport != MLX5_VPORT_UPLINK &&
2475 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2476 esw->offloads.inline_mode < non_tunnel_match_level)) {
2477 NL_SET_ERR_MSG_MOD(extack,
2478 "Flow is not offloaded due to min inline setting");
2479 netdev_warn(priv->netdev,
2480 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2481 non_tunnel_match_level, esw->offloads.inline_mode);
2486 if (is_eswitch_flow) {
2487 flow->esw_attr->inner_match_level = inner_match_level;
2488 flow->esw_attr->outer_match_level = outer_match_level;
2490 flow->nic_attr->match_level = non_tunnel_match_level;
2496 struct pedit_headers {
2498 struct vlan_hdr vlan;
2505 struct pedit_headers_action {
2506 struct pedit_headers vals;
2507 struct pedit_headers masks;
2511 static int pedit_header_offsets[] = {
2512 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2513 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2514 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2515 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2516 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2519 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2521 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2522 struct pedit_headers_action *hdrs)
2524 u32 *curr_pmask, *curr_pval;
2526 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2527 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2529 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2532 *curr_pmask |= mask;
2533 *curr_pval |= (val & mask);
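/* Masks and values accumulate per header type across all pedit keys of
 * a rule. E.g. two keys that both touch the ttl byte of the IPv4 header
 * trip the (*curr_pmask & mask) test on the second call, and the rule
 * is rejected rather than silently merged.
 */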
2541 struct mlx5_fields {
2549 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2550 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2551 offsetof(struct pedit_headers, field) + (off), \
2552 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2554 /* masked values are the same and there are no rewrites that do not have a match. */
2557 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2558 type matchmaskx = *(type *)(matchmaskp); \
2559 type matchvalx = *(type *)(matchvalp); \
2560 type maskx = *(type *)(maskp); \
2561 type valx = *(type *)(valp); \
2563 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2564 matchmaskx)); \
2565 })
2567 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2568 void *matchmaskp, u8 bsize)
2574 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2577 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2580 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
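/* Worked example (host order for brevity): a match of tos == 0x28 under
 * mask 0xfc combined with a pedit that sets tos to 0x28 under the same
 * 0xfc mask gives (valx & maskx) == (matchvalx & matchmaskx) and
 * maskx ^ matchmaskx == 0, so the rewrite is redundant and the caller
 * skips it. If the pedit mask covers bits the match mask does not
 * (0xff vs 0xfc), maskx & (maskx ^ matchmaskx) != 0 and the rewrite is
 * kept.
 */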
2587 static struct mlx5_fields fields[] = {
2588 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2589 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2590 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2591 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2592 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2593 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2595 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2596 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2597 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2598 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2600 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2601 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2602 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2603 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2604 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2605 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2606 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2607 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2608 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2609 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2610 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2611 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2612 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2613 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2614 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2615 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2616 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2618 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2619 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2620 /* in the linux tcphdr, tcp_flags is 8 bits long */
2621 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2623 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2624 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
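/* As an illustration, OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0,
 * tcp_sport) expands to roughly:
 *
 *   { MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 16, U16_MAX,
 *     offsetof(struct pedit_headers, tcp.source) + 0,
 *     MLX5_BYTE_OFF(fte_match_set_lyr_2_4, tcp_sport) }
 *
 * Each entry ties a firmware rewrite field to its location in the
 * accumulated pedit masks/values and to the matching flow-table field,
 * which is what lets offload_pedit_fields() drop rewrites already
 * guaranteed by the match.
 */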
2627 static int offload_pedit_fields(struct mlx5e_priv *priv,
2629 struct pedit_headers_action *hdrs,
2630 struct mlx5e_tc_flow_parse_attr *parse_attr,
2632 struct netlink_ext_ack *extack)
2634 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2635 int i, action_size, first, last, next_z;
2636 void *headers_c, *headers_v, *action, *vals_p;
2637 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2638 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2639 struct mlx5_fields *f;
2646 mod_acts = &parse_attr->mod_hdr_acts;
2647 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2648 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2650 set_masks = &hdrs[0].masks;
2651 add_masks = &hdrs[1].masks;
2652 set_vals = &hdrs[0].vals;
2653 add_vals = &hdrs[1].vals;
2655 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2657 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2661 /* avoid seeing bits set from previous iterations */
2665 s_masks_p = (void *)set_masks + f->offset;
2666 a_masks_p = (void *)add_masks + f->offset;
2668 s_mask = *s_masks_p & f->field_mask;
2669 a_mask = *a_masks_p & f->field_mask;
2671 if (!s_mask && !a_mask) /* nothing to offload here */
2674 if (s_mask && a_mask) {
2675 NL_SET_ERR_MSG_MOD(extack,
2676 "can't set and add to the same HW field");
2677 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2683 void *match_mask = headers_c + f->match_offset;
2684 void *match_val = headers_v + f->match_offset;
2686 cmd = MLX5_ACTION_TYPE_SET;
2688 vals_p = (void *)set_vals + f->offset;
2689 /* don't rewrite if we have a match on the same value */
2690 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2691 match_mask, f->field_bsize))
2693 /* clear to denote we consumed this field */
2694 *s_masks_p &= ~f->field_mask;
2696 cmd = MLX5_ACTION_TYPE_ADD;
2698 vals_p = (void *)add_vals + f->offset;
2699 /* add 0 is no change */
2700 if ((*(u32 *)vals_p & f->field_mask) == 0)
2702 /* clear to denote we consumed this field */
2703 *a_masks_p &= ~f->field_mask;
2708 if (f->field_bsize == 32) {
2709 mask_be32 = (__force __be32)(mask);
2710 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2711 } else if (f->field_bsize == 16) {
2712 mask_be32 = (__force __be32)(mask);
2713 mask_be16 = *(__be16 *)&mask_be32;
2714 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2717 first = find_first_bit(&mask, f->field_bsize);
2718 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2719 last = find_last_bit(&mask, f->field_bsize);
2720 if (first < next_z && next_z < last) {
2721 NL_SET_ERR_MSG_MOD(extack,
2722 "rewrite of few sub-fields isn't supported");
2723 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2728 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2730 NL_SET_ERR_MSG_MOD(extack,
2731 "too many pedit actions, can't offload");
2732 mlx5_core_warn(priv->mdev,
2733 "mlx5: parsed %d pedit actions, can't do more\n",
2734 mod_acts->num_actions);
2738 action = mod_acts->actions +
2739 (mod_acts->num_actions * action_size);
2740 MLX5_SET(set_action_in, action, action_type, cmd);
2741 MLX5_SET(set_action_in, action, field, f->field);
2743 if (cmd == MLX5_ACTION_TYPE_SET) {
2746 /* a bit-sized field may start at a bit other than the first one */
2747 start = find_first_bit((unsigned long *)&f->field_mask,
2750 MLX5_SET(set_action_in, action, offset, first - start);
2751 /* length is num of bits to be written, zero means length of 32 */
2752 MLX5_SET(set_action_in, action, length, (last - first + 1));
2755 if (f->field_bsize == 32)
2756 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2757 else if (f->field_bsize == 16)
2758 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2759 else if (f->field_bsize == 8)
2760 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2762 ++mod_acts->num_actions;
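/* Worked example: a pedit that rewrites only DSCP uses the IP_DSCP
 * entry (8-bit field, field_mask 0xfc). For mask 0xfc:
 *   first  = find_first_bit(0xfc)           = 2
 *   next_z = find_next_zero_bit(0xfc, 8, 2) = 8
 *   last   = find_last_bit(0xfc)            = 7
 * first < next_z && next_z < last is false, so the mask is contiguous
 * and offloadable; offset = first - start = 0 and length = 6 are
 * programmed into the set action. A mask with a hole, e.g. 0xa0
 * (first = 5, next_z = 6, last = 7), is rejected above.
 */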
2768 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2771 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2772 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2773 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2774 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2777 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2779 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2781 int action_size, new_num_actions, max_hw_actions;
2782 size_t new_sz, old_sz;
2785 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2788 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2790 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2792 new_num_actions = min(max_hw_actions,
2793 mod_hdr_acts->actions ?
2794 mod_hdr_acts->max_actions * 2 : 1);
2795 if (mod_hdr_acts->max_actions == new_num_actions)
2798 new_sz = action_size * new_num_actions;
2799 old_sz = mod_hdr_acts->max_actions * action_size;
2800 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2804 memset(ret + old_sz, 0, new_sz - old_sz);
2805 mod_hdr_acts->actions = ret;
2806 mod_hdr_acts->max_actions = new_num_actions;
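/* The actions array grows geometrically: 1, 2, 4, ... entries, capped
 * at max_modify_header_actions for the target namespace. Once the cap
 * is reached the function bails out, which surfaces as the "too many
 * pedit actions" extack message in offload_pedit_fields().
 */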
2811 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2813 kfree(mod_hdr_acts->actions);
2814 mod_hdr_acts->actions = NULL;
2815 mod_hdr_acts->num_actions = 0;
2816 mod_hdr_acts->max_actions = 0;
2819 static const struct pedit_headers zero_masks = {};
2822 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2823 const struct flow_action_entry *act, int namespace,
2824 struct mlx5e_tc_flow_parse_attr *parse_attr,
2825 struct pedit_headers_action *hdrs,
2826 struct netlink_ext_ack *extack)
2828 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2829 int err = -EOPNOTSUPP;
2830 u32 mask, val, offset;
2833 htype = act->mangle.htype;
2834 err = -EOPNOTSUPP; /* can't be all optimistic */
2836 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2837 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2841 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2842 NL_SET_ERR_MSG_MOD(extack,
2843 "The pedit offload action is not supported");
2847 mask = act->mangle.mask;
2848 val = act->mangle.val;
2849 offset = act->mangle.offset;
2851 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2863 parse_pedit_to_reformat(struct mlx5e_priv *priv,
2864 const struct flow_action_entry *act,
2865 struct mlx5e_tc_flow_parse_attr *parse_attr,
2866 struct netlink_ext_ack *extack)
2868 u32 mask, val, offset;
2871 if (act->id != FLOW_ACTION_MANGLE)
2874 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
2875 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
2879 mask = ~act->mangle.mask;
2880 val = act->mangle.val;
2881 offset = act->mangle.offset;
2882 p = (u32 *)&parse_attr->eth;
2883 *(p + (offset >> 2)) |= (val & mask);
2888 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2889 const struct flow_action_entry *act, int namespace,
2890 struct mlx5e_tc_flow_parse_attr *parse_attr,
2891 struct pedit_headers_action *hdrs,
2892 struct mlx5e_tc_flow *flow,
2893 struct netlink_ext_ack *extack)
2895 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
2896 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
2898 return parse_pedit_to_modify_hdr(priv, act, namespace,
2899 parse_attr, hdrs, extack);
2902 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2903 struct mlx5e_tc_flow_parse_attr *parse_attr,
2904 struct pedit_headers_action *hdrs,
2906 struct netlink_ext_ack *extack)
2908 struct pedit_headers *cmd_masks;
2912 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2913 action_flags, extack);
2915 goto out_dealloc_parsed_actions;
2917 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2918 cmd_masks = &hdrs[cmd].masks;
2919 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2920 NL_SET_ERR_MSG_MOD(extack,
2921 "attempt to offload an unsupported field");
2922 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2923 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2924 16, 1, cmd_masks, sizeof(zero_masks), true);
2926 goto out_dealloc_parsed_actions;
2932 out_dealloc_parsed_actions:
2933 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2937 static bool csum_offload_supported(struct mlx5e_priv *priv,
2940 struct netlink_ext_ack *extack)
2942 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2943 TCA_CSUM_UPDATE_FLAG_UDP;
2945 /* The HW recalculates checksums only when headers are rewritten */
2946 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2947 NL_SET_ERR_MSG_MOD(extack,
2948 "TC csum action is only offloaded with pedit");
2949 netdev_warn(priv->netdev,
2950 "TC csum action is only offloaded with pedit\n");
2954 if (update_flags & ~prot_flags) {
2955 NL_SET_ERR_MSG_MOD(extack,
2956 "can't offload TC csum action for some header/s");
2957 netdev_warn(priv->netdev,
2958 "can't offload TC csum action for some header/s - flags %#x\n",
2966 struct ip_ttl_word {
2972 struct ipv6_hoplimit_word {
2978 static int is_action_keys_supported(const struct flow_action_entry *act,
2979 bool ct_flow, bool *modify_ip_header,
2981 struct netlink_ext_ack *extack)
2986 htype = act->mangle.htype;
2987 offset = act->mangle.offset;
2988 mask = ~act->mangle.mask;
2989 /* For the IPv4 and IPv6 headers, check the whole 4-byte word
2990 * containing ttl/hop_limit to determine whether fields other
2991 * than ttl & hop_limit are being modified.
2993 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2994 struct ip_ttl_word *ttl_word =
2995 (struct ip_ttl_word *)&mask;
2997 if (offset != offsetof(struct iphdr, ttl) ||
2998 ttl_word->protocol ||
3000 *modify_ip_header = true;
3003 if (offset >= offsetof(struct iphdr, saddr))
3004 *modify_tuple = true;
3006 if (ct_flow && *modify_tuple) {
3007 NL_SET_ERR_MSG_MOD(extack,
3008 "can't offload re-write of ipv4 address with action ct");
3011 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3012 struct ipv6_hoplimit_word *hoplimit_word =
3013 (struct ipv6_hoplimit_word *)&mask;
3015 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3016 hoplimit_word->payload_len ||
3017 hoplimit_word->nexthdr) {
3018 *modify_ip_header = true;
3021 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3022 *modify_tuple = true;
3024 if (ct_flow && *modify_tuple) {
3025 NL_SET_ERR_MSG_MOD(extack,
3026 "can't offload re-write of ipv6 address with action ct");
3029 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3030 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3031 *modify_tuple = true;
3033 NL_SET_ERR_MSG_MOD(extack,
3034 "can't offload re-write of transport header ports with action ct");
3042 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3043 struct mlx5_flow_spec *spec,
3044 struct flow_action *flow_action,
3045 u32 actions, bool ct_flow,
3047 struct netlink_ext_ack *extack)
3049 const struct flow_action_entry *act;
3050 bool modify_ip_header, modify_tuple;
3057 headers_c = get_match_headers_criteria(actions, spec);
3058 headers_v = get_match_headers_value(actions, spec);
3059 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3061 /* for non-IP we only re-write MACs, so we're okay */
3062 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3063 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3066 modify_ip_header = false;
3067 modify_tuple = false;
3068 flow_action_for_each(i, act, flow_action) {
3069 if (act->id != FLOW_ACTION_MANGLE &&
3070 act->id != FLOW_ACTION_ADD)
3073 err = is_action_keys_supported(act, ct_flow,
3075 &modify_tuple, extack);
3080 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3081 * (or after clear action), as otherwise, since the tuple is changed,
3082 * we can't restore ct state
3084 if (!ct_clear && modify_tuple &&
3085 mlx5_tc_ct_add_no_trk_match(priv, spec)) {
3086 NL_SET_ERR_MSG_MOD(extack,
3087 "can't offload tuple modify header with ct matches");
3088 netdev_info(priv->netdev,
3089 "can't offload tuple modify header with ct matches");
3093 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3094 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3095 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3096 NL_SET_ERR_MSG_MOD(extack,
3097 "can't offload re-write of non TCP/UDP");
3098 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3107 static bool actions_match_supported(struct mlx5e_priv *priv,
3108 struct flow_action *flow_action,
3109 struct mlx5e_tc_flow_parse_attr *parse_attr,
3110 struct mlx5e_tc_flow *flow,
3111 struct netlink_ext_ack *extack)
3113 bool ct_flow = false, ct_clear = false;
3116 if (mlx5e_is_eswitch_flow(flow)) {
3117 actions = flow->esw_attr->action;
3118 ct_clear = flow->esw_attr->ct_attr.ct_action &
3120 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3121 if (flow->esw_attr->split_count && ct_flow) {
3122 /* All registers used by ct are cleared when using split rules. */
3125 NL_SET_ERR_MSG_MOD(extack,
3126 "Can't offload mirroring with action ct");
3130 actions = flow->nic_attr->action;
3133 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3134 return modify_header_match_supported(priv, &parse_attr->spec,
3135 flow_action, actions,
3142 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3144 return priv->mdev == peer_priv->mdev;
3147 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3149 struct mlx5_core_dev *fmdev, *pmdev;
3150 u64 fsystem_guid, psystem_guid;
3153 pmdev = peer_priv->mdev;
3155 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3156 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3158 return (fsystem_guid == psystem_guid);
3161 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3162 const struct flow_action_entry *act,
3163 struct mlx5e_tc_flow_parse_attr *parse_attr,
3164 struct pedit_headers_action *hdrs,
3165 u32 *action, struct netlink_ext_ack *extack)
3167 u16 mask16 = VLAN_VID_MASK;
3168 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3169 const struct flow_action_entry pedit_act = {
3170 .id = FLOW_ACTION_MANGLE,
3171 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3172 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3173 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3174 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3176 u8 match_prio_mask, match_prio_val;
3177 void *headers_c, *headers_v;
3180 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3181 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3183 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3184 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3185 NL_SET_ERR_MSG_MOD(extack,
3186 "VLAN rewrite action must have VLAN protocol match");
3190 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3191 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3192 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3193 NL_SET_ERR_MSG_MOD(extack,
3194 "Changing VLAN prio is not supported");
3198 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3199 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
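/* Illustrative use (exact tc(8) syntax may vary; $REP and $OUT are
 * placeholders):
 *
 *   tc filter add dev $REP ingress protocol 802.1q flower vlan_id 10 \
 *           action vlan modify id 20 pipe \
 *           mirred egress redirect dev $OUT
 *
 * "vlan modify" is implemented here as a pedit of h_vlan_TCI, which is
 * why the rule must match on the VLAN header and why changing the
 * priority bits is refused.
 */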
3205 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3206 struct mlx5e_tc_flow_parse_attr *parse_attr,
3207 struct pedit_headers_action *hdrs,
3208 u32 *action, struct netlink_ext_ack *extack)
3210 const struct flow_action_entry prio_tag_act = {
3213 MLX5_GET(fte_match_set_lyr_2_4,
3214 get_match_headers_value(*action,
3217 MLX5_GET(fte_match_set_lyr_2_4,
3218 get_match_headers_criteria(*action,
3223 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3224 &prio_tag_act, parse_attr, hdrs, action,
3228 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3229 struct flow_action *flow_action,
3230 struct mlx5e_tc_flow_parse_attr *parse_attr,
3231 struct mlx5e_tc_flow *flow,
3232 struct netlink_ext_ack *extack)
3234 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
3235 struct pedit_headers_action hdrs[2] = {};
3236 const struct flow_action_entry *act;
3240 if (!flow_action_has_entries(flow_action))
3243 if (!flow_action_hw_stats_check(flow_action, extack,
3244 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3247 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3249 flow_action_for_each(i, act, flow_action) {
3251 case FLOW_ACTION_ACCEPT:
3252 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3253 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3255 case FLOW_ACTION_DROP:
3256 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3257 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3258 flow_table_properties_nic_receive.flow_counter))
3259 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3261 case FLOW_ACTION_MANGLE:
3262 case FLOW_ACTION_ADD:
3263 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3264 parse_attr, hdrs, NULL, extack);
3268 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
3269 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3271 case FLOW_ACTION_VLAN_MANGLE:
3272 err = add_vlan_rewrite_action(priv,
3273 MLX5_FLOW_NAMESPACE_KERNEL,
3274 act, parse_attr, hdrs,
3280 case FLOW_ACTION_CSUM:
3281 if (csum_offload_supported(priv, action,
3287 case FLOW_ACTION_REDIRECT: {
3288 struct net_device *peer_dev = act->dev;
3290 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3291 same_hw_devs(priv, netdev_priv(peer_dev))) {
3292 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3293 flow_flag_set(flow, HAIRPIN);
3294 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3295 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3297 NL_SET_ERR_MSG_MOD(extack,
3298 "device is not on same HW, can't offload");
3299 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3305 case FLOW_ACTION_MARK: {
3306 u32 mark = act->mark;
3308 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3309 NL_SET_ERR_MSG_MOD(extack,
3310 "Bad flow mark - only 16 bit is supported");
3314 attr->flow_tag = mark;
3315 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3319 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3324 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3325 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3326 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3327 parse_attr, hdrs, &action, extack);
3330 /* in case all pedit actions are skipped, remove the MOD_HDR flag. */
3333 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3334 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3335 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3339 attr->action = action;
3340 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
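/* Hairpin example (illustrative; device names are placeholders):
 * redirecting between two functions of the same NIC keeps the packet
 * in hardware:
 *
 *   tc filter add dev enp8s0f0 ingress protocol ip flower ip_proto tcp \
 *           action mirred egress redirect dev enp8s0f1
 *
 * same_hw_devs() gates this; a redirect to a device on foreign hardware
 * is rejected with "device is not on same HW".
 */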
3347 const struct ip_tunnel_key *ip_tun_key;
3348 struct mlx5e_tc_tunnel *tc_tunnel;
3351 static inline int cmp_encap_info(struct encap_key *a,
3352 struct encap_key *b)
3354 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3355 a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3358 static inline int cmp_decap_info(struct mlx5e_decap_key *a,
3359 struct mlx5e_decap_key *b)
3361 return memcmp(&a->key, &b->key, sizeof(b->key));
3364 static inline int hash_encap_info(struct encap_key *key)
3366 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3367 key->tc_tunnel->tunnel_type);
3370 static inline int hash_decap_info(struct mlx5e_decap_key *key)
3372 return jhash(&key->key, sizeof(key->key), 0);
3375 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3376 struct net_device *peer_netdev)
3378 struct mlx5e_priv *peer_priv;
3380 peer_priv = netdev_priv(peer_netdev);
3382 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3383 mlx5e_eswitch_vf_rep(priv->netdev) &&
3384 mlx5e_eswitch_vf_rep(peer_netdev) &&
3385 same_hw_devs(priv, peer_priv));
3388 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3390 return refcount_inc_not_zero(&e->refcnt);
3393 static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
3395 return refcount_inc_not_zero(&e->refcnt);
3398 static struct mlx5e_encap_entry *
3399 mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3402 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3403 struct mlx5e_encap_entry *e;
3404 struct encap_key e_key;
3406 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3407 encap_hlist, hash_key) {
3408 e_key.ip_tun_key = &e->tun_info->key;
3409 e_key.tc_tunnel = e->tunnel;
3410 if (!cmp_encap_info(&e_key, key) &&
3411 mlx5e_encap_take(e))
3418 static struct mlx5e_decap_entry *
3419 mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
3422 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3423 struct mlx5e_decap_key r_key;
3424 struct mlx5e_decap_entry *e;
3426 hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
3429 if (!cmp_decap_info(&r_key, key) &&
3430 mlx5e_decap_take(e))
3436 static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
3438 size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
3440 return kmemdup(tun_info, tun_size, GFP_KERNEL);
3443 static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3444 struct mlx5e_tc_flow *flow,
3446 struct mlx5e_encap_entry *e,
3447 struct netlink_ext_ack *extack)
3451 for (i = 0; i < out_index; i++) {
3452 if (flow->encaps[i].e != e)
3454 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3455 netdev_err(priv->netdev, "can't duplicate encap action\n");
3462 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3463 struct mlx5e_tc_flow *flow,
3464 struct net_device *mirred_dev,
3466 struct netlink_ext_ack *extack,
3467 struct net_device **encap_dev,
3470 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3471 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3472 struct mlx5e_tc_flow_parse_attr *parse_attr;
3473 const struct ip_tunnel_info *tun_info;
3474 struct encap_key key;
3475 struct mlx5e_encap_entry *e;
3476 unsigned short family;
3480 parse_attr = attr->parse_attr;
3481 tun_info = parse_attr->tun_info[out_index];
3482 family = ip_tunnel_info_af(tun_info);
3483 key.ip_tun_key = &tun_info->key;
3484 key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3485 if (!key.tc_tunnel) {
3486 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3490 hash_key = hash_encap_info(&key);
3492 mutex_lock(&esw->offloads.encap_tbl_lock);
3493 e = mlx5e_encap_get(priv, &key, hash_key);
3495 /* must verify if encap is valid or not */
3497 /* Check that entry was not already attached to this flow */
3498 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3503 mutex_unlock(&esw->offloads.encap_tbl_lock);
3504 wait_for_completion(&e->res_ready);
3506 /* Protect against concurrent neigh update. */
3507 mutex_lock(&esw->offloads.encap_tbl_lock);
3508 if (e->compl_result < 0) {
3515 e = kzalloc(sizeof(*e), GFP_KERNEL);
3521 refcount_set(&e->refcnt, 1);
3522 init_completion(&e->res_ready);
3524 tun_info = dup_tun_info(tun_info);
3529 e->tun_info = tun_info;
3530 err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3534 INIT_LIST_HEAD(&e->flows);
3535 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3536 mutex_unlock(&esw->offloads.encap_tbl_lock);
3538 if (family == AF_INET)
3539 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3540 else if (family == AF_INET6)
3541 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3543 /* Protect against concurrent neigh update. */
3544 mutex_lock(&esw->offloads.encap_tbl_lock);
3545 complete_all(&e->res_ready);
3547 e->compl_result = err;
3550 e->compl_result = 1;
3553 flow->encaps[out_index].e = e;
3554 list_add(&flow->encaps[out_index].list, &e->flows);
3555 flow->encaps[out_index].index = out_index;
3556 *encap_dev = e->out_dev;
3557 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3558 attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3559 attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3560 *encap_valid = true;
3562 *encap_valid = false;
3564 mutex_unlock(&esw->offloads.encap_tbl_lock);
3569 mutex_unlock(&esw->offloads.encap_tbl_lock);
3571 mlx5e_encap_put(priv, e);
3575 mutex_unlock(&esw->offloads.encap_tbl_lock);
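/* Typical encap rule feeding this path (illustrative; $REP and the
 * addresses/ids are placeholders):
 *
 *   tc filter add dev $REP ingress flower \
 *           action tunnel_key set src_ip 1.1.1.1 dst_ip 2.2.2.1 \
 *                  id 42 dst_port 4789 pipe \
 *           mirred egress redirect dev vxlan0
 *
 * Encap entries are shared: flows with an identical ip_tun_key and
 * tunnel type hash to the same mlx5e_encap_entry. A flow whose
 * neighbour is not yet resolved gets *encap_valid == false and is
 * expected to be offloaded via the slow path until a neigh update
 * marks the entry MLX5_ENCAP_ENTRY_VALID.
 */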
3581 static int mlx5e_attach_decap(struct mlx5e_priv *priv,
3582 struct mlx5e_tc_flow *flow,
3583 struct netlink_ext_ack *extack)
3585 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3586 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3587 struct mlx5e_tc_flow_parse_attr *parse_attr;
3588 struct mlx5e_decap_entry *d;
3589 struct mlx5e_decap_key key;
3593 parse_attr = attr->parse_attr;
3594 if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
3595 NL_SET_ERR_MSG_MOD(extack,
3596 "encap header larger than max supported");
3600 key.key = parse_attr->eth;
3601 hash_key = hash_decap_info(&key);
3602 mutex_lock(&esw->offloads.decap_tbl_lock);
3603 d = mlx5e_decap_get(priv, &key, hash_key);
3605 mutex_unlock(&esw->offloads.decap_tbl_lock);
3606 wait_for_completion(&d->res_ready);
3607 mutex_lock(&esw->offloads.decap_tbl_lock);
3608 if (d->compl_result) {
3615 d = kzalloc(sizeof(*d), GFP_KERNEL);
3622 refcount_set(&d->refcnt, 1);
3623 init_completion(&d->res_ready);
3624 INIT_LIST_HEAD(&d->flows);
3625 hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
3626 mutex_unlock(&esw->offloads.decap_tbl_lock);
3628 d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
3629 MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
3630 sizeof(parse_attr->eth),
3632 MLX5_FLOW_NAMESPACE_FDB);
3633 if (IS_ERR(d->pkt_reformat)) {
3634 err = PTR_ERR(d->pkt_reformat);
3635 d->compl_result = err;
3637 mutex_lock(&esw->offloads.decap_tbl_lock);
3638 complete_all(&d->res_ready);
3643 flow->decap_reformat = d;
3644 attr->decap_pkt_reformat = d->pkt_reformat;
3645 list_add(&flow->l3_to_l2_reformat, &d->flows);
3646 mutex_unlock(&esw->offloads.decap_tbl_lock);
3650 mutex_unlock(&esw->offloads.decap_tbl_lock);
3651 mlx5e_decap_put(priv, d);
3655 mutex_unlock(&esw->offloads.decap_tbl_lock);
3659 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3660 const struct flow_action_entry *act,
3661 struct mlx5_esw_flow_attr *attr,
3664 u8 vlan_idx = attr->total_vlan;
3666 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3670 case FLOW_ACTION_VLAN_POP:
3672 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3673 MLX5_FS_VLAN_DEPTH))
3676 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3678 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3681 case FLOW_ACTION_VLAN_PUSH:
3682 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3683 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3684 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3685 if (!attr->vlan_proto[vlan_idx])
3686 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3689 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3690 MLX5_FS_VLAN_DEPTH))
3693 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3695 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3696 (act->vlan.proto != htons(ETH_P_8021Q) ||
3700 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3707 attr->total_vlan = vlan_idx + 1;
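/* Double-push (QinQ) example, illustrative ($OUT is a placeholder):
 *
 *   ... action vlan push protocol 802.1ad id 100 pipe \
 *       vlan push id 200 pipe mirred egress redirect dev $OUT
 *
 * The second push lands in vlan_idx 1 and needs device support for
 * MLX5_FS_VLAN_DEPTH vlan actions; on devices lacking even single-VLAN
 * action support, a push of a non-0x8100 TPID or a non-zero prio is
 * likewise refused.
 */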
3712 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3713 struct net_device *out_dev)
3715 struct net_device *fdb_out_dev = out_dev;
3716 struct net_device *uplink_upper;
3719 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3720 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3721 uplink_upper == out_dev) {
3722 fdb_out_dev = uplink_dev;
3723 } else if (netif_is_lag_master(out_dev)) {
3724 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3726 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3727 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3734 static int add_vlan_push_action(struct mlx5e_priv *priv,
3735 struct mlx5_esw_flow_attr *attr,
3736 struct net_device **out_dev,
3739 struct net_device *vlan_dev = *out_dev;
3740 struct flow_action_entry vlan_act = {
3741 .id = FLOW_ACTION_VLAN_PUSH,
3742 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3743 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3748 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3752 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3753 dev_get_iflink(vlan_dev));
3754 if (is_vlan_dev(*out_dev))
3755 err = add_vlan_push_action(priv, attr, out_dev, action);
3760 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3761 struct mlx5_esw_flow_attr *attr,
3764 struct flow_action_entry vlan_act = {
3765 .id = FLOW_ACTION_VLAN_POP,
3767 int nest_level, err = 0;
3769 nest_level = attr->parse_attr->filter_dev->lower_level -
3770 priv->netdev->lower_level;
3771 while (nest_level--) {
3772 err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
3780 static bool same_hw_reps(struct mlx5e_priv *priv,
3781 struct net_device *peer_netdev)
3783 struct mlx5e_priv *peer_priv;
3785 peer_priv = netdev_priv(peer_netdev);
3787 return mlx5e_eswitch_rep(priv->netdev) &&
3788 mlx5e_eswitch_rep(peer_netdev) &&
3789 same_hw_devs(priv, peer_priv);
3792 static bool is_lag_dev(struct mlx5e_priv *priv,
3793 struct net_device *peer_netdev)
3795 return ((mlx5_lag_is_sriov(priv->mdev) ||
3796 mlx5_lag_is_multipath(priv->mdev)) &&
3797 same_hw_reps(priv, peer_netdev));
3800 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3801 struct net_device *out_dev)
3803 if (is_merged_eswitch_vfs(priv, out_dev))
3806 if (is_lag_dev(priv, out_dev))
3809 return mlx5e_eswitch_rep(out_dev) &&
3810 same_port_devs(priv, netdev_priv(out_dev));
3813 static bool is_duplicated_output_device(struct net_device *dev,
3814 struct net_device *out_dev,
3815 int *ifindexes, int if_count,
3816 struct netlink_ext_ack *extack)
3820 for (i = 0; i < if_count; i++) {
3821 if (ifindexes[i] == out_dev->ifindex) {
3822 NL_SET_ERR_MSG_MOD(extack,
3823 "can't duplicate output to same device");
3824 netdev_err(dev, "can't duplicate output to same device: %s\n",
3833 static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
3834 struct mlx5e_tc_flow *flow,
3835 const struct flow_action_entry *act,
3837 struct netlink_ext_ack *extack)
3839 u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
3840 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3841 bool ft_flow = mlx5e_is_ft_flow(flow);
3842 u32 dest_chain = act->chain_index;
3845 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3849 if (!mlx5_esw_chains_backwards_supported(esw) &&
3850 dest_chain <= attr->chain) {
3851 NL_SET_ERR_MSG_MOD(extack,
3852 "Goto lower numbered chain isn't supported");
3855 if (dest_chain > max_chain) {
3856 NL_SET_ERR_MSG_MOD(extack,
3857 "Requested destination chain is out of supported range");
3861 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3862 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3863 !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
3864 NL_SET_ERR_MSG_MOD(extack,
3865 "Goto chain is not allowed if action has reformat or decap");
3872 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3873 struct mlx5e_tc_flow *flow,
3874 struct net_device *out_dev,
3875 struct netlink_ext_ack *extack)
3877 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3878 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3879 struct mlx5e_rep_priv *rep_priv;
3881 /* Forwarding non encapsulated traffic between
3882 * uplink ports is allowed only if
3883 * termination_table_raw_traffic cap is set.
3885 * Input vport was stored in esw_attr->in_rep.
3886 * In LAG case, *priv* is the private data of
3887 * the uplink, which may not be the input vport.
3889 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3891 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3892 mlx5e_eswitch_uplink_rep(out_dev)))
3895 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3896 termination_table_raw_traffic)) {
3897 NL_SET_ERR_MSG_MOD(extack,
3898 "devices are both uplink, can't offload forwarding");
3899 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3900 priv->netdev->name, out_dev->name);
3902 } else if (out_dev != rep_priv->netdev) {
3903 NL_SET_ERR_MSG_MOD(extack,
3904 "devices are not the same uplink, can't offload forwarding");
3905 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3906 priv->netdev->name, out_dev->name);
3912 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3913 struct flow_action *flow_action,
3914 struct mlx5e_tc_flow *flow,
3915 struct netlink_ext_ack *extack,
3916 struct net_device *filter_dev)
3918 struct pedit_headers_action hdrs[2] = {};
3919 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3920 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
3921 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3922 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3923 const struct ip_tunnel_info *info = NULL;
3924 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3925 bool ft_flow = mlx5e_is_ft_flow(flow);
3926 const struct flow_action_entry *act;
3927 bool encap = false, decap = false;
3928 u32 action = attr->action;
3929 int err, i, if_count = 0;
3930 bool mpls_push = false;
3932 if (!flow_action_has_entries(flow_action))
3935 if (!flow_action_hw_stats_check(flow_action, extack,
3936 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3939 flow_action_for_each(i, act, flow_action) {
3941 case FLOW_ACTION_DROP:
3942 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3943 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3945 case FLOW_ACTION_MPLS_PUSH:
3946 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3947 reformat_l2_to_l3_tunnel) ||
3948 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3949 NL_SET_ERR_MSG_MOD(extack,
3950 "mpls push is supported only for mpls_uc protocol");
3955 case FLOW_ACTION_MPLS_POP:
3956 /* we only support mpls pop if it is the first action
3957 * and the filter net device is bareudp. Subsequent
3958 * actions can be pedit and the last can be mirred egress redirect. */
3962 NL_SET_ERR_MSG_MOD(extack,
3963 "mpls pop supported only as first action");
3966 if (!netif_is_bareudp(filter_dev)) {
3967 NL_SET_ERR_MSG_MOD(extack,
3968 "mpls pop supported only on bareudp devices");
3972 parse_attr->eth.h_proto = act->mpls_pop.proto;
3973 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
3974 flow_flag_set(flow, L3_TO_L2_DECAP);
3976 case FLOW_ACTION_MANGLE:
3977 case FLOW_ACTION_ADD:
3978 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3979 parse_attr, hdrs, flow, extack);
3983 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
3984 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3985 attr->split_count = attr->out_count;
3988 case FLOW_ACTION_CSUM:
3989 if (csum_offload_supported(priv, action,
3990 act->csum_flags, extack))
3994 case FLOW_ACTION_REDIRECT:
3995 case FLOW_ACTION_MIRRED: {
3996 struct mlx5e_priv *out_priv;
3997 struct net_device *out_dev;
4001 /* out_dev is NULL when filters with
4002 * non-existing mirred device are replayed to the driver. */
4008 if (mpls_push && !netif_is_bareudp(out_dev)) {
4009 NL_SET_ERR_MSG_MOD(extack,
4010 "mpls is supported only through a bareudp device");
4014 if (ft_flow && out_dev == priv->netdev) {
4015 /* Ignore forward to self rules generated
4016 * by adding both mlx5 devs to the flow table
4017 * block on a normal nft offload setup.
4022 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
4023 NL_SET_ERR_MSG_MOD(extack,
4024 "can't support more output ports, can't offload forwarding");
4025 netdev_warn(priv->netdev,
4026 "can't support more than %d output ports, can't offload forwarding\n",
4031 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4032 MLX5_FLOW_CONTEXT_ACTION_COUNT;
4034 parse_attr->mirred_ifindex[attr->out_count] =
4036 parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
4037 if (!parse_attr->tun_info[attr->out_count])
4040 attr->dests[attr->out_count].flags |=
4041 MLX5_ESW_DEST_ENCAP;
4043 /* attr->dests[].rep is resolved when we handle encap */
4046 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
4047 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4048 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
4050 if (is_duplicated_output_device(priv->netdev,
4057 ifindexes[if_count] = out_dev->ifindex;
4060 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
4064 if (is_vlan_dev(out_dev)) {
4065 err = add_vlan_push_action(priv, attr,
4072 if (is_vlan_dev(parse_attr->filter_dev)) {
4073 err = add_vlan_pop_action(priv, attr,
4079 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
4083 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
4084 NL_SET_ERR_MSG_MOD(extack,
4085 "devices are not on same switch HW, can't offload forwarding");
4089 out_priv = netdev_priv(out_dev);
4090 rpriv = out_priv->ppriv;
4091 attr->dests[attr->out_count].rep = rpriv->rep;
4092 attr->dests[attr->out_count].mdev = out_priv->mdev;
4094 } else if (parse_attr->filter_dev != priv->netdev) {
4095 /* All mlx5 devices are called to configure
4096 * high level device filters. Therefore, the
4097 * *attempt* to install a filter on invalid
4098 * eswitch should not trigger an explicit error
4102 NL_SET_ERR_MSG_MOD(extack,
4103 "devices are not on same switch HW, can't offload forwarding");
4104 netdev_warn(priv->netdev,
4105 "devices %s %s not on same switch HW, can't offload forwarding\n",
4112 case FLOW_ACTION_TUNNEL_ENCAP:
4120 case FLOW_ACTION_VLAN_PUSH:
4121 case FLOW_ACTION_VLAN_POP:
4122 if (act->id == FLOW_ACTION_VLAN_PUSH &&
4123 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
4124 /* Replace vlan pop+push with vlan modify */
4125 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4126 err = add_vlan_rewrite_action(priv,
4127 MLX5_FLOW_NAMESPACE_FDB,
4128 act, parse_attr, hdrs,
4131 err = parse_tc_vlan_action(priv, act, attr, &action);
4136 attr->split_count = attr->out_count;
4138 case FLOW_ACTION_VLAN_MANGLE:
4139 err = add_vlan_rewrite_action(priv,
4140 MLX5_FLOW_NAMESPACE_FDB,
4141 act, parse_attr, hdrs,
4146 attr->split_count = attr->out_count;
4148 case FLOW_ACTION_TUNNEL_DECAP:
4151 case FLOW_ACTION_GOTO:
4152 err = mlx5_validate_goto_chain(esw, flow, act, action,
4157 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4158 attr->dest_chain = act->chain_index;
4160 case FLOW_ACTION_CT:
4161 err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
4165 flow_flag_set(flow, CT);
4168 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
4173 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
4174 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
4175 /* For prio tag mode, replace vlan pop with a rewrite of the vlan prio tag. */
4178 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4179 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
4185 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
4186 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
4187 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
4188 parse_attr, hdrs, &action, extack);
4191 /* in case all pedit actions are skipped, remove the MOD_HDR
4192 * flag. we might have set split_count either by pedit or
4193 * pop/push. if there is no pop/push either, reset it too.
4195 if (parse_attr->mod_hdr_acts.num_actions == 0) {
4196 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4197 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4198 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
4199 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
4200 attr->split_count = 0;
4204 attr->action = action;
4205 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
4208 if (attr->dest_chain) {
4210 /* It can be supported if we'll create a mapping for
4211 * the tunnel device only (without tunnel), and set
4212 * this tunnel id with this decap flow.
4214 * On restore (miss), we'll just set this saved tunnel device. */
4218 NL_SET_ERR_MSG(extack,
4219 "Decap with goto isn't supported");
4220 netdev_warn(priv->netdev,
4221 "Decap with goto isn't supported");
4225 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
4226 NL_SET_ERR_MSG_MOD(extack,
4227 "Mirroring goto chain rules isn't supported");
4230 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4233 if (!(attr->action &
4234 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4235 NL_SET_ERR_MSG_MOD(extack,
4236 "Rule must have at least one forward/drop action");
4240 if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4241 NL_SET_ERR_MSG_MOD(extack,
4242 "current firmware doesn't support split rule for port mirroring");
4243 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4250 static void get_flags(int flags, unsigned long *flow_flags)
4252 unsigned long __flow_flags = 0;
4254 if (flags & MLX5_TC_FLAG(INGRESS))
4255 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4256 if (flags & MLX5_TC_FLAG(EGRESS))
4257 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4259 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4260 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4261 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4262 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4263 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4264 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4266 *flow_flags = __flow_flags;
4269 static const struct rhashtable_params tc_ht_params = {
4270 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4271 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4272 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4273 .automatic_shrinking = true,
4276 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4277 unsigned long flags)
4279 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4280 struct mlx5e_rep_priv *uplink_rpriv;
4282 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4283 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4284 return &uplink_rpriv->uplink_priv.tc_ht;
4285 } else /* NIC offload */
4286 return &priv->fs.tc.ht;
4289 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4291 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
4292 bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4293 flow_flag_test(flow, INGRESS);
4294 bool act_is_encap = !!(attr->action &
4295 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4296 bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
4297 MLX5_DEVCOM_ESW_OFFLOADS);
4302 if ((mlx5_lag_is_sriov(attr->in_mdev) ||
4303 mlx5_lag_is_multipath(attr->in_mdev)) &&
4304 (is_rep_ingress || act_is_encap))

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = f->common.prio;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the mdev the packet originated from.
	 * So packets redirected to uplink use the same mdev as the
	 * original flow and packets redirected from uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* Offloaded flow rule is allowed to duplicate on non-uplink representor
	 * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
	 * function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv &&
	       rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
		flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec.
	 * mbit means million bits.
	 * Moreover, if rate is non-zero we choose to configure to a minimum
	 * of 1 mbit/sec.
	 */
	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
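
/* Worked example (illustrative): a police rate of 375000 bytes/sec is
 * 3,000,000 bits/sec, and (3000000 + 500000) / 1000000 = 3 in integer
 * arithmetic, so the vport is limited to 3 Mbit/sec. A tiny rate such as
 * 10000 bytes/sec (80000 bits/sec) would round down to 0 and is clamped
 * to the 1 Mbit/sec minimum instead.
 */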

static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
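
/* Illustrative userspace usage (exact tc syntax may vary by version): the
 * prio 1 restriction above corresponds to a filter such as
 *
 *	tc filter add dev $REP ingress prio 1 matchall \
 *		action police rate 10mbit burst 16k
 *
 * which reaches this path as a single FLOW_ACTION_POLICE entry.
 */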

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
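
/* Note: matchall stats are reported as deltas of the software vport
 * counters since the policer was last (re)configured (see the
 * prev_vf_vport_stats snapshot in scan_tc_matchall_fdb_actions()), not
 * from a dedicated hardware flow counter as in mlx5e_stats_flower().
 */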

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *priv;
	struct mapping_ctx *mapping;
	int err;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);

	err = mlx5_tc_ct_init(uplink_priv);
	if (err)
		goto err_ct;

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	return err;

err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5_tc_ct_clean(uplink_priv);
err_ct:
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5_tc_ct_clean(uplink_priv);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}

static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);