drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/flow_dissector.h>
34 #include <net/flow_offload.h>
35 #include <net/sch_generic.h>
36 #include <net/pkt_cls.h>
37 #include <net/tc_act/tc_gact.h>
38 #include <net/tc_act/tc_skbedit.h>
39 #include <linux/mlx5/fs.h>
40 #include <linux/mlx5/device.h>
41 #include <linux/rhashtable.h>
42 #include <linux/refcount.h>
43 #include <linux/completion.h>
44 #include <net/tc_act/tc_mirred.h>
45 #include <net/tc_act/tc_vlan.h>
46 #include <net/tc_act/tc_tunnel_key.h>
47 #include <net/tc_act/tc_pedit.h>
48 #include <net/tc_act/tc_csum.h>
49 #include <net/tc_act/tc_mpls.h>
50 #include <net/arp.h>
51 #include <net/ipv6_stubs.h>
52 #include <net/bareudp.h>
53 #include <net/bonding.h>
54 #include "en.h"
55 #include "en_rep.h"
56 #include "en/rep/tc.h"
57 #include "en/rep/neigh.h"
58 #include "en_tc.h"
59 #include "eswitch.h"
60 #include "fs_core.h"
61 #include "en/port.h"
62 #include "en/tc_tun.h"
63 #include "en/mapping.h"
64 #include "en/tc_ct.h"
65 #include "en/mod_hdr.h"
66 #include "lib/devcom.h"
67 #include "lib/geneve.h"
68 #include "lib/fs_chains.h"
69 #include "diag/en_tc_tracepoint.h"
70 #include <asm/div64.h>
71
72 #define nic_chains(priv) ((priv)->fs.tc.chains)
73 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
74 #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
75
76 enum {
77         MLX5E_TC_FLOW_FLAG_INGRESS      = MLX5E_TC_FLAG_INGRESS_BIT,
78         MLX5E_TC_FLOW_FLAG_EGRESS       = MLX5E_TC_FLAG_EGRESS_BIT,
79         MLX5E_TC_FLOW_FLAG_ESWITCH      = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
80         MLX5E_TC_FLOW_FLAG_FT           = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
81         MLX5E_TC_FLOW_FLAG_NIC          = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
82         MLX5E_TC_FLOW_FLAG_OFFLOADED    = MLX5E_TC_FLOW_BASE,
83         MLX5E_TC_FLOW_FLAG_HAIRPIN      = MLX5E_TC_FLOW_BASE + 1,
84         MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS  = MLX5E_TC_FLOW_BASE + 2,
85         MLX5E_TC_FLOW_FLAG_SLOW         = MLX5E_TC_FLOW_BASE + 3,
86         MLX5E_TC_FLOW_FLAG_DUP          = MLX5E_TC_FLOW_BASE + 4,
87         MLX5E_TC_FLOW_FLAG_NOT_READY    = MLX5E_TC_FLOW_BASE + 5,
88         MLX5E_TC_FLOW_FLAG_DELETED      = MLX5E_TC_FLOW_BASE + 6,
89         MLX5E_TC_FLOW_FLAG_CT           = MLX5E_TC_FLOW_BASE + 7,
90         MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
91 };
92
93 #define MLX5E_TC_MAX_SPLITS 1
94
95 /* Helper struct for accessing a struct containing list_head array.
96  * Containing struct
97  *   |- Helper array
98  *      [0] Helper item 0
99  *          |- list_head item 0
100  *          |- index (0)
101  *      [1] Helper item 1
102  *          |- list_head item 1
103  *          |- index (1)
104  * To access the containing struct from one of the list_head items:
105  * 1. Get the helper item from the list_head item using
106  *    helper item =
107  *        container_of(list_head item, helper struct type, list_head field)
108  * 2. Get the containing struct from the helper item and its index in the array:
109  *    containing struct =
110  *        container_of(helper item, containing struct type, helper field[index])
111  */
112 struct encap_flow_item {
113         struct mlx5e_encap_entry *e; /* attached encap instance */
114         struct list_head list;
115         int index;
116 };
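/* Illustrative sketch only (not part of the driver logic): given a list_head
 * "item" linked through encaps[i].list of the flow struct below, the
 * containing flow can be recovered with the two container_of() steps the
 * comment above describes:
 *
 *	struct encap_flow_item *efi;
 *	struct mlx5e_tc_flow *flow;
 *
 *	efi = container_of(item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 */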
117
118 struct mlx5e_tc_flow {
119         struct rhash_head       node;
120         struct mlx5e_priv       *priv;
121         u64                     cookie;
122         unsigned long           flags;
123         struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
124
125         /* flows sharing the same reformat object - currently mpls decap */
126         struct list_head l3_to_l2_reformat;
127         struct mlx5e_decap_entry *decap_reformat;
128
129         /* Flow can be associated with multiple encap IDs.
130          * The number of encaps is bounded by the number of supported
131          * destinations.
132          */
133         struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
134         struct mlx5e_tc_flow    *peer_flow;
135         struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
136         struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
137         struct list_head        hairpin; /* flows sharing the same hairpin */
138         struct list_head        peer;    /* flows with peer flow */
139         struct list_head        unready; /* flows not ready to be offloaded (e.g. due to a missing route) */
140         struct net_device       *orig_dev; /* netdev adding flow first */
141         int                     tmp_efi_index;
142         struct list_head        tmp_list; /* temporary flow list used by neigh update */
143         refcount_t              refcnt;
144         struct rcu_head         rcu_head;
145         struct completion       init_done;
146         int tunnel_id; /* the mapped tunnel id of this flow */
147         struct mlx5_flow_attr *attr;
148 };
149
150 struct mlx5e_tc_flow_parse_attr {
151         const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
152         struct net_device *filter_dev;
153         struct mlx5_flow_spec spec;
154         struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
155         int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
156         struct ethhdr eth;
157 };
158
159 #define MLX5E_TC_TABLE_NUM_GROUPS 4
160 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
161
162 struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
163         [CHAIN_TO_REG] = {
164                 .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
165                 .moffset = 0,
166                 .mlen = 2,
167         },
168         [TUNNEL_TO_REG] = {
169                 .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
170                 .moffset = 1,
171                 .mlen = 3,
172                 .soffset = MLX5_BYTE_OFF(fte_match_param,
173                                          misc_parameters_2.metadata_reg_c_1),
174         },
175         [ZONE_TO_REG] = zone_to_reg_ct,
176         [ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
177         [CTSTATE_TO_REG] = ctstate_to_reg_ct,
178         [MARK_TO_REG] = mark_to_reg_ct,
179         [LABELS_TO_REG] = labels_to_reg_ct,
180         [FTEID_TO_REG] = fteid_to_reg_ct,
181         /* For NIC rules we store the restore metadata directly
182          * into reg_b, which is passed to SW, since we don't
183          * jump between steering domains.
184          */
185         [NIC_CHAIN_TO_REG] = {
186                 .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
187                 .moffset = 0,
188                 .mlen = 2,
189         },
190         [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
191 };
192
193 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
194
195 void
196 mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
197                             enum mlx5e_tc_attr_to_reg type,
198                             u32 data,
199                             u32 mask)
200 {
201         int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
202         int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
203         void *headers_c = spec->match_criteria;
204         void *headers_v = spec->match_value;
205         void *fmask, *fval;
206
207         fmask = headers_c + soffset;
208         fval = headers_v + soffset;
209
210         mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
211         data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
212
213         memcpy(fmask, &mask, match_len);
214         memcpy(fval, &data, match_len);
215
216         spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
217 }
218
219 void
220 mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
221                                 enum mlx5e_tc_attr_to_reg type,
222                                 u32 *data,
223                                 u32 *mask)
224 {
225         int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
226         int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
227         void *headers_c = spec->match_criteria;
228         void *headers_v = spec->match_value;
229         void *fmask, *fval;
230
231         fmask = headers_c + soffset;
232         fval = headers_v + soffset;
233
234         memcpy(mask, fmask, match_len);
235         memcpy(data, fval, match_len);
236
237         *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
238         *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
239 }
240
241 int
242 mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
243                           struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
244                           enum mlx5_flow_namespace_type ns,
245                           enum mlx5e_tc_attr_to_reg type,
246                           u32 data)
247 {
248         int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
249         int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
250         int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
251         char *modact;
252         int err;
253
254         err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
255         if (err)
256                 return err;
257
258         modact = mod_hdr_acts->actions +
259                  (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
260
261         /* The firmware length field is 5 bits wide, and 0 means 32 bits */
262         if (mlen == 4)
263                 mlen = 0;
264
265         MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
266         MLX5_SET(set_action_in, modact, field, mfield);
267         MLX5_SET(set_action_in, modact, offset, moffset * 8);
268         MLX5_SET(set_action_in, modact, length, mlen * 8);
269         MLX5_SET(set_action_in, modact, data, data);
270         mod_hdr_acts->num_actions++;
271
272         return 0;
273 }
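/* Illustrative sketch of a typical caller (assumed usage, not taken verbatim
 * from this file): restoring a chain id through reg_c_0. With CHAIN_TO_REG
 * (moffset 0, mlen 2) the helper above emits a 16-bit SET action on metadata
 * register C0:
 *
 *	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_hdr_acts,
 *					MLX5_FLOW_NAMESPACE_FDB,
 *					CHAIN_TO_REG, chain_id);
 *	if (err)
 *		return err;
 */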
274
275 static struct mlx5_tc_ct_priv *
276 get_ct_priv(struct mlx5e_priv *priv)
277 {
278         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
279         struct mlx5_rep_uplink_priv *uplink_priv;
280         struct mlx5e_rep_priv *uplink_rpriv;
281
282         if (is_mdev_switchdev_mode(priv->mdev)) {
283                 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
284                 uplink_priv = &uplink_rpriv->uplink_priv;
285
286                 return uplink_priv->ct_priv;
287         }
288
289         return priv->fs.tc.ct;
290 }
291
292 struct mlx5_flow_handle *
293 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
294                     struct mlx5_flow_spec *spec,
295                     struct mlx5_flow_attr *attr)
296 {
297         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
298
299         if (is_mdev_switchdev_mode(priv->mdev))
300                 return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
301
302         return  mlx5e_add_offloaded_nic_rule(priv, spec, attr);
303 }
304
305 void
306 mlx5_tc_rule_delete(struct mlx5e_priv *priv,
307                     struct mlx5_flow_handle *rule,
308                     struct mlx5_flow_attr *attr)
309 {
310         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
311
312         if (is_mdev_switchdev_mode(priv->mdev)) {
313                 mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
314
315                 return;
316         }
317
318         mlx5e_del_offloaded_nic_rule(priv, rule, attr);
319 }
320
321 struct mlx5e_hairpin {
322         struct mlx5_hairpin *pair;
323
324         struct mlx5_core_dev *func_mdev;
325         struct mlx5e_priv *func_priv;
326         u32 tdn;
327         u32 tirn;
328
329         int num_channels;
330         struct mlx5e_rqt indir_rqt;
331         u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
332         struct mlx5e_ttc_table ttc;
333 };
334
335 struct mlx5e_hairpin_entry {
336         /* a node of a hash table which keeps all the hairpin entries */
337         struct hlist_node hairpin_hlist;
338
339         /* protects flows list */
340         spinlock_t flows_lock;
341         /* flows sharing the same hairpin */
342         struct list_head flows;
343         /* hpe's that were not fully initialized when the dead peer update
344          * event function traversed them.
345          */
346         struct list_head dead_peer_wait_list;
347
348         u16 peer_vhca_id;
349         u8 prio;
350         struct mlx5e_hairpin *hp;
351         refcount_t refcnt;
352         struct completion res_ready;
353 };
354
355 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
356                               struct mlx5e_tc_flow *flow);
357
358 static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
359 {
360         if (!flow || !refcount_inc_not_zero(&flow->refcnt))
361                 return ERR_PTR(-EINVAL);
362         return flow;
363 }
364
365 static void mlx5e_flow_put(struct mlx5e_priv *priv,
366                            struct mlx5e_tc_flow *flow)
367 {
368         if (refcount_dec_and_test(&flow->refcnt)) {
369                 mlx5e_tc_del_flow(priv, flow);
370                 kfree_rcu(flow, rcu_head);
371         }
372 }
373
374 static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
375 {
376         /* Complete all memory stores before setting bit. */
377         smp_mb__before_atomic();
378         set_bit(flag, &flow->flags);
379 }
380
381 #define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
382
383 static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
384                                      unsigned long flag)
385 {
386         /* test_and_set_bit() provides all necessary barriers */
387         return test_and_set_bit(flag, &flow->flags);
388 }
389
390 #define flow_flag_test_and_set(flow, flag)                      \
391         __flow_flag_test_and_set(flow,                          \
392                                  MLX5E_TC_FLOW_FLAG_##flag)
393
394 static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
395 {
396         /* Complete all memory stores before clearing bit. */
397         smp_mb__before_atomic();
398         clear_bit(flag, &flow->flags);
399 }
400
401 #define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
402                                                       MLX5E_TC_FLOW_FLAG_##flag)
403
404 static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
405 {
406         bool ret = test_bit(flag, &flow->flags);
407
408         /* Read fields of flow structure only after checking flags. */
409         smp_mb__after_atomic();
410         return ret;
411 }
412
413 #define flow_flag_test(flow, flag) __flow_flag_test(flow, \
414                                                     MLX5E_TC_FLOW_FLAG_##flag)
415
416 bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
417 {
418         return flow_flag_test(flow, ESWITCH);
419 }
420
421 static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
422 {
423         return flow_flag_test(flow, FT);
424 }
425
426 static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
427 {
428         return flow_flag_test(flow, OFFLOADED);
429 }
430
431 static int get_flow_name_space(struct mlx5e_tc_flow *flow)
432 {
433         return mlx5e_is_eswitch_flow(flow) ?
434                 MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
435 }
436
437 static struct mod_hdr_tbl *
438 get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
439 {
440         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
441
442         return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
443                 &esw->offloads.mod_hdr :
444                 &priv->fs.tc.mod_hdr;
445 }
446
447 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
448                                 struct mlx5e_tc_flow *flow,
449                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
450 {
451         struct mlx5_modify_hdr *modify_hdr;
452         struct mlx5e_mod_hdr_handle *mh;
453
454         mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
455                                   get_flow_name_space(flow),
456                                   &parse_attr->mod_hdr_acts);
457         if (IS_ERR(mh))
458                 return PTR_ERR(mh);
459
460         modify_hdr = mlx5e_mod_hdr_get(mh);
461         flow->attr->modify_hdr = modify_hdr;
462         flow->mh = mh;
463
464         return 0;
465 }
466
467 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
468                                  struct mlx5e_tc_flow *flow)
469 {
470         /* flow wasn't fully initialized */
471         if (!flow->mh)
472                 return;
473
474         mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
475                              flow->mh);
476         flow->mh = NULL;
477 }
478
479 static
480 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
481 {
482         struct net_device *netdev;
483         struct mlx5e_priv *priv;
484
485         netdev = __dev_get_by_index(net, ifindex);
486         priv = netdev_priv(netdev);
487         return priv->mdev;
488 }
489
490 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
491 {
492         u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
493         void *tirc;
494         int err;
495
496         err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
497         if (err)
498                 goto alloc_tdn_err;
499
500         tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
501
502         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
503         MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
504         MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
505
506         err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
507         if (err)
508                 goto create_tir_err;
509
510         return 0;
511
512 create_tir_err:
513         mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
514 alloc_tdn_err:
515         return err;
516 }
517
518 static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
519 {
520         mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
521         mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
522 }
523
524 static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
525 {
526         u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
527         struct mlx5e_priv *priv = hp->func_priv;
528         int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
529
530         mlx5e_build_default_indir_rqt(indirection_rqt, sz,
531                                       hp->num_channels);
532
533         for (i = 0; i < sz; i++) {
534                 ix = i;
535                 if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
536                         ix = mlx5e_bits_invert(i, ilog2(sz));
537                 ix = indirection_rqt[ix];
538                 rqn = hp->pair->rqn[ix];
539                 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
540         }
541 }
542
543 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
544 {
545         int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
546         struct mlx5e_priv *priv = hp->func_priv;
547         struct mlx5_core_dev *mdev = priv->mdev;
548         void *rqtc;
549         u32 *in;
550
551         inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
552         in = kvzalloc(inlen, GFP_KERNEL);
553         if (!in)
554                 return -ENOMEM;
555
556         rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
557
558         MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
559         MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
560
561         mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
562
563         err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
564         if (!err)
565                 hp->indir_rqt.enabled = true;
566
567         kvfree(in);
568         return err;
569 }
570
571 static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
572 {
573         struct mlx5e_priv *priv = hp->func_priv;
574         u32 in[MLX5_ST_SZ_DW(create_tir_in)];
575         int tt, i, err;
576         void *tirc;
577
578         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
579                 struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
580
581                 memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
582                 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
583
584                 MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
585                 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
586                 MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
587                 mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
588
589                 err = mlx5_core_create_tir(hp->func_mdev, in,
590                                            &hp->indir_tirn[tt]);
591                 if (err) {
592                         mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
593                         goto err_destroy_tirs;
594                 }
595         }
596         return 0;
597
598 err_destroy_tirs:
599         for (i = 0; i < tt; i++)
600                 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
601         return err;
602 }
603
604 static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
605 {
606         int tt;
607
608         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
609                 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
610 }
611
612 static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
613                                          struct ttc_params *ttc_params)
614 {
615         struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
616         int tt;
617
618         memset(ttc_params, 0, sizeof(*ttc_params));
619
620         ttc_params->any_tt_tirn = hp->tirn;
621
622         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
623                 ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
624
625         ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
626         ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
627         ft_attr->prio = MLX5E_TC_PRIO;
628 }
629
630 static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
631 {
632         struct mlx5e_priv *priv = hp->func_priv;
633         struct ttc_params ttc_params;
634         int err;
635
636         err = mlx5e_hairpin_create_indirect_rqt(hp);
637         if (err)
638                 return err;
639
640         err = mlx5e_hairpin_create_indirect_tirs(hp);
641         if (err)
642                 goto err_create_indirect_tirs;
643
644         mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
645         err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
646         if (err)
647                 goto err_create_ttc_table;
648
649         netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
650                    hp->num_channels, hp->ttc.ft.t->id);
651
652         return 0;
653
654 err_create_ttc_table:
655         mlx5e_hairpin_destroy_indirect_tirs(hp);
656 err_create_indirect_tirs:
657         mlx5e_destroy_rqt(priv, &hp->indir_rqt);
658
659         return err;
660 }
661
662 static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
663 {
664         struct mlx5e_priv *priv = hp->func_priv;
665
666         mlx5e_destroy_ttc_table(priv, &hp->ttc);
667         mlx5e_hairpin_destroy_indirect_tirs(hp);
668         mlx5e_destroy_rqt(priv, &hp->indir_rqt);
669 }
670
671 static struct mlx5e_hairpin *
672 mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
673                      int peer_ifindex)
674 {
675         struct mlx5_core_dev *func_mdev, *peer_mdev;
676         struct mlx5e_hairpin *hp;
677         struct mlx5_hairpin *pair;
678         int err;
679
680         hp = kzalloc(sizeof(*hp), GFP_KERNEL);
681         if (!hp)
682                 return ERR_PTR(-ENOMEM);
683
684         func_mdev = priv->mdev;
685         peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
686
687         pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
688         if (IS_ERR(pair)) {
689                 err = PTR_ERR(pair);
690                 goto create_pair_err;
691         }
692         hp->pair = pair;
693         hp->func_mdev = func_mdev;
694         hp->func_priv = priv;
695         hp->num_channels = params->num_channels;
696
697         err = mlx5e_hairpin_create_transport(hp);
698         if (err)
699                 goto create_transport_err;
700
701         if (hp->num_channels > 1) {
702                 err = mlx5e_hairpin_rss_init(hp);
703                 if (err)
704                         goto rss_init_err;
705         }
706
707         return hp;
708
709 rss_init_err:
710         mlx5e_hairpin_destroy_transport(hp);
711 create_transport_err:
712         mlx5_core_hairpin_destroy(hp->pair);
713 create_pair_err:
714         kfree(hp);
715         return ERR_PTR(err);
716 }
717
718 static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
719 {
720         if (hp->num_channels > 1)
721                 mlx5e_hairpin_rss_cleanup(hp);
722         mlx5e_hairpin_destroy_transport(hp);
723         mlx5_core_hairpin_destroy(hp->pair);
724         kvfree(hp);
725 }
726
727 static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
728 {
729         return (peer_vhca_id << 16 | prio);
730 }
731
732 static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
733                                                      u16 peer_vhca_id, u8 prio)
734 {
735         struct mlx5e_hairpin_entry *hpe;
736         u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
737
738         hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
739                                hairpin_hlist, hash_key) {
740                 if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
741                         refcount_inc(&hpe->refcnt);
742                         return hpe;
743                 }
744         }
745
746         return NULL;
747 }
748
749 static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
750                               struct mlx5e_hairpin_entry *hpe)
751 {
752         /* no more hairpin flows for us, release the hairpin pair */
753         if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
754                 return;
755         hash_del(&hpe->hairpin_hlist);
756         mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
757
758         if (!IS_ERR_OR_NULL(hpe->hp)) {
759                 netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
760                            dev_name(hpe->hp->pair->peer_mdev->device));
761
762                 mlx5e_hairpin_destroy(hpe->hp);
763         }
764
765         WARN_ON(!list_empty(&hpe->flows));
766         kfree(hpe);
767 }
768
769 #define UNKNOWN_MATCH_PRIO 8
770
771 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
772                                   struct mlx5_flow_spec *spec, u8 *match_prio,
773                                   struct netlink_ext_ack *extack)
774 {
775         void *headers_c, *headers_v;
776         u8 prio_val, prio_mask = 0;
777         bool vlan_present;
778
779 #ifdef CONFIG_MLX5_CORE_EN_DCB
780         if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
781                 NL_SET_ERR_MSG_MOD(extack,
782                                    "only PCP trust state supported for hairpin");
783                 return -EOPNOTSUPP;
784         }
785 #endif
786         headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
787         headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
788
789         vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
790         if (vlan_present) {
791                 prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
792                 prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
793         }
794
795         if (!vlan_present || !prio_mask) {
796                 prio_val = UNKNOWN_MATCH_PRIO;
797         } else if (prio_mask != 0x7) {
798                 NL_SET_ERR_MSG_MOD(extack,
799                                    "masked priority match not supported for hairpin");
800                 return -EOPNOTSUPP;
801         }
802
803         *match_prio = prio_val;
804         return 0;
805 }
806
807 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
808                                   struct mlx5e_tc_flow *flow,
809                                   struct mlx5e_tc_flow_parse_attr *parse_attr,
810                                   struct netlink_ext_ack *extack)
811 {
812         int peer_ifindex = parse_attr->mirred_ifindex[0];
813         struct mlx5_hairpin_params params;
814         struct mlx5_core_dev *peer_mdev;
815         struct mlx5e_hairpin_entry *hpe;
816         struct mlx5e_hairpin *hp;
817         u64 link_speed64;
818         u32 link_speed;
819         u8 match_prio;
820         u16 peer_id;
821         int err;
822
823         peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
824         if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
825                 NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
826                 return -EOPNOTSUPP;
827         }
828
829         peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
830         err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
831                                      extack);
832         if (err)
833                 return err;
834
835         mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
836         hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
837         if (hpe) {
838                 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
839                 wait_for_completion(&hpe->res_ready);
840
841                 if (IS_ERR(hpe->hp)) {
842                         err = -EREMOTEIO;
843                         goto out_err;
844                 }
845                 goto attach_flow;
846         }
847
848         hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
849         if (!hpe) {
850                 mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
851                 return -ENOMEM;
852         }
853
854         spin_lock_init(&hpe->flows_lock);
855         INIT_LIST_HEAD(&hpe->flows);
856         INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
857         hpe->peer_vhca_id = peer_id;
858         hpe->prio = match_prio;
859         refcount_set(&hpe->refcnt, 1);
860         init_completion(&hpe->res_ready);
861
862         hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
863                  hash_hairpin_info(peer_id, match_prio));
864         mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
865
866         params.log_data_size = 15;
867         params.log_data_size = min_t(u8, params.log_data_size,
868                                      MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
869         params.log_data_size = max_t(u8, params.log_data_size,
870                                      MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
871
872         params.log_num_packets = params.log_data_size -
873                                  MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
874         params.log_num_packets = min_t(u8, params.log_num_packets,
875                                        MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
876
877         params.q_counter = priv->q_counter;
878         /* allocate one hairpin channel per each 50 Gbps share of the link speed */
879         mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
880         link_speed = max_t(u32, link_speed, 50000);
881         link_speed64 = link_speed;
882         do_div(link_speed64, 50000);
883         params.num_channels = link_speed64;
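        /* Worked example (link_speed is reported in Mbps): a 100 Gbps link
         * gives link_speed64 = 100000 / 50000 = 2, i.e. two hairpin channels;
         * a link at or below 50 Gbps uses a single channel.
         */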
884
885         hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
886         hpe->hp = hp;
887         complete_all(&hpe->res_ready);
888         if (IS_ERR(hp)) {
889                 err = PTR_ERR(hp);
890                 goto out_err;
891         }
892
893         netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
894                    hp->tirn, hp->pair->rqn[0],
895                    dev_name(hp->pair->peer_mdev->device),
896                    hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
897
898 attach_flow:
899         if (hpe->hp->num_channels > 1) {
900                 flow_flag_set(flow, HAIRPIN_RSS);
901                 flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
902         } else {
903                 flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
904         }
905
906         flow->hpe = hpe;
907         spin_lock(&hpe->flows_lock);
908         list_add(&flow->hairpin, &hpe->flows);
909         spin_unlock(&hpe->flows_lock);
910
911         return 0;
912
913 out_err:
914         mlx5e_hairpin_put(priv, hpe);
915         return err;
916 }
917
918 static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
919                                    struct mlx5e_tc_flow *flow)
920 {
921         /* flow wasn't fully initialized */
922         if (!flow->hpe)
923                 return;
924
925         spin_lock(&flow->hpe->flows_lock);
926         list_del(&flow->hairpin);
927         spin_unlock(&flow->hpe->flows_lock);
928
929         mlx5e_hairpin_put(priv, flow->hpe);
930         flow->hpe = NULL;
931 }
932
933 struct mlx5_flow_handle *
934 mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
935                              struct mlx5_flow_spec *spec,
936                              struct mlx5_flow_attr *attr)
937 {
938         struct mlx5_flow_context *flow_context = &spec->flow_context;
939         struct mlx5_fs_chains *nic_chains = nic_chains(priv);
940         struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
941         struct mlx5e_tc_table *tc = &priv->fs.tc;
942         struct mlx5_flow_destination dest[2] = {};
943         struct mlx5_flow_act flow_act = {
944                 .action = attr->action,
945                 .flags    = FLOW_ACT_NO_APPEND,
946         };
947         struct mlx5_flow_handle *rule;
948         struct mlx5_flow_table *ft;
949         int dest_ix = 0;
950
951         flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
952         flow_context->flow_tag = nic_attr->flow_tag;
953
954         if (attr->dest_ft) {
955                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
956                 dest[dest_ix].ft = attr->dest_ft;
957                 dest_ix++;
958         } else if (nic_attr->hairpin_ft) {
959                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
960                 dest[dest_ix].ft = nic_attr->hairpin_ft;
961                 dest_ix++;
962         } else if (nic_attr->hairpin_tirn) {
963                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
964                 dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
965                 dest_ix++;
966         } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
967                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
968                 if (attr->dest_chain) {
969                         dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
970                                                                  attr->dest_chain, 1,
971                                                                  MLX5E_TC_FT_LEVEL);
972                         if (IS_ERR(dest[dest_ix].ft))
973                                 return ERR_CAST(dest[dest_ix].ft);
974                 } else {
975                         dest[dest_ix].ft = priv->fs.vlan.ft.t;
976                 }
977                 dest_ix++;
978         }
979
980         if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
981             MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
982                 flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
983
984         if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
985                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
986                 dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
987                 dest_ix++;
988         }
989
990         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
991                 flow_act.modify_hdr = attr->modify_hdr;
992
993         mutex_lock(&tc->t_lock);
994         if (IS_ERR_OR_NULL(tc->t)) {
995                 /* Create the root table here if it doesn't exist yet */
996                 tc->t =
997                         mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
998
999                 if (IS_ERR(tc->t)) {
1000                         mutex_unlock(&tc->t_lock);
1001                         netdev_err(priv->netdev,
1002                                    "Failed to create tc offload table\n");
1003                         rule = ERR_CAST(priv->fs.tc.t);
1004                         goto err_ft_get;
1005                 }
1006         }
1007         mutex_unlock(&tc->t_lock);
1008
1009         if (attr->chain || attr->prio)
1010                 ft = mlx5_chains_get_table(nic_chains,
1011                                            attr->chain, attr->prio,
1012                                            MLX5E_TC_FT_LEVEL);
1013         else
1014                 ft = attr->ft;
1015
1016         if (IS_ERR(ft)) {
1017                 rule = ERR_CAST(ft);
1018                 goto err_ft_get;
1019         }
1020
1021         if (attr->outer_match_level != MLX5_MATCH_NONE)
1022                 spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
1023
1024         rule = mlx5_add_flow_rules(ft, spec,
1025                                    &flow_act, dest, dest_ix);
1026         if (IS_ERR(rule))
1027                 goto err_rule;
1028
1029         return rule;
1030
1031 err_rule:
1032         if (attr->chain || attr->prio)
1033                 mlx5_chains_put_table(nic_chains,
1034                                       attr->chain, attr->prio,
1035                                       MLX5E_TC_FT_LEVEL);
1036 err_ft_get:
1037         if (attr->dest_chain)
1038                 mlx5_chains_put_table(nic_chains,
1039                                       attr->dest_chain, 1,
1040                                       MLX5E_TC_FT_LEVEL);
1041
1042         return ERR_CAST(rule);
1043 }
1044
1045 static int
1046 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
1047                       struct mlx5e_tc_flow_parse_attr *parse_attr,
1048                       struct mlx5e_tc_flow *flow,
1049                       struct netlink_ext_ack *extack)
1050 {
1051         struct mlx5_flow_attr *attr = flow->attr;
1052         struct mlx5_core_dev *dev = priv->mdev;
1053         struct mlx5_fc *counter = NULL;
1054         int err;
1055
1056         if (flow_flag_test(flow, HAIRPIN)) {
1057                 err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
1058                 if (err)
1059                         return err;
1060         }
1061
1062         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1063                 counter = mlx5_fc_create(dev, true);
1064                 if (IS_ERR(counter))
1065                         return PTR_ERR(counter);
1066
1067                 attr->counter = counter;
1068         }
1069
1070         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
1071                 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1072                 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1073                 if (err)
1074                         return err;
1075         }
1076
1077         if (flow_flag_test(flow, CT))
1078                 flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
1079                                                         attr, &parse_attr->mod_hdr_acts);
1080         else
1081                 flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
1082                                                              attr);
1083
1084         return PTR_ERR_OR_ZERO(flow->rule[0]);
1085 }
1086
1087 void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
1088                                   struct mlx5_flow_handle *rule,
1089                                   struct mlx5_flow_attr *attr)
1090 {
1091         struct mlx5_fs_chains *nic_chains = nic_chains(priv);
1092
1093         mlx5_del_flow_rules(rule);
1094
1095         if (attr->chain || attr->prio)
1096                 mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
1097                                       MLX5E_TC_FT_LEVEL);
1098
1099         if (attr->dest_chain)
1100                 mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
1101                                       MLX5E_TC_FT_LEVEL);
1102 }
1103
1104 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
1105                                   struct mlx5e_tc_flow *flow)
1106 {
1107         struct mlx5_flow_attr *attr = flow->attr;
1108         struct mlx5e_tc_table *tc = &priv->fs.tc;
1109
1110         flow_flag_clear(flow, OFFLOADED);
1111
1112         if (flow_flag_test(flow, CT))
1113                 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
1114         else if (!IS_ERR_OR_NULL(flow->rule[0]))
1115                 mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
1116
1117         /* Remove root table if no rules are left to avoid
1118          * extra steering hops.
1119          */
1120         mutex_lock(&priv->fs.tc.t_lock);
1121         if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
1122             !IS_ERR_OR_NULL(tc->t)) {
1123                 mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
1124                 priv->fs.tc.t = NULL;
1125         }
1126         mutex_unlock(&priv->fs.tc.t_lock);
1127
1128         kvfree(attr->parse_attr);
1129
1130         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1131                 mlx5e_detach_mod_hdr(priv, flow);
1132
1133         mlx5_fc_destroy(priv->mdev, attr->counter);
1134
1135         if (flow_flag_test(flow, HAIRPIN))
1136                 mlx5e_hairpin_flow_del(priv, flow);
1137
1138         kfree(flow->attr);
1139 }
1140
1141 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1142                                struct mlx5e_tc_flow *flow, int out_index);
1143
1144 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1145                               struct mlx5e_tc_flow *flow,
1146                               struct net_device *mirred_dev,
1147                               int out_index,
1148                               struct netlink_ext_ack *extack,
1149                               struct net_device **encap_dev,
1150                               bool *encap_valid);
1151 static int mlx5e_attach_decap(struct mlx5e_priv *priv,
1152                               struct mlx5e_tc_flow *flow,
1153                               struct netlink_ext_ack *extack);
1154 static void mlx5e_detach_decap(struct mlx5e_priv *priv,
1155                                struct mlx5e_tc_flow *flow);
1156
1157 static struct mlx5_flow_handle *
1158 mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
1159                            struct mlx5e_tc_flow *flow,
1160                            struct mlx5_flow_spec *spec,
1161                            struct mlx5_flow_attr *attr)
1162 {
1163         struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
1164         struct mlx5_flow_handle *rule;
1165
1166         if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
1167                 return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
1168
1169         if (flow_flag_test(flow, CT)) {
1170                 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
1171
1172                 return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
1173                                                flow, spec, attr,
1174                                                mod_hdr_acts);
1175         }
1176
1177         rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
1178         if (IS_ERR(rule))
1179                 return rule;
1180
1181         if (attr->esw_attr->split_count) {
1182                 flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
1183                 if (IS_ERR(flow->rule[1])) {
1184                         mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
1185                         return flow->rule[1];
1186                 }
1187         }
1188
1189         return rule;
1190 }
1191
1192 static void
1193 mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
1194                              struct mlx5e_tc_flow *flow,
1195                              struct mlx5_flow_attr *attr)
1196 {
1197         flow_flag_clear(flow, OFFLOADED);
1198
1199         if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
1200                 goto offload_rule_0;
1201
1202         if (flow_flag_test(flow, CT)) {
1203                 mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
1204                 return;
1205         }
1206
1207         if (attr->esw_attr->split_count)
1208                 mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
1209
1210 offload_rule_0:
1211         mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
1212 }
1213
1214 static struct mlx5_flow_handle *
1215 mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
1216                               struct mlx5e_tc_flow *flow,
1217                               struct mlx5_flow_spec *spec)
1218 {
1219         struct mlx5_flow_attr *slow_attr;
1220         struct mlx5_flow_handle *rule;
1221
1222         slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1223         if (!slow_attr)
1224                 return ERR_PTR(-ENOMEM);
1225
1226         memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1227         slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1228         slow_attr->esw_attr->split_count = 0;
1229         slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1230
1231         rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
1232         if (!IS_ERR(rule))
1233                 flow_flag_set(flow, SLOW);
1234
1235         kfree(slow_attr);
1236
1237         return rule;
1238 }
1239
1240 static void
1241 mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
1242                                   struct mlx5e_tc_flow *flow)
1243 {
1244         struct mlx5_flow_attr *slow_attr;
1245
1246         slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
1247         if (!slow_attr) {
1248                 mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
1249                 return;
1250         }
1251
1252         memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
1253         slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1254         slow_attr->esw_attr->split_count = 0;
1255         slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
1256         mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
1257         flow_flag_clear(flow, SLOW);
1258         kfree(slow_attr);
1259 }
1260
1261 /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1262  * function.
1263  */
1264 static void unready_flow_add(struct mlx5e_tc_flow *flow,
1265                              struct list_head *unready_flows)
1266 {
1267         flow_flag_set(flow, NOT_READY);
1268         list_add_tail(&flow->unready, unready_flows);
1269 }
1270
1271 /* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
1272  * function.
1273  */
1274 static void unready_flow_del(struct mlx5e_tc_flow *flow)
1275 {
1276         list_del(&flow->unready);
1277         flow_flag_clear(flow, NOT_READY);
1278 }
1279
1280 static void add_unready_flow(struct mlx5e_tc_flow *flow)
1281 {
1282         struct mlx5_rep_uplink_priv *uplink_priv;
1283         struct mlx5e_rep_priv *rpriv;
1284         struct mlx5_eswitch *esw;
1285
1286         esw = flow->priv->mdev->priv.eswitch;
1287         rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1288         uplink_priv = &rpriv->uplink_priv;
1289
1290         mutex_lock(&uplink_priv->unready_flows_lock);
1291         unready_flow_add(flow, &uplink_priv->unready_flows);
1292         mutex_unlock(&uplink_priv->unready_flows_lock);
1293 }
1294
1295 static void remove_unready_flow(struct mlx5e_tc_flow *flow)
1296 {
1297         struct mlx5_rep_uplink_priv *uplink_priv;
1298         struct mlx5e_rep_priv *rpriv;
1299         struct mlx5_eswitch *esw;
1300
1301         esw = flow->priv->mdev->priv.eswitch;
1302         rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1303         uplink_priv = &rpriv->uplink_priv;
1304
1305         mutex_lock(&uplink_priv->unready_flows_lock);
1306         unready_flow_del(flow);
1307         mutex_unlock(&uplink_priv->unready_flows_lock);
1308 }
1309
1310 static int
1311 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
1312                       struct mlx5e_tc_flow *flow,
1313                       struct netlink_ext_ack *extack)
1314 {
1315         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1316         struct net_device *out_dev, *encap_dev = NULL;
1317         struct mlx5e_tc_flow_parse_attr *parse_attr;
1318         struct mlx5_flow_attr *attr = flow->attr;
1319         struct mlx5_esw_flow_attr *esw_attr;
1320         struct mlx5_fc *counter = NULL;
1321         struct mlx5e_rep_priv *rpriv;
1322         struct mlx5e_priv *out_priv;
1323         bool encap_valid = true;
1324         u32 max_prio, max_chain;
1325         int err = 0;
1326         int out_index;
1327
1328         if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
1329                 NL_SET_ERR_MSG_MOD(extack,
1330                                    "E-switch priorities unsupported, upgrade FW");
1331                 return -EOPNOTSUPP;
1332         }
1333
1334         /* We check chain range only for tc flows.
1335          * For ft flows, we checked attr->chain was originally 0 and set it to
1336          * FDB_FT_CHAIN which is outside tc range.
1337          * See mlx5e_rep_setup_ft_cb().
1338          */
1339         max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
1340         if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
1341                 NL_SET_ERR_MSG_MOD(extack,
1342                                    "Requested chain is out of supported range");
1343                 return -EOPNOTSUPP;
1344         }
1345
1346         max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
1347         if (attr->prio > max_prio) {
1348                 NL_SET_ERR_MSG_MOD(extack,
1349                                    "Requested priority is out of supported range");
1350                 return -EOPNOTSUPP;
1351         }
1352
1353         if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
1354                 err = mlx5e_attach_decap(priv, flow, extack);
1355                 if (err)
1356                         return err;
1357         }
1358
1359         parse_attr = attr->parse_attr;
1360         esw_attr = attr->esw_attr;
1361
1362         for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
1363                 int mirred_ifindex;
1364
1365                 if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
1366                         continue;
1367
1368                 mirred_ifindex = parse_attr->mirred_ifindex[out_index];
1369                 out_dev = __dev_get_by_index(dev_net(priv->netdev),
1370                                              mirred_ifindex);
1371                 err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
1372                                          extack, &encap_dev, &encap_valid);
1373                 if (err)
1374                         return err;
1375
1376                 out_priv = netdev_priv(encap_dev);
1377                 rpriv = out_priv->ppriv;
1378                 esw_attr->dests[out_index].rep = rpriv->rep;
1379                 esw_attr->dests[out_index].mdev = out_priv->mdev;
1380         }
1381
1382         err = mlx5_eswitch_add_vlan_action(esw, attr);
1383         if (err)
1384                 return err;
1385
1386         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
1387             !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
1388                 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
1389                 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
1390                 if (err)
1391                         return err;
1392         }
1393
1394         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
1395                 counter = mlx5_fc_create(esw_attr->counter_dev, true);
1396                 if (IS_ERR(counter))
1397                         return PTR_ERR(counter);
1398
1399                 attr->counter = counter;
1400         }
1401
1402         /* we get here if one of the following takes place:
1403          * (1) there's no error
1404          * (2) there's an encap action and we don't have a valid neigh
1405          */
1406         if (!encap_valid)
1407                 flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
1408         else
1409                 flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
1410
1411         if (IS_ERR(flow->rule[0]))
1412                 return PTR_ERR(flow->rule[0]);
1413         else
1414                 flow_flag_set(flow, OFFLOADED);
1415
1416         return 0;
1417 }
1418
1419 static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
1420 {
1421         struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
1422         void *headers_v = MLX5_ADDR_OF(fte_match_param,
1423                                        spec->match_value,
1424                                        misc_parameters_3);
1425         u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
1426                                              headers_v,
1427                                              geneve_tlv_option_0_data);
1428
1429         return !!geneve_tlv_opt_0_data;
1430 }
1431
1432 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
1433                                   struct mlx5e_tc_flow *flow)
1434 {
1435         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1436         struct mlx5_flow_attr *attr = flow->attr;
1437         int out_index;
1438
1439         mlx5e_put_flow_tunnel_id(flow);
1440
1441         if (flow_flag_test(flow, NOT_READY))
1442                 remove_unready_flow(flow);
1443
1444         if (mlx5e_is_offloaded_flow(flow)) {
1445                 if (flow_flag_test(flow, SLOW))
1446                         mlx5e_tc_unoffload_from_slow_path(esw, flow);
1447                 else
1448                         mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1449         }
1450
1451         if (mlx5_flow_has_geneve_opt(flow))
1452                 mlx5_geneve_tlv_option_del(priv->mdev->geneve);
1453
1454         mlx5_eswitch_del_vlan_action(esw, attr);
1455
1456         for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
1457                 if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
1458                         mlx5e_detach_encap(priv, flow, out_index);
1459                         kfree(attr->parse_attr->tun_info[out_index]);
1460                 }
1461         kvfree(attr->parse_attr);
1462
1463         mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
1464
1465         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1466                 mlx5e_detach_mod_hdr(priv, flow);
1467
1468         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
1469                 mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
1470
1471         if (flow_flag_test(flow, L3_TO_L2_DECAP))
1472                 mlx5e_detach_decap(priv, flow);
1473
1474         kfree(flow->attr);
1475 }
1476
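     /* Called when the encap entry's neighbour becomes valid: allocate the HW
      * packet reformat object, mark the entry valid and move every flow whose
      * encap destinations are now all resolved from the slow path to the FDB
      * fast path.
      */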
1477 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
1478                               struct mlx5e_encap_entry *e,
1479                               struct list_head *flow_list)
1480 {
1481         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1482         struct mlx5_esw_flow_attr *esw_attr;
1483         struct mlx5_flow_handle *rule;
1484         struct mlx5_flow_attr *attr;
1485         struct mlx5_flow_spec *spec;
1486         struct mlx5e_tc_flow *flow;
1487         int err;
1488
1489         e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
1490                                                      e->reformat_type,
1491                                                      e->encap_size, e->encap_header,
1492                                                      MLX5_FLOW_NAMESPACE_FDB);
1493         if (IS_ERR(e->pkt_reformat)) {
1494                 mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %ld\n",
1495                                PTR_ERR(e->pkt_reformat));
1496                 return;
1497         }
1498         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1499         mlx5e_rep_queue_neigh_stats_work(priv);
1500
1501         list_for_each_entry(flow, flow_list, tmp_list) {
1502                 bool all_flow_encaps_valid = true;
1503                 int i;
1504
1505                 if (!mlx5e_is_offloaded_flow(flow))
1506                         continue;
1507                 attr = flow->attr;
1508                 esw_attr = attr->esw_attr;
1509                 spec = &attr->parse_attr->spec;
1510
1511                 esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
1512                 esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
1513                 /* Flow can be associated with multiple encap entries.
1514                  * Before offloading the flow verify that all of them have
1515                  * a valid neighbour.
1516                  */
1517                 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
1518                         if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
1519                                 continue;
1520                         if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
1521                                 all_flow_encaps_valid = false;
1522                                 break;
1523                         }
1524                 }
1525                 /* Do not offload flows with unresolved neighbors */
1526                 if (!all_flow_encaps_valid)
1527                         continue;
1528                 /* update from slow path rule to encap rule */
1529                 rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
1530                 if (IS_ERR(rule)) {
1531                         err = PTR_ERR(rule);
1532                         mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
1533                                        err);
1534                         continue;
1535                 }
1536
1537                 mlx5e_tc_unoffload_from_slow_path(esw, flow);
1538                 flow->rule[0] = rule;
1539                 /* was unset when slow path rule removed */
1540                 flow_flag_set(flow, OFFLOADED);
1541         }
1542 }
1543
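     /* Called when the encap entry's neighbour becomes invalid: move all flows
      * using the entry back to the slow path, mark the entry invalid and release
      * its HW packet reformat object.
      */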
1544 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
1545                               struct mlx5e_encap_entry *e,
1546                               struct list_head *flow_list)
1547 {
1548         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1549         struct mlx5_esw_flow_attr *esw_attr;
1550         struct mlx5_flow_handle *rule;
1551         struct mlx5_flow_attr *attr;
1552         struct mlx5_flow_spec *spec;
1553         struct mlx5e_tc_flow *flow;
1554         int err;
1555
1556         list_for_each_entry(flow, flow_list, tmp_list) {
1557                 if (!mlx5e_is_offloaded_flow(flow))
1558                         continue;
1559                 attr = flow->attr;
1560                 esw_attr = attr->esw_attr;
1561                 spec = &attr->parse_attr->spec;
1562
1563                 /* update from encap rule to slow path rule */
1564                 rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
1565                 /* mark the flow's encap dest as non-valid */
1566                 esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
1567
1568                 if (IS_ERR(rule)) {
1569                         err = PTR_ERR(rule);
1570                         mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
1571                                        err);
1572                         continue;
1573                 }
1574
1575                 mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
1576                 flow->rule[0] = rule;
1577                 /* was unset when fast path rule removed */
1578                 flow_flag_set(flow, OFFLOADED);
1579         }
1580
1581         /* the encap was valid; mark it invalid and release its HW reformat */
1582         e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
1583         mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1584 }
1585
1586 static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
1587 {
1588         return flow->attr->counter;
1589 }
1590
1591 /* Take a reference to each flow attached to the encap entry and add the
1592  * flows to flow_list using the 'tmp_list' list_head in mlx5e_tc_flow.
1593  */
1594 void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
1595 {
1596         struct encap_flow_item *efi;
1597         struct mlx5e_tc_flow *flow;
1598
1599         list_for_each_entry(efi, &e->flows, list) {
1600                 flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
1601                 if (IS_ERR(mlx5e_flow_get(flow)))
1602                         continue;
1603                 wait_for_completion(&flow->init_done);
1604
1605                 flow->tmp_efi_index = efi->index;
1606                 list_add(&flow->tmp_list, flow_list);
1607         }
1608 }
1609
1610 /* Iterate over tmp_list of flows attached to flow_list head and put each flow. */
1611 void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
1612 {
1613         struct mlx5e_tc_flow *flow, *tmp;
1614
1615         list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
1616                 mlx5e_flow_put(priv, flow);
1617 }
1618
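     /* Under RCU, find the next encap entry on the nhe list that can be
      * referenced, wait for it to finish initializing and return it only if it
      * reached a valid state; the entry passed in is released before returning
      * the next one.
      */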
1619 static struct mlx5e_encap_entry *
1620 mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1621                            struct mlx5e_encap_entry *e)
1622 {
1623         struct mlx5e_encap_entry *next = NULL;
1624
1625 retry:
1626         rcu_read_lock();
1627
1628         /* find encap with non-zero reference counter value */
1629         for (next = e ?
1630                      list_next_or_null_rcu(&nhe->encap_list,
1631                                            &e->encap_list,
1632                                            struct mlx5e_encap_entry,
1633                                            encap_list) :
1634                      list_first_or_null_rcu(&nhe->encap_list,
1635                                             struct mlx5e_encap_entry,
1636                                             encap_list);
1637              next;
1638              next = list_next_or_null_rcu(&nhe->encap_list,
1639                                           &next->encap_list,
1640                                           struct mlx5e_encap_entry,
1641                                           encap_list))
1642                 if (mlx5e_encap_take(next))
1643                         break;
1644
1645         rcu_read_unlock();
1646
1647         /* release starting encap */
1648         if (e)
1649                 mlx5e_encap_put(netdev_priv(e->out_dev), e);
1650         if (!next)
1651                 return next;
1652
1653         /* wait for encap to be fully initialized */
1654         wait_for_completion(&next->res_ready);
1655         /* continue searching if encap entry is not in valid state after completion */
1656         if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1657                 e = next;
1658                 goto retry;
1659         }
1660
1661         return next;
1662 }
1663
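     /* Walk all valid encap entries hanging off the neigh hash entry; if any
      * offloaded flow's counter advanced since the last report, update
      * reported_lastuse and notify the neighbour via neigh_event_send().
      */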
1664 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1665 {
1666         struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1667         struct mlx5e_encap_entry *e = NULL;
1668         struct mlx5e_tc_flow *flow;
1669         struct mlx5_fc *counter;
1670         struct neigh_table *tbl;
1671         bool neigh_used = false;
1672         struct neighbour *n;
1673         u64 lastuse;
1674
1675         if (m_neigh->family == AF_INET)
1676                 tbl = &arp_tbl;
1677 #if IS_ENABLED(CONFIG_IPV6)
1678         else if (m_neigh->family == AF_INET6)
1679                 tbl = ipv6_stub->nd_tbl;
1680 #endif
1681         else
1682                 return;
1683
1684         /* mlx5e_get_next_valid_encap() releases previous encap before returning
1685          * next one.
1686          */
1687         while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
1688                 struct mlx5e_priv *priv = netdev_priv(e->out_dev);
1689                 struct encap_flow_item *efi, *tmp;
1690                 struct mlx5_eswitch *esw;
1691                 LIST_HEAD(flow_list);
1692
1693                 esw = priv->mdev->priv.eswitch;
1694                 mutex_lock(&esw->offloads.encap_tbl_lock);
1695                 list_for_each_entry_safe(efi, tmp, &e->flows, list) {
1696                         flow = container_of(efi, struct mlx5e_tc_flow,
1697                                             encaps[efi->index]);
1698                         if (IS_ERR(mlx5e_flow_get(flow)))
1699                                 continue;
1700                         list_add(&flow->tmp_list, &flow_list);
1701
1702                         if (mlx5e_is_offloaded_flow(flow)) {
1703                                 counter = mlx5e_tc_get_counter(flow);
1704                                 lastuse = mlx5_fc_query_lastuse(counter);
1705                                 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1706                                         neigh_used = true;
1707                                         break;
1708                                 }
1709                         }
1710                 }
1711                 mutex_unlock(&esw->offloads.encap_tbl_lock);
1712
1713                 mlx5e_put_encap_flow_list(priv, &flow_list);
1714                 if (neigh_used) {
1715                         /* release current encap before breaking the loop */
1716                         mlx5e_encap_put(priv, e);
1717                         break;
1718                 }
1719         }
1720
1721         trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
1722
1723         if (neigh_used) {
1724                 nhe->reported_lastuse = jiffies;
1725
1726                 /* find the relevant neigh according to the cached device and
1727                  * dst ip pair
1728                  */
1729                 n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
1730                 if (!n)
1731                         return;
1732
1733                 neigh_event_send(n, NULL);
1734                 neigh_release(n);
1735         }
1736 }
1737
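     /* Free an encap entry after its last reference is dropped: detach it from
      * the representor, release the HW reformat object if one was created and
      * free the entry after an RCU grace period.
      */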
1738 static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1739 {
1740         WARN_ON(!list_empty(&e->flows));
1741
1742         if (e->compl_result > 0) {
1743                 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1744
1745                 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1746                         mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
1747         }
1748
1749         kfree(e->tun_info);
1750         kfree(e->encap_header);
1751         kfree_rcu(e, rcu);
1752 }
1753
1754 static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
1755                                 struct mlx5e_decap_entry *d)
1756 {
1757         WARN_ON(!list_empty(&d->flows));
1758
1759         if (!d->compl_result)
1760                 mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
1761
1762         kfree_rcu(d, rcu);
1763 }
1764
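     /* Drop a reference on an encap entry; on the last put remove it from the
      * encap hash table (under encap_tbl_lock) and free it.
      */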
1765 void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
1766 {
1767         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1768
1769         if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
1770                 return;
1771         hash_del_rcu(&e->encap_hlist);
1772         mutex_unlock(&esw->offloads.encap_tbl_lock);
1773
1774         mlx5e_encap_dealloc(priv, e);
1775 }
1776
1777 static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
1778 {
1779         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1780
1781         if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
1782                 return;
1783         hash_del_rcu(&d->hlist);
1784         mutex_unlock(&esw->offloads.decap_tbl_lock);
1785
1786         mlx5e_decap_dealloc(priv, d);
1787 }
1788
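     /* Unlink the flow from the encap entry it uses at @out_index and drop the
      * flow's reference; the entry is freed when the last user goes away.
      */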
1789 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1790                                struct mlx5e_tc_flow *flow, int out_index)
1791 {
1792         struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
1793         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1794
1795         /* flow wasn't fully initialized */
1796         if (!e)
1797                 return;
1798
1799         mutex_lock(&esw->offloads.encap_tbl_lock);
1800         list_del(&flow->encaps[out_index].list);
1801         flow->encaps[out_index].e = NULL;
1802         if (!refcount_dec_and_test(&e->refcnt)) {
1803                 mutex_unlock(&esw->offloads.encap_tbl_lock);
1804                 return;
1805         }
1806         hash_del_rcu(&e->encap_hlist);
1807         mutex_unlock(&esw->offloads.encap_tbl_lock);
1808
1809         mlx5e_encap_dealloc(priv, e);
1810 }
1811
1812 static void mlx5e_detach_decap(struct mlx5e_priv *priv,
1813                                struct mlx5e_tc_flow *flow)
1814 {
1815         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1816         struct mlx5e_decap_entry *d = flow->decap_reformat;
1817
1818         if (!d)
1819                 return;
1820
1821         mutex_lock(&esw->offloads.decap_tbl_lock);
1822         list_del(&flow->l3_to_l2_reformat);
1823         flow->decap_reformat = NULL;
1824
1825         if (!refcount_dec_and_test(&d->refcnt)) {
1826                 mutex_unlock(&esw->offloads.decap_tbl_lock);
1827                 return;
1828         }
1829         hash_del_rcu(&d->hlist);
1830         mutex_unlock(&esw->offloads.decap_tbl_lock);
1831
1832         mlx5e_decap_dealloc(priv, d);
1833 }
1834
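     /* Remove the duplicate flow that was installed on the peer eswitch for
      * this eswitch flow (devcom/merged eswitch case), if one exists.
      */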
1835 static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1836 {
1837         struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
1838
1839         if (!flow_flag_test(flow, ESWITCH) ||
1840             !flow_flag_test(flow, DUP))
1841                 return;
1842
1843         mutex_lock(&esw->offloads.peer_mutex);
1844         list_del(&flow->peer);
1845         mutex_unlock(&esw->offloads.peer_mutex);
1846
1847         flow_flag_clear(flow, DUP);
1848
1849         if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
1850                 mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
1851                 kfree(flow->peer_flow);
1852         }
1853
1854         flow->peer_flow = NULL;
1855 }
1856
1857 static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
1858 {
1859         struct mlx5_core_dev *dev = flow->priv->mdev;
1860         struct mlx5_devcom *devcom = dev->priv.devcom;
1861         struct mlx5_eswitch *peer_esw;
1862
1863         peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1864         if (!peer_esw)
1865                 return;
1866
1867         __mlx5e_tc_del_fdb_peer_flow(flow);
1868         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
1869 }
1870
1871 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1872                               struct mlx5e_tc_flow *flow)
1873 {
1874         if (mlx5e_is_eswitch_flow(flow)) {
1875                 mlx5e_tc_del_fdb_peer_flow(flow);
1876                 mlx5e_tc_del_fdb_flow(priv, flow);
1877         } else {
1878                 mlx5e_tc_del_nic_flow(priv, flow);
1879         }
1880 }
1881
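     /* Return true if the tc rule contains a goto-chain action. */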
1882 static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
1883 {
1884         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1885         struct flow_action *flow_action = &rule->action;
1886         const struct flow_action_entry *act;
1887         int i;
1888
1889         flow_action_for_each(i, act, flow_action) {
1890                 switch (act->id) {
1891                 case FLOW_ACTION_GOTO:
1892                         return true;
1893                 default:
1894                         continue;
1895                 }
1896         }
1897
1898         return false;
1899 }
1900
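     /* The tunnel options mask must either be all zeros (don't care) or describe
      * a full match; a partial match of tunnel options in chain > 0 isn't
      * supported.
      */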
1901 static int
1902 enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
1903                                     struct flow_dissector_key_enc_opts *opts,
1904                                     struct netlink_ext_ack *extack,
1905                                     bool *dont_care)
1906 {
1907         struct geneve_opt *opt;
1908         int off = 0;
1909
1910         *dont_care = true;
1911
1912         while (opts->len > off) {
1913                 opt = (struct geneve_opt *)&opts->data[off];
1914
1915                 if (!(*dont_care) || opt->opt_class || opt->type ||
1916                     memchr_inv(opt->opt_data, 0, opt->length * 4)) {
1917                         *dont_care = false;
1918
1919                         if (opt->opt_class != htons(U16_MAX) ||
1920                             opt->type != U8_MAX) {
1921                                 NL_SET_ERR_MSG(extack,
1922                                                "Partial match of tunnel options in chain > 0 isn't supported");
1923                                 netdev_warn(priv->netdev,
1924                                             "Partial match of tunnel options in chain > 0 isn't supported");
1925                                 return -EOPNOTSUPP;
1926                         }
1927                 }
1928
1929                 off += sizeof(struct geneve_opt) + opt->length * 4;
1930         }
1931
1932         return 0;
1933 }
1934
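     /* Copy the dissector key @diss_key from the rule's match key into @dst,
      * using the size of the destination type.
      */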
1935 #define COPY_DISSECTOR(rule, diss_key, dst)\
1936 ({ \
1937         struct flow_rule *__rule = (rule);\
1938         typeof(dst) __dst = dst;\
1939 \
1940         memcpy(__dst,\
1941                skb_flow_dissector_target(__rule->match.dissector,\
1942                                          diss_key,\
1943                                          __rule->match.key),\
1944                sizeof(*__dst));\
1945 })
1946
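     /* Map the tunnel match (outer headers + filter ifindex) and any tunnel
      * options to compact IDs and combine them into flow->tunnel_id. On chain 0
      * a header rewrite stores the value in TUNNEL_TO_REG; on chains > 0 the
      * flow matches on that register instead.
      */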
1947 static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
1948                                     struct mlx5e_tc_flow *flow,
1949                                     struct flow_cls_offload *f,
1950                                     struct net_device *filter_dev)
1951 {
1952         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1953         struct netlink_ext_ack *extack = f->common.extack;
1954         struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
1955         struct flow_match_enc_opts enc_opts_match;
1956         struct tunnel_match_enc_opts tun_enc_opts;
1957         struct mlx5_rep_uplink_priv *uplink_priv;
1958         struct mlx5_flow_attr *attr = flow->attr;
1959         struct mlx5e_rep_priv *uplink_rpriv;
1960         struct tunnel_match_key tunnel_key;
1961         bool enc_opts_is_dont_care = true;
1962         u32 tun_id, enc_opts_id = 0;
1963         struct mlx5_eswitch *esw;
1964         u32 value, mask;
1965         int err;
1966
1967         esw = priv->mdev->priv.eswitch;
1968         uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1969         uplink_priv = &uplink_rpriv->uplink_priv;
1970
1971         memset(&tunnel_key, 0, sizeof(tunnel_key));
1972         COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
1973                        &tunnel_key.enc_control);
1974         if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
1975                 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1976                                &tunnel_key.enc_ipv4);
1977         else
1978                 COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1979                                &tunnel_key.enc_ipv6);
1980         COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
1981         COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
1982                        &tunnel_key.enc_tp);
1983         COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
1984                        &tunnel_key.enc_key_id);
1985         tunnel_key.filter_ifindex = filter_dev->ifindex;
1986
1987         err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
1988         if (err)
1989                 return err;
1990
1991         flow_rule_match_enc_opts(rule, &enc_opts_match);
1992         err = enc_opts_is_dont_care_or_full_match(priv,
1993                                                   enc_opts_match.mask,
1994                                                   extack,
1995                                                   &enc_opts_is_dont_care);
1996         if (err)
1997                 goto err_enc_opts;
1998
1999         if (!enc_opts_is_dont_care) {
2000                 memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
2001                 memcpy(&tun_enc_opts.key, enc_opts_match.key,
2002                        sizeof(*enc_opts_match.key));
2003                 memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
2004                        sizeof(*enc_opts_match.mask));
2005
2006                 err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
2007                                   &tun_enc_opts, &enc_opts_id);
2008                 if (err)
2009                         goto err_enc_opts;
2010         }
2011
2012         value = tun_id << ENC_OPTS_BITS | enc_opts_id;
2013         mask = enc_opts_id ? TUNNEL_ID_MASK :
2014                              (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
2015
2016         if (attr->chain) {
2017                 mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
2018                                             TUNNEL_TO_REG, value, mask);
2019         } else {
2020                 mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
2021                 err = mlx5e_tc_match_to_reg_set(priv->mdev,
2022                                                 mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
2023                                                 TUNNEL_TO_REG, value);
2024                 if (err)
2025                         goto err_set;
2026
2027                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2028         }
2029
2030         flow->tunnel_id = value;
2031         return 0;
2032
2033 err_set:
2034         if (enc_opts_id)
2035                 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2036                                enc_opts_id);
2037 err_enc_opts:
2038         mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2039         return err;
2040 }
2041
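     /* Release the tunnel and tunnel-options mapping IDs taken by
      * mlx5e_get_flow_tunnel_id().
      */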
2042 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
2043 {
2044         u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
2045         u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
2046         struct mlx5_rep_uplink_priv *uplink_priv;
2047         struct mlx5e_rep_priv *uplink_rpriv;
2048         struct mlx5_eswitch *esw;
2049
2050         esw = flow->priv->mdev->priv.eswitch;
2051         uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2052         uplink_priv = &uplink_rpriv->uplink_priv;
2053
2054         if (tun_id)
2055                 mapping_remove(uplink_priv->tunnel_mapping, tun_id);
2056         if (enc_opts_id)
2057                 mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
2058                                enc_opts_id);
2059 }
2060
2061 u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
2062 {
2063         return flow->tunnel_id;
2064 }
2065
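     /* Match on ip_version instead of ethertype when the device supports it and
      * the rule is an exact IPv4/IPv6 ethertype match; otherwise match on the
      * ethertype field directly.
      */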
2066 void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
2067                             struct flow_match_basic *match, bool outer,
2068                             void *headers_c, void *headers_v)
2069 {
2070         bool ip_version_cap;
2071
2072         ip_version_cap = outer ?
2073                 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2074                                           ft_field_support.outer_ip_version) :
2075                 MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
2076                                           ft_field_support.inner_ip_version);
2077
2078         if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
2079             (match->key->n_proto == htons(ETH_P_IP) ||
2080              match->key->n_proto == htons(ETH_P_IPV6))) {
2081                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
2082                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
2083                          match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
2084         } else {
2085                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
2086                          ntohs(match->mask->n_proto));
2087                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
2088                          ntohs(match->key->n_proto));
2089         }
2090 }
2091
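     /* Parse the tunnel part of an eswitch flow match. On chain 0 the tunnel
      * headers are matched directly and a decap action may be added; when the
      * match must survive a goto chain, or the flow lives on chain > 0, a
      * register mapping is used as well (see mlx5e_get_flow_tunnel_id()).
      */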
2092 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2093                              struct mlx5e_tc_flow *flow,
2094                              struct mlx5_flow_spec *spec,
2095                              struct flow_cls_offload *f,
2096                              struct net_device *filter_dev,
2097                              u8 *match_level,
2098                              bool *match_inner)
2099 {
2100         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2101         struct netlink_ext_ack *extack = f->common.extack;
2102         bool needs_mapping, sets_mapping;
2103         int err;
2104
2105         if (!mlx5e_is_eswitch_flow(flow))
2106                 return -EOPNOTSUPP;
2107
2108         needs_mapping = !!flow->attr->chain;
2109         sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
2110         *match_inner = !needs_mapping;
2111
2112         if ((needs_mapping || sets_mapping) &&
2113             !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2114                 NL_SET_ERR_MSG(extack,
2115                                "Chains on tunnel devices aren't supported without register loopback support");
2116                 netdev_warn(priv->netdev,
2117                             "Chains on tunnel devices aren't supported without register loopback support");
2118                 return -EOPNOTSUPP;
2119         }
2120
2121         if (!flow->attr->chain) {
2122                 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2123                                          match_level);
2124                 if (err) {
2125                         NL_SET_ERR_MSG_MOD(extack,
2126                                            "Failed to parse tunnel attributes");
2127                         netdev_warn(priv->netdev,
2128                                     "Failed to parse tunnel attributes");
2129                         return err;
2130                 }
2131
2132                 /* With mpls over udp we decapsulate using a packet reformat
2133                  * object instead of the generic decap action
2134                  */
2135                 if (!netif_is_bareudp(filter_dev))
2136                         flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2137         }
2138
2139         if (!needs_mapping && !sets_mapping)
2140                 return 0;
2141
2142         return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2143 }
2144
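     /* Helpers returning the inner or outer headers portion of the match
      * criteria/value; flows that decapsulate match on the inner headers.
      */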
2145 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2146 {
2147         return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2148                             inner_headers);
2149 }
2150
2151 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2152 {
2153         return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2154                             inner_headers);
2155 }
2156
2157 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2158 {
2159         return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2160                             outer_headers);
2161 }
2162
2163 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2164 {
2165         return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2166                             outer_headers);
2167 }
2168
2169 static void *get_match_headers_value(u32 flags,
2170                                      struct mlx5_flow_spec *spec)
2171 {
2172         return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2173                 get_match_inner_headers_value(spec) :
2174                 get_match_outer_headers_value(spec);
2175 }
2176
2177 static void *get_match_headers_criteria(u32 flags,
2178                                         struct mlx5_flow_spec *spec)
2179 {
2180         return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2181                 get_match_inner_headers_criteria(spec) :
2182                 get_match_outer_headers_criteria(spec);
2183 }
2184
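     /* Validate an optional meta (ingress ifindex) match: only an exact match on
      * the filter device itself is supported.
      */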
2185 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2186                                    struct flow_cls_offload *f)
2187 {
2188         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2189         struct netlink_ext_ack *extack = f->common.extack;
2190         struct net_device *ingress_dev;
2191         struct flow_match_meta match;
2192
2193         if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2194                 return 0;
2195
2196         flow_rule_match_meta(rule, &match);
2197         if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
2198                 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
2199                 return -EOPNOTSUPP;
2200         }
2201
2202         ingress_dev = __dev_get_by_index(dev_net(filter_dev),
2203                                          match.key->ingress_ifindex);
2204         if (!ingress_dev) {
2205                 NL_SET_ERR_MSG_MOD(extack,
2206                                    "Can't find the ingress port to match on");
2207                 return -ENOENT;
2208         }
2209
2210         if (ingress_dev != filter_dev) {
2211                 NL_SET_ERR_MSG_MOD(extack,
2212                                    "Can't match on the ingress filter port");
2213                 return -EOPNOTSUPP;
2214         }
2215
2216         return 0;
2217 }
2218
2219 static bool skip_key_basic(struct net_device *filter_dev,
2220                            struct flow_cls_offload *f)
2221 {
2222         /* When doing mpls over udp decap, the user needs to provide
2223          * MPLS_UC as the protocol in order to be able to match on mpls
2224          * label fields.  However, the actual ethertype is IP so we want to
2225          * avoid matching on this, otherwise we'll fail the match.
2226          */
2227         if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
2228                 return true;
2229
2230         return false;
2231 }
2232
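     /* Translate a flower classifier match into the mlx5 flow spec, filling the
      * outer (and, after decap, inner) header criteria and values and reporting
      * the deepest layer matched through the two match_level outputs.
      */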
2233 static int __parse_cls_flower(struct mlx5e_priv *priv,
2234                               struct mlx5e_tc_flow *flow,
2235                               struct mlx5_flow_spec *spec,
2236                               struct flow_cls_offload *f,
2237                               struct net_device *filter_dev,
2238                               u8 *inner_match_level, u8 *outer_match_level)
2239 {
2240         struct netlink_ext_ack *extack = f->common.extack;
2241         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2242                                        outer_headers);
2243         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2244                                        outer_headers);
2245         void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2246                                     misc_parameters);
2247         void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2248                                     misc_parameters);
2249         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2250         struct flow_dissector *dissector = rule->match.dissector;
2251         u16 addr_type = 0;
2252         u8 ip_proto = 0;
2253         u8 *match_level;
2254         int err;
2255
2256         match_level = outer_match_level;
2257
2258         if (dissector->used_keys &
2259             ~(BIT(FLOW_DISSECTOR_KEY_META) |
2260               BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2261               BIT(FLOW_DISSECTOR_KEY_BASIC) |
2262               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2263               BIT(FLOW_DISSECTOR_KEY_VLAN) |
2264               BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2265               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2266               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2267               BIT(FLOW_DISSECTOR_KEY_PORTS) |
2268               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2269               BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2270               BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2271               BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2272               BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2273               BIT(FLOW_DISSECTOR_KEY_TCP) |
2274               BIT(FLOW_DISSECTOR_KEY_IP)  |
2275               BIT(FLOW_DISSECTOR_KEY_CT) |
2276               BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2277               BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2278               BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2279                 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2280                 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
2281                            dissector->used_keys);
2282                 return -EOPNOTSUPP;
2283         }
2284
2285         if (mlx5e_get_tc_tun(filter_dev)) {
2286                 bool match_inner = false;
2287
2288                 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2289                                         outer_match_level, &match_inner);
2290                 if (err)
2291                         return err;
2292
2293                 if (match_inner) {
2294                         /* header pointers should point to the inner headers
2295                          * if the packet was decapsulated already.
2296                          * outer headers are set by parse_tunnel_attr.
2297                          */
2298                         match_level = inner_match_level;
2299                         headers_c = get_match_inner_headers_criteria(spec);
2300                         headers_v = get_match_inner_headers_value(spec);
2301                 }
2302         }
2303
2304         err = mlx5e_flower_parse_meta(filter_dev, f);
2305         if (err)
2306                 return err;
2307
2308         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2309             !skip_key_basic(filter_dev, f)) {
2310                 struct flow_match_basic match;
2311
2312                 flow_rule_match_basic(rule, &match);
2313                 mlx5e_tc_set_ethertype(priv->mdev, &match,
2314                                        match_level == outer_match_level,
2315                                        headers_c, headers_v);
2316
2317                 if (match.mask->n_proto)
2318                         *match_level = MLX5_MATCH_L2;
2319         }
2320         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2321             is_vlan_dev(filter_dev)) {
2322                 struct flow_dissector_key_vlan filter_dev_mask;
2323                 struct flow_dissector_key_vlan filter_dev_key;
2324                 struct flow_match_vlan match;
2325
2326                 if (is_vlan_dev(filter_dev)) {
2327                         match.key = &filter_dev_key;
2328                         match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2329                         match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2330                         match.key->vlan_priority = 0;
2331                         match.mask = &filter_dev_mask;
2332                         memset(match.mask, 0xff, sizeof(*match.mask));
2333                         match.mask->vlan_priority = 0;
2334                 } else {
2335                         flow_rule_match_vlan(rule, &match);
2336                 }
2337                 if (match.mask->vlan_id ||
2338                     match.mask->vlan_priority ||
2339                     match.mask->vlan_tpid) {
2340                         if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2341                                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2342                                          svlan_tag, 1);
2343                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2344                                          svlan_tag, 1);
2345                         } else {
2346                                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2347                                          cvlan_tag, 1);
2348                                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2349                                          cvlan_tag, 1);
2350                         }
2351
2352                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2353                                  match.mask->vlan_id);
2354                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2355                                  match.key->vlan_id);
2356
2357                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2358                                  match.mask->vlan_priority);
2359                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2360                                  match.key->vlan_priority);
2361
2362                         *match_level = MLX5_MATCH_L2;
2363                 }
2364         } else if (*match_level != MLX5_MATCH_NONE) {
2365                 /* cvlan_tag enabled in match criteria and
2366                  * disabled in match value means both S & C tags
2367                  * don't exist (the packet is untagged)
2368                  */
2369                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2370                 *match_level = MLX5_MATCH_L2;
2371         }
2372
2373         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2374                 struct flow_match_vlan match;
2375
2376                 flow_rule_match_cvlan(rule, &match);
2377                 if (match.mask->vlan_id ||
2378                     match.mask->vlan_priority ||
2379                     match.mask->vlan_tpid) {
2380                         if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2381                                 MLX5_SET(fte_match_set_misc, misc_c,
2382                                          outer_second_svlan_tag, 1);
2383                                 MLX5_SET(fte_match_set_misc, misc_v,
2384                                          outer_second_svlan_tag, 1);
2385                         } else {
2386                                 MLX5_SET(fte_match_set_misc, misc_c,
2387                                          outer_second_cvlan_tag, 1);
2388                                 MLX5_SET(fte_match_set_misc, misc_v,
2389                                          outer_second_cvlan_tag, 1);
2390                         }
2391
2392                         MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2393                                  match.mask->vlan_id);
2394                         MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2395                                  match.key->vlan_id);
2396                         MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2397                                  match.mask->vlan_priority);
2398                         MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2399                                  match.key->vlan_priority);
2400
2401                         *match_level = MLX5_MATCH_L2;
2402                         spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2403                 }
2404         }
2405
2406         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2407                 struct flow_match_eth_addrs match;
2408
2409                 flow_rule_match_eth_addrs(rule, &match);
2410                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2411                                              dmac_47_16),
2412                                 match.mask->dst);
2413                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2414                                              dmac_47_16),
2415                                 match.key->dst);
2416
2417                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2418                                              smac_47_16),
2419                                 match.mask->src);
2420                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2421                                              smac_47_16),
2422                                 match.key->src);
2423
2424                 if (!is_zero_ether_addr(match.mask->src) ||
2425                     !is_zero_ether_addr(match.mask->dst))
2426                         *match_level = MLX5_MATCH_L2;
2427         }
2428
2429         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2430                 struct flow_match_control match;
2431
2432                 flow_rule_match_control(rule, &match);
2433                 addr_type = match.key->addr_type;
2434
2435                 /* the HW doesn't support frag first/later */
2436                 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2437                         return -EOPNOTSUPP;
2438
2439                 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2440                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2441                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2442                                  match.key->flags & FLOW_DIS_IS_FRAGMENT);
2443
2444                         /* the HW doesn't need L3 inline to match on frag=no */
2445                         if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2446                                 *match_level = MLX5_MATCH_L2;
2447                         else
2448                                 *match_level = MLX5_MATCH_L3;
2449                 }
2450         }
2451         /* ***  L2 attributes parsing up to here *** */
2452
2453         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2454                 struct flow_match_basic match;
2455
2456                 flow_rule_match_basic(rule, &match);
2457                 ip_proto = match.key->ip_proto;
2458
2459                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2460                          match.mask->ip_proto);
2461                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2462                          match.key->ip_proto);
2463
2464                 if (match.mask->ip_proto)
2465                         *match_level = MLX5_MATCH_L3;
2466         }
2467
2468         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2469                 struct flow_match_ipv4_addrs match;
2470
2471                 flow_rule_match_ipv4_addrs(rule, &match);
2472                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2473                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2474                        &match.mask->src, sizeof(match.mask->src));
2475                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2476                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
2477                        &match.key->src, sizeof(match.key->src));
2478                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2479                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2480                        &match.mask->dst, sizeof(match.mask->dst));
2481                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2482                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2483                        &match.key->dst, sizeof(match.key->dst));
2484
2485                 if (match.mask->src || match.mask->dst)
2486                         *match_level = MLX5_MATCH_L3;
2487         }
2488
2489         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2490                 struct flow_match_ipv6_addrs match;
2491
2492                 flow_rule_match_ipv6_addrs(rule, &match);
2493                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2494                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2495                        &match.mask->src, sizeof(match.mask->src));
2496                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2497                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
2498                        &match.key->src, sizeof(match.key->src));
2499
2500                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2501                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2502                        &match.mask->dst, sizeof(match.mask->dst));
2503                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2504                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2505                        &match.key->dst, sizeof(match.key->dst));
2506
2507                 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2508                     ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2509                         *match_level = MLX5_MATCH_L3;
2510         }
2511
2512         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2513                 struct flow_match_ip match;
2514
2515                 flow_rule_match_ip(rule, &match);
2516                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2517                          match.mask->tos & 0x3);
2518                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2519                          match.key->tos & 0x3);
2520
2521                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2522                          match.mask->tos >> 2);
2523                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2524                          match.key->tos  >> 2);
2525
2526                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2527                          match.mask->ttl);
2528                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2529                          match.key->ttl);
2530
2531                 if (match.mask->ttl &&
2532                     !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2533                                                 ft_field_support.outer_ipv4_ttl)) {
2534                         NL_SET_ERR_MSG_MOD(extack,
2535                                            "Matching on TTL is not supported");
2536                         return -EOPNOTSUPP;
2537                 }
2538
2539                 if (match.mask->tos || match.mask->ttl)
2540                         *match_level = MLX5_MATCH_L3;
2541         }
2542
2543         /* ***  L3 attributes parsing up to here *** */
2544
2545         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2546                 struct flow_match_ports match;
2547
2548                 flow_rule_match_ports(rule, &match);
2549                 switch (ip_proto) {
2550                 case IPPROTO_TCP:
2551                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2552                                  tcp_sport, ntohs(match.mask->src));
2553                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2554                                  tcp_sport, ntohs(match.key->src));
2555
2556                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2557                                  tcp_dport, ntohs(match.mask->dst));
2558                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2559                                  tcp_dport, ntohs(match.key->dst));
2560                         break;
2561
2562                 case IPPROTO_UDP:
2563                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2564                                  udp_sport, ntohs(match.mask->src));
2565                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2566                                  udp_sport, ntohs(match.key->src));
2567
2568                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2569                                  udp_dport, ntohs(match.mask->dst));
2570                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2571                                  udp_dport, ntohs(match.key->dst));
2572                         break;
2573                 default:
2574                         NL_SET_ERR_MSG_MOD(extack,
2575                                            "Only UDP and TCP transports are supported for L4 matching");
2576                         netdev_err(priv->netdev,
2577                                    "Only UDP and TCP transports are supported\n");
2578                         return -EINVAL;
2579                 }
2580
2581                 if (match.mask->src || match.mask->dst)
2582                         *match_level = MLX5_MATCH_L4;
2583         }
2584
2585         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2586                 struct flow_match_tcp match;
2587
2588                 flow_rule_match_tcp(rule, &match);
2589                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2590                          ntohs(match.mask->flags));
2591                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2592                          ntohs(match.key->flags));
2593
2594                 if (match.mask->flags)
2595                         *match_level = MLX5_MATCH_L4;
2596         }
2597
2598         return 0;
2599 }
2600
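     /* Wrapper around __parse_cls_flower() that also enforces the eswitch
      * min-inline mode against the deepest non-tunnel match level for non-uplink
      * representors.
      */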
2601 static int parse_cls_flower(struct mlx5e_priv *priv,
2602                             struct mlx5e_tc_flow *flow,
2603                             struct mlx5_flow_spec *spec,
2604                             struct flow_cls_offload *f,
2605                             struct net_device *filter_dev)
2606 {
2607         u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2608         struct netlink_ext_ack *extack = f->common.extack;
2609         struct mlx5_core_dev *dev = priv->mdev;
2610         struct mlx5_eswitch *esw = dev->priv.eswitch;
2611         struct mlx5e_rep_priv *rpriv = priv->ppriv;
2612         struct mlx5_eswitch_rep *rep;
2613         bool is_eswitch_flow;
2614         int err;
2615
2616         inner_match_level = MLX5_MATCH_NONE;
2617         outer_match_level = MLX5_MATCH_NONE;
2618
2619         err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2620                                  &inner_match_level, &outer_match_level);
2621         non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2622                                  outer_match_level : inner_match_level;
2623
2624         is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2625         if (!err && is_eswitch_flow) {
2626                 rep = rpriv->rep;
2627                 if (rep->vport != MLX5_VPORT_UPLINK &&
2628                     (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2629                     esw->offloads.inline_mode < non_tunnel_match_level)) {
2630                         NL_SET_ERR_MSG_MOD(extack,
2631                                            "Flow is not offloaded due to min inline setting");
2632                         netdev_warn(priv->netdev,
2633                                     "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2634                                     non_tunnel_match_level, esw->offloads.inline_mode);
2635                         return -EOPNOTSUPP;
2636                 }
2637         }
2638
2639         flow->attr->inner_match_level = inner_match_level;
2640         flow->attr->outer_match_level = outer_match_level;
2641
2642
2643         return err;
2644 }
2645
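     /* Accumulated header rewrite (pedit) values and masks, kept per protocol
      * header so multiple mangle actions can be merged before being translated
      * into device modify-header actions.
      */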
2646 struct pedit_headers {
2647         struct ethhdr  eth;
2648         struct vlan_hdr vlan;
2649         struct iphdr   ip4;
2650         struct ipv6hdr ip6;
2651         struct tcphdr  tcp;
2652         struct udphdr  udp;
2653 };
2654
2655 struct pedit_headers_action {
2656         struct pedit_headers    vals;
2657         struct pedit_headers    masks;
2658         u32                     pedits;
2659 };
2660
2661 static int pedit_header_offsets[] = {
2662         [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2663         [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2664         [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2665         [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2666         [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2667 };
2668
2669 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2670
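     /* Record one pedit mask/value pair at @offset within the shadow header of
      * type @hdr_type; touching the same bits twice is rejected.
      */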
2671 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2672                          struct pedit_headers_action *hdrs)
2673 {
2674         u32 *curr_pmask, *curr_pval;
2675
2676         curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2677         curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2678
2679         if (*curr_pmask & mask)  /* disallow acting twice on the same location */
2680                 goto out_err;
2681
2682         *curr_pmask |= mask;
2683         *curr_pval  |= (val & mask);
2684
2685         return 0;
2686
2687 out_err:
2688         return -EOPNOTSUPP;
2689 }
2690
2691 struct mlx5_fields {
2692         u8  field;
2693         u8  field_bsize;
2694         u32 field_mask;
2695         u32 offset;
2696         u32 match_offset;
2697 };
2698
2699 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2700                 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2701                  offsetof(struct pedit_headers, field) + (off), \
2702                  MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2703
2704 /* The masked rewrite value must equal the masked match value, and no bit may
2705  * be rewritten without also being matched on.
2706  */
2707 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2708         type matchmaskx = *(type *)(matchmaskp); \
2709         type matchvalx = *(type *)(matchvalp); \
2710         type maskx = *(type *)(maskp); \
2711         type valx = *(type *)(valp); \
2712         \
2713         (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2714                                                                  matchmaskx)); \
2715 })
2716
2717 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2718                          void *matchmaskp, u8 bsize)
2719 {
2720         bool same = false;
2721
2722         switch (bsize) {
2723         case 8:
2724                 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2725                 break;
2726         case 16:
2727                 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2728                 break;
2729         case 32:
2730                 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2731                 break;
2732         }
2733
2734         return same;
2735 }
2736
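/* Translation table between the pedit scratch headers above and the device
 * modify-header fields: firmware field id, field size in bits, the bits the
 * device may rewrite, the offset inside struct pedit_headers, and the offset
 * of the corresponding field in the fte_match_set_lyr_2_4 match criteria
 * (used to skip rewrites whose value is already guaranteed by the match).
 */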
2737 static struct mlx5_fields fields[] = {
2738         OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2739         OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2740         OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2741         OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2742         OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
2743         OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2744
2745         OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
2746         OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
2747         OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2748         OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2749
2750         OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2751                 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2752         OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2753                 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2754         OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2755                 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2756         OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2757                 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2758         OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2759                 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2760         OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2761                 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2762         OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2763                 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2764         OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2765                 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2766         OFFLOAD(IPV6_HOPLIMIT, 8,  U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2767         OFFLOAD(IP_DSCP, 16,  0xc00f, ip6, 0, ip_dscp),
2768
2769         OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source,  0, tcp_sport),
2770         OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,    0, tcp_dport),
2771         /* in linux tcphdr, tcp_flags is 8 bits long */
2772         OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),
2773
2774         OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2775         OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
2776 };
2777
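/* The pedit masks are stored in network (big endian) byte order; convert a
 * 16/32-bit mask to little endian so that the generic bit-scan helpers
 * (find_first_bit() etc.) report correct bit positions regardless of host
 * endianness when filling the set_action_in offset/length fields.
 */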
2778 static unsigned long mask_to_le(unsigned long mask, int size)
2779 {
2780         __be32 mask_be32;
2781         __be16 mask_be16;
2782
2783         if (size == 32) {
2784                 mask_be32 = (__force __be32)(mask);
2785                 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2786         } else if (size == 16) {
2787                 mask_be32 = (__force __be32)(mask);
2788                 mask_be16 = *(__be16 *)&mask_be32;
2789                 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2790         }
2791
2792         return mask;
2793 }
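
/* Convert the pedit writes accumulated in @hdrs into device modify-header
 * actions. For every entry of fields[] with a pending set/add: skip it when
 * the match already guarantees the value (set) or the delta is zero (add),
 * reject masks that cover non-contiguous sub-fields, and otherwise append a
 * MLX5_ACTION_TYPE_SET/ADD action (field id, offset, length, data) to
 * parse_attr->mod_hdr_acts.
 */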
2794 static int offload_pedit_fields(struct mlx5e_priv *priv,
2795                                 int namespace,
2796                                 struct pedit_headers_action *hdrs,
2797                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
2798                                 u32 *action_flags,
2799                                 struct netlink_ext_ack *extack)
2800 {
2801         struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2802         int i, action_size, first, last, next_z;
2803         void *headers_c, *headers_v, *action, *vals_p;
2804         u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2805         struct mlx5e_tc_mod_hdr_acts *mod_acts;
2806         struct mlx5_fields *f;
2807         unsigned long mask, field_mask;
2808         int err;
2809         u8 cmd;
2810
2811         mod_acts = &parse_attr->mod_hdr_acts;
2812         headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2813         headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2814
2815         set_masks = &hdrs[0].masks;
2816         add_masks = &hdrs[1].masks;
2817         set_vals = &hdrs[0].vals;
2818         add_vals = &hdrs[1].vals;
2819
2820         action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2821
2822         for (i = 0; i < ARRAY_SIZE(fields); i++) {
2823                 bool skip;
2824
2825                 f = &fields[i];
2826                 /* avoid seeing bits set from previous iterations */
2827                 s_mask = 0;
2828                 a_mask = 0;
2829
2830                 s_masks_p = (void *)set_masks + f->offset;
2831                 a_masks_p = (void *)add_masks + f->offset;
2832
2833                 s_mask = *s_masks_p & f->field_mask;
2834                 a_mask = *a_masks_p & f->field_mask;
2835
2836                 if (!s_mask && !a_mask) /* nothing to offload here */
2837                         continue;
2838
2839                 if (s_mask && a_mask) {
2840                         NL_SET_ERR_MSG_MOD(extack,
2841                                            "can't set and add to the same HW field");
2842                         printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2843                         return -EOPNOTSUPP;
2844                 }
2845
2846                 skip = false;
2847                 if (s_mask) {
2848                         void *match_mask = headers_c + f->match_offset;
2849                         void *match_val = headers_v + f->match_offset;
2850
2851                         cmd  = MLX5_ACTION_TYPE_SET;
2852                         mask = s_mask;
2853                         vals_p = (void *)set_vals + f->offset;
2854                         /* don't rewrite if we have a match on the same value */
2855                         if (cmp_val_mask(vals_p, s_masks_p, match_val,
2856                                          match_mask, f->field_bsize))
2857                                 skip = true;
2858                         /* clear to denote we consumed this field */
2859                         *s_masks_p &= ~f->field_mask;
2860                 } else {
2861                         cmd  = MLX5_ACTION_TYPE_ADD;
2862                         mask = a_mask;
2863                         vals_p = (void *)add_vals + f->offset;
2864                         /* add 0 is no change */
2865                         if ((*(u32 *)vals_p & f->field_mask) == 0)
2866                                 skip = true;
2867                         /* clear to denote we consumed this field */
2868                         *a_masks_p &= ~f->field_mask;
2869                 }
2870                 if (skip)
2871                         continue;
2872
2873                 mask = mask_to_le(mask, f->field_bsize);
2874
2875                 first = find_first_bit(&mask, f->field_bsize);
2876                 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2877                 last  = find_last_bit(&mask, f->field_bsize);
2878                 if (first < next_z && next_z < last) {
2879                         NL_SET_ERR_MSG_MOD(extack,
2880                                            "rewrite of non-contiguous sub-fields isn't supported");
2881                         printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
2882                                mask);
2883                         return -EOPNOTSUPP;
2884                 }
2885
2886                 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2887                 if (err) {
2888                         NL_SET_ERR_MSG_MOD(extack,
2889                                            "too many pedit actions, can't offload");
2890                         mlx5_core_warn(priv->mdev,
2891                                        "mlx5: parsed %d pedit actions, can't do more\n",
2892                                        mod_acts->num_actions);
2893                         return err;
2894                 }
2895
2896                 action = mod_acts->actions +
2897                          (mod_acts->num_actions * action_size);
2898                 MLX5_SET(set_action_in, action, action_type, cmd);
2899                 MLX5_SET(set_action_in, action, field, f->field);
2900
2901                 if (cmd == MLX5_ACTION_TYPE_SET) {
2902                         int start;
2903
2904                         field_mask = mask_to_le(f->field_mask, f->field_bsize);
2905
2906                         /* a bit-sized field may not start at the first bit */
2907                         start = find_first_bit(&field_mask, f->field_bsize);
2908
2909                         MLX5_SET(set_action_in, action, offset, first - start);
2910                         /* length is num of bits to be written, zero means length of 32 */
2911                         MLX5_SET(set_action_in, action, length, (last - first + 1));
2912                 }
2913
2914                 if (f->field_bsize == 32)
2915                         MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2916                 else if (f->field_bsize == 16)
2917                         MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2918                 else if (f->field_bsize == 8)
2919                         MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2920
2921                 ++mod_acts->num_actions;
2922         }
2923
2924         return 0;
2925 }
2926
2927 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2928                                                   int namespace)
2929 {
2930         if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2931                 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2932         else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2933                 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2934 }
2935
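/* Ensure there is room for at least one more modify-header action. The
 * action array grows geometrically (1, 2, 4, ...) and is capped at the
 * maximum number of modify-header actions the device supports for the
 * given namespace (FDB vs. NIC RX).
 */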
2936 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2937                           int namespace,
2938                           struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2939 {
2940         int action_size, new_num_actions, max_hw_actions;
2941         size_t new_sz, old_sz;
2942         void *ret;
2943
2944         if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2945                 return 0;
2946
2947         action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2948
2949         max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2950                                                                 namespace);
2951         new_num_actions = min(max_hw_actions,
2952                               mod_hdr_acts->actions ?
2953                               mod_hdr_acts->max_actions * 2 : 1);
2954         if (mod_hdr_acts->max_actions == new_num_actions)
2955                 return -ENOSPC;
2956
2957         new_sz = action_size * new_num_actions;
2958         old_sz = mod_hdr_acts->max_actions * action_size;
2959         ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2960         if (!ret)
2961                 return -ENOMEM;
2962
2963         memset(ret + old_sz, 0, new_sz - old_sz);
2964         mod_hdr_acts->actions = ret;
2965         mod_hdr_acts->max_actions = new_num_actions;
2966
2967         return 0;
2968 }
2969
2970 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2971 {
2972         kfree(mod_hdr_acts->actions);
2973         mod_hdr_acts->actions = NULL;
2974         mod_hdr_acts->num_actions = 0;
2975         mod_hdr_acts->max_actions = 0;
2976 }
2977
2978 static const struct pedit_headers zero_masks = {};
2979
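/* Fold one TC mangle/add action into the per-command scratch headers:
 * hdrs[0] accumulates FLOW_ACTION_MANGLE (set) and hdrs[1] FLOW_ACTION_ADD.
 * Note that act->mangle.mask is inverted - the zero bits are the ones being
 * rewritten. Illustrative example (not taken from this file): a filter using
 *   ... action pedit ex munge ip ttl set 63
 * arrives here as a single mangle entry and ends up as one
 * MLX5_ACTION_TYPE_SET on the OUT_IP_TTL field once offload_pedit_fields()
 * runs.
 */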
2980 static int
2981 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2982                           const struct flow_action_entry *act, int namespace,
2983                           struct mlx5e_tc_flow_parse_attr *parse_attr,
2984                           struct pedit_headers_action *hdrs,
2985                           struct netlink_ext_ack *extack)
2986 {
2987         u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2988         int err = -EOPNOTSUPP; /* can't be all optimistic */
2989         u32 mask, val, offset;
2990         u8 htype;
2991
2992         htype = act->mangle.htype;
2994
2995         if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2996                 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2997                 goto out_err;
2998         }
2999
3000         if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
3001                 NL_SET_ERR_MSG_MOD(extack,
3002                                    "The pedit offload action is not supported");
3003                 goto out_err;
3004         }
3005
3006         mask = act->mangle.mask;
3007         val = act->mangle.val;
3008         offset = act->mangle.offset;
3009
3010         err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
3011         if (err)
3012                 goto out_err;
3013
3014         hdrs[cmd].pedits++;
3015
3016         return 0;
3017 out_err:
3018         return err;
3019 }
3020
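/* For flows that decapsulate an L3 tunnel (L3_TO_L2_DECAP flag), Ethernet
 * pedits are not turned into modify-header actions; the mangled bytes are
 * written into parse_attr->eth instead, which later becomes the L2 header
 * of the L3_TUNNEL_TO_L2 packet reformat created in mlx5e_attach_decap().
 */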
3021 static int
3022 parse_pedit_to_reformat(struct mlx5e_priv *priv,
3023                         const struct flow_action_entry *act,
3024                         struct mlx5e_tc_flow_parse_attr *parse_attr,
3025                         struct netlink_ext_ack *extack)
3026 {
3027         u32 mask, val, offset;
3028         u32 *p;
3029
3030         if (act->id != FLOW_ACTION_MANGLE)
3031                 return -EOPNOTSUPP;
3032
3033         if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
3034                 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
3035                 return -EOPNOTSUPP;
3036         }
3037
3038         mask = ~act->mangle.mask;
3039         val = act->mangle.val;
3040         offset = act->mangle.offset;
3041         p = (u32 *)&parse_attr->eth;
3042         *(p + (offset >> 2)) |= (val & mask);
3043
3044         return 0;
3045 }
3046
3047 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
3048                                  const struct flow_action_entry *act, int namespace,
3049                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
3050                                  struct pedit_headers_action *hdrs,
3051                                  struct mlx5e_tc_flow *flow,
3052                                  struct netlink_ext_ack *extack)
3053 {
3054         if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
3055                 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
3056
3057         return parse_pedit_to_modify_hdr(priv, act, namespace,
3058                                          parse_attr, hdrs, extack);
3059 }
3060
3061 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3062                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
3063                                  struct pedit_headers_action *hdrs,
3064                                  u32 *action_flags,
3065                                  struct netlink_ext_ack *extack)
3066 {
3067         struct pedit_headers *cmd_masks;
3068         int err;
3069         u8 cmd;
3070
3071         err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
3072                                    action_flags, extack);
3073         if (err < 0)
3074                 goto out_dealloc_parsed_actions;
3075
3076         for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3077                 cmd_masks = &hdrs[cmd].masks;
3078                 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3079                         NL_SET_ERR_MSG_MOD(extack,
3080                                            "attempt to offload an unsupported field");
3081                         netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3082                         print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3083                                        16, 1, cmd_masks, sizeof(zero_masks), true);
3084                         err = -EOPNOTSUPP;
3085                         goto out_dealloc_parsed_actions;
3086                 }
3087         }
3088
3089         return 0;
3090
3091 out_dealloc_parsed_actions:
3092         dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3093         return err;
3094 }
3095
3096 static bool csum_offload_supported(struct mlx5e_priv *priv,
3097                                    u32 action,
3098                                    u32 update_flags,
3099                                    struct netlink_ext_ack *extack)
3100 {
3101         u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
3102                          TCA_CSUM_UPDATE_FLAG_UDP;
3103
3104         /* The HW recalculates checksums only when rewriting headers */
3105         if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
3106                 NL_SET_ERR_MSG_MOD(extack,
3107                                    "TC csum action is only offloaded with pedit");
3108                 netdev_warn(priv->netdev,
3109                             "TC csum action is only offloaded with pedit\n");
3110                 return false;
3111         }
3112
3113         if (update_flags & ~prot_flags) {
3114                 NL_SET_ERR_MSG_MOD(extack,
3115                                    "can't offload TC csum action for some headers");
3116                 netdev_warn(priv->netdev,
3117                             "can't offload TC csum action for some headers - flags %#x\n",
3118                             update_flags);
3119                 return false;
3120         }
3121
3122         return true;
3123 }
3124
3125 struct ip_ttl_word {
3126         __u8    ttl;
3127         __u8    protocol;
3128         __sum16 check;
3129 };
3130
3131 struct ipv6_hoplimit_word {
3132         __be16  payload_len;
3133         __u8    nexthdr;
3134         __u8    hop_limit;
3135 };
3136
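/* Inspect a single pedit and flag whether it modifies IP header fields other
 * than ttl/hop_limit (*modify_ip_header) and/or the tuple - IP addresses or
 * L4 ports (*modify_tuple). Tuple rewrites cannot be offloaded together with
 * a (non-clear) CT action and are rejected here in that case.
 */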
3137 static int is_action_keys_supported(const struct flow_action_entry *act,
3138                                     bool ct_flow, bool *modify_ip_header,
3139                                     bool *modify_tuple,
3140                                     struct netlink_ext_ack *extack)
3141 {
3142         u32 mask, offset;
3143         u8 htype;
3144
3145         htype = act->mangle.htype;
3146         offset = act->mangle.offset;
3147         mask = ~act->mangle.mask;
3148         /* For the IPv4 and IPv6 headers, check the 4-byte word that
3149          * contains ttl/hop_limit, to determine whether fields other
3150          * than ttl/hop_limit are being modified.
3151          */
3152         if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3153                 struct ip_ttl_word *ttl_word =
3154                         (struct ip_ttl_word *)&mask;
3155
3156                 if (offset != offsetof(struct iphdr, ttl) ||
3157                     ttl_word->protocol ||
3158                     ttl_word->check) {
3159                         *modify_ip_header = true;
3160                 }
3161
3162                 if (offset >= offsetof(struct iphdr, saddr))
3163                         *modify_tuple = true;
3164
3165                 if (ct_flow && *modify_tuple) {
3166                         NL_SET_ERR_MSG_MOD(extack,
3167                                            "can't offload re-write of ipv4 address with action ct");
3168                         return -EOPNOTSUPP;
3169                 }
3170         } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3171                 struct ipv6_hoplimit_word *hoplimit_word =
3172                         (struct ipv6_hoplimit_word *)&mask;
3173
3174                 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3175                     hoplimit_word->payload_len ||
3176                     hoplimit_word->nexthdr) {
3177                         *modify_ip_header = true;
3178                 }
3179
3180                 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3181                         *modify_tuple = true;
3182
3183                 if (ct_flow && *modify_tuple) {
3184                         NL_SET_ERR_MSG_MOD(extack,
3185                                            "can't offload re-write of ipv6 address with action ct");
3186                         return -EOPNOTSUPP;
3187                 }
3188         } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3189                    htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3190                 *modify_tuple = true;
3191                 if (ct_flow) {
3192                         NL_SET_ERR_MSG_MOD(extack,
3193                                            "can't offload re-write of transport header ports with action ct");
3194                         return -EOPNOTSUPP;
3195                 }
3196         }
3197
3198         return 0;
3199 }
3200
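/* Verify that all header-rewrite actions of a flow can be offloaded given
 * its match: non-IP traffic may only have its MACs rewritten, IP header
 * rewrites require a TCP/UDP/ICMP match, and tuple rewrites force a
 * ct_state=-trk match (failing if such a match cannot be added).
 */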
3201 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3202                                           struct mlx5_flow_spec *spec,
3203                                           struct flow_action *flow_action,
3204                                           u32 actions, bool ct_flow,
3205                                           bool ct_clear,
3206                                           struct netlink_ext_ack *extack)
3207 {
3208         const struct flow_action_entry *act;
3209         bool modify_ip_header, modify_tuple;
3210         void *headers_c;
3211         void *headers_v;
3212         u16 ethertype;
3213         u8 ip_proto;
3214         int i, err;
3215
3216         headers_c = get_match_headers_criteria(actions, spec);
3217         headers_v = get_match_headers_value(actions, spec);
3218         ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3219
3220         /* for non-IP we only re-write MACs, so we're okay */
3221         if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3222             ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3223                 goto out_ok;
3224
3225         modify_ip_header = false;
3226         modify_tuple = false;
3227         flow_action_for_each(i, act, flow_action) {
3228                 if (act->id != FLOW_ACTION_MANGLE &&
3229                     act->id != FLOW_ACTION_ADD)
3230                         continue;
3231
3232                 err = is_action_keys_supported(act, ct_flow,
3233                                                &modify_ip_header,
3234                                                &modify_tuple, extack);
3235                 if (err)
3236                         return false;
3237         }
3238
3239         /* Add a ct_state=-trk match so the rule is offloaded only for non-ct
3240          * flows (or after a clear action); otherwise, since the tuple is
3241          * changed, we could not restore the ct state.
3242          */
3243         if (!ct_clear && modify_tuple &&
3244             mlx5_tc_ct_add_no_trk_match(spec)) {
3245                 NL_SET_ERR_MSG_MOD(extack,
3246                                    "can't offload tuple modify header with ct matches");
3247                 netdev_info(priv->netdev,
3248                             "can't offload tuple modify header with ct matches\n");
3249                 return false;
3250         }
3251
3252         ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3253         if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3254             ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3255                 NL_SET_ERR_MSG_MOD(extack,
3256                                    "can't offload re-write of non TCP/UDP");
3257                 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3258                             ip_proto);
3259                 return false;
3260         }
3261
3262 out_ok:
3263         return true;
3264 }
3265
3266 static bool actions_match_supported(struct mlx5e_priv *priv,
3267                                     struct flow_action *flow_action,
3268                                     struct mlx5e_tc_flow_parse_attr *parse_attr,
3269                                     struct mlx5e_tc_flow *flow,
3270                                     struct netlink_ext_ack *extack)
3271 {
3272         bool ct_flow = false, ct_clear = false;
3273         u32 actions;
3274
3275         ct_clear = flow->attr->ct_attr.ct_action &
3276                 TCA_CT_ACT_CLEAR;
3277         ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3278         actions = flow->attr->action;
3279
3280         if (mlx5e_is_eswitch_flow(flow)) {
3281                 if (flow->attr->esw_attr->split_count && ct_flow) {
3282                         /* All registers used by ct are cleared when using
3283                          * split rules.
3284                          */
3285                         NL_SET_ERR_MSG_MOD(extack,
3286                                            "Can't offload mirroring with action ct");
3287                         return false;
3288                 }
3289         }
3290
3291         if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3292                 return modify_header_match_supported(priv, &parse_attr->spec,
3293                                                      flow_action, actions,
3294                                                      ct_flow, ct_clear,
3295                                                      extack);
3296
3297         return true;
3298 }
3299
3300 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3301 {
3302         return priv->mdev == peer_priv->mdev;
3303 }
3304
3305 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3306 {
3307         struct mlx5_core_dev *fmdev, *pmdev;
3308         u64 fsystem_guid, psystem_guid;
3309
3310         fmdev = priv->mdev;
3311         pmdev = peer_priv->mdev;
3312
3313         fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3314         psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3315
3316         return (fsystem_guid == psystem_guid);
3317 }
3318
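/* VLAN VID rewrite is implemented as an Ethernet pedit of the 16-bit TCI
 * field. It requires the flow to match on the customer VLAN tag and does
 * not allow changing the priority (PCP) bits.
 */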
3319 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3320                                    const struct flow_action_entry *act,
3321                                    struct mlx5e_tc_flow_parse_attr *parse_attr,
3322                                    struct pedit_headers_action *hdrs,
3323                                    u32 *action, struct netlink_ext_ack *extack)
3324 {
3325         u16 mask16 = VLAN_VID_MASK;
3326         u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3327         const struct flow_action_entry pedit_act = {
3328                 .id = FLOW_ACTION_MANGLE,
3329                 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3330                 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3331                 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3332                 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3333         };
3334         u8 match_prio_mask, match_prio_val;
3335         void *headers_c, *headers_v;
3336         int err;
3337
3338         headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3339         headers_v = get_match_headers_value(*action, &parse_attr->spec);
3340
3341         if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3342               MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3343                 NL_SET_ERR_MSG_MOD(extack,
3344                                    "VLAN rewrite action must have VLAN protocol match");
3345                 return -EOPNOTSUPP;
3346         }
3347
3348         match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3349         match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3350         if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3351                 NL_SET_ERR_MSG_MOD(extack,
3352                                    "Changing VLAN prio is not supported");
3353                 return -EOPNOTSUPP;
3354         }
3355
3356         err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3357         *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3358
3359         return err;
3360 }
3361
3362 static int
3363 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3364                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
3365                                  struct pedit_headers_action *hdrs,
3366                                  u32 *action, struct netlink_ext_ack *extack)
3367 {
3368         const struct flow_action_entry prio_tag_act = {
3369                 .vlan.vid = 0,
3370                 .vlan.prio =
3371                         MLX5_GET(fte_match_set_lyr_2_4,
3372                                  get_match_headers_value(*action,
3373                                                          &parse_attr->spec),
3374                                  first_prio) &
3375                         MLX5_GET(fte_match_set_lyr_2_4,
3376                                  get_match_headers_criteria(*action,
3377                                                             &parse_attr->spec),
3378                                  first_prio),
3379         };
3380
3381         return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3382                                        &prio_tag_act, parse_attr, hdrs, action,
3383                                        extack);
3384 }
3385
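/* A goto chain action is offloaded only if the destination chain is inside
 * the supported range, is not a lower-numbered chain (unless the chains
 * infrastructure supports backward jumps), the flow is not an FT (bypass)
 * flow, and the action is not combined with reformat/decap unless the
 * device advertises reformat_and_fwd_to_table.
 */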
3386 static int validate_goto_chain(struct mlx5e_priv *priv,
3387                                struct mlx5e_tc_flow *flow,
3388                                const struct flow_action_entry *act,
3389                                u32 actions,
3390                                struct netlink_ext_ack *extack)
3391 {
3392         bool is_esw = mlx5e_is_eswitch_flow(flow);
3393         struct mlx5_flow_attr *attr = flow->attr;
3394         bool ft_flow = mlx5e_is_ft_flow(flow);
3395         u32 dest_chain = act->chain_index;
3396         struct mlx5_fs_chains *chains;
3397         struct mlx5_eswitch *esw;
3398         u32 reformat_and_fwd;
3399         u32 max_chain;
3400
3401         esw = priv->mdev->priv.eswitch;
3402         chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3403         max_chain = mlx5_chains_get_chain_range(chains);
3404         reformat_and_fwd = is_esw ?
3405                            MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3406                            MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3407
3408         if (ft_flow) {
3409                 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3410                 return -EOPNOTSUPP;
3411         }
3412
3413         if (!mlx5_chains_backwards_supported(chains) &&
3414             dest_chain <= attr->chain) {
3415                 NL_SET_ERR_MSG_MOD(extack,
3416                                    "Goto lower numbered chain isn't supported");
3417                 return -EOPNOTSUPP;
3418         }
3419
3420         if (dest_chain > max_chain) {
3421                 NL_SET_ERR_MSG_MOD(extack,
3422                                    "Requested destination chain is out of supported range");
3423                 return -EOPNOTSUPP;
3424         }
3425
3426         if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3427                        MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3428             !reformat_and_fwd) {
3429                 NL_SET_ERR_MSG_MOD(extack,
3430                                    "Goto chain is not allowed if action has reformat or decap");
3431                 return -EOPNOTSUPP;
3432         }
3433
3434         return 0;
3435 }
3436
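/* Translate the TC actions of a NIC (non-eswitch) flow into
 * MLX5_FLOW_CONTEXT_ACTION_* flags and attribute fields: accept/drop,
 * pedit and csum, VLAN rewrite, hairpin redirect to a device on the same
 * HW, flow mark, goto chain and connection tracking.
 */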
3437 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3438                                 struct flow_action *flow_action,
3439                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
3440                                 struct mlx5e_tc_flow *flow,
3441                                 struct netlink_ext_ack *extack)
3442 {
3443         struct mlx5_flow_attr *attr = flow->attr;
3444         struct pedit_headers_action hdrs[2] = {};
3445         const struct flow_action_entry *act;
3446         struct mlx5_nic_flow_attr *nic_attr;
3447         u32 action = 0;
3448         int err, i;
3449
3450         if (!flow_action_has_entries(flow_action))
3451                 return -EINVAL;
3452
3453         if (!flow_action_hw_stats_check(flow_action, extack,
3454                                         FLOW_ACTION_HW_STATS_DELAYED_BIT))
3455                 return -EOPNOTSUPP;
3456
3457         nic_attr = attr->nic_attr;
3458
3459         nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3460
3461         flow_action_for_each(i, act, flow_action) {
3462                 switch (act->id) {
3463                 case FLOW_ACTION_ACCEPT:
3464                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3465                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
3466                         break;
3467                 case FLOW_ACTION_DROP:
3468                         action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3469                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
3470                                                flow_table_properties_nic_receive.flow_counter))
3471                                 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3472                         break;
3473                 case FLOW_ACTION_MANGLE:
3474                 case FLOW_ACTION_ADD:
3475                         err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3476                                                     parse_attr, hdrs, NULL, extack);
3477                         if (err)
3478                                 return err;
3479
3480                         action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3481                         break;
3482                 case FLOW_ACTION_VLAN_MANGLE:
3483                         err = add_vlan_rewrite_action(priv,
3484                                                       MLX5_FLOW_NAMESPACE_KERNEL,
3485                                                       act, parse_attr, hdrs,
3486                                                       &action, extack);
3487                         if (err)
3488                                 return err;
3489
3490                         break;
3491                 case FLOW_ACTION_CSUM:
3492                         if (csum_offload_supported(priv, action,
3493                                                    act->csum_flags,
3494                                                    extack))
3495                                 break;
3496
3497                         return -EOPNOTSUPP;
3498                 case FLOW_ACTION_REDIRECT: {
3499                         struct net_device *peer_dev = act->dev;
3500
3501                         if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3502                             same_hw_devs(priv, netdev_priv(peer_dev))) {
3503                                 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3504                                 flow_flag_set(flow, HAIRPIN);
3505                                 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3506                                           MLX5_FLOW_CONTEXT_ACTION_COUNT;
3507                         } else {
3508                                 NL_SET_ERR_MSG_MOD(extack,
3509                                                    "device is not on same HW, can't offload");
3510                                 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3511                                             peer_dev->name);
3512                                 return -EINVAL;
3513                         }
3514                         }
3515                         break;
3516                 case FLOW_ACTION_MARK: {
3517                         u32 mark = act->mark;
3518
3519                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3520                                 NL_SET_ERR_MSG_MOD(extack,
3521                                                    "Bad flow mark - only 16 bit is supported");
3522                                 return -EINVAL;
3523                         }
3524
3525                         nic_attr->flow_tag = mark;
3526                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3527                         }
3528                         break;
3529                 case FLOW_ACTION_GOTO:
3530                         err = validate_goto_chain(priv, flow, act, action,
3531                                                   extack);
3532                         if (err)
3533                                 return err;
3534
3535                         action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3536                         attr->dest_chain = act->chain_index;
3537                         break;
3538                 case FLOW_ACTION_CT:
3539                         err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3540                         if (err)
3541                                 return err;
3542
3543                         flow_flag_set(flow, CT);
3544                         break;
3545                 default:
3546                         NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3547                         return -EOPNOTSUPP;
3548                 }
3549         }
3550
3551         if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3552             hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3553                 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3554                                             parse_attr, hdrs, &action, extack);
3555                 if (err)
3556                         return err;
3557                 /* in case all pedit actions are skipped, remove the MOD_HDR
3558                  * flag.
3559                  */
3560                 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3561                         action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3562                         dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3563                 }
3564         }
3565
3566         attr->action = action;
3567
3568         if (attr->dest_chain) {
3569                 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3570                         NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3571                         return -EOPNOTSUPP;
3572                 }
3573                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3574         }
3575
3576         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3577                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3578
3579         if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3580                 return -EOPNOTSUPP;
3581
3582         return 0;
3583 }
3584
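/* Encap/decap entries are shared between flows. Encap entries are keyed by
 * the tunnel's ip_tunnel_key plus the tunnel type and kept in
 * esw->offloads.encap_tbl; decap entries are keyed by the rebuilt Ethernet
 * header and kept in esw->offloads.decap_tbl.
 */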
3585 struct encap_key {
3586         const struct ip_tunnel_key *ip_tun_key;
3587         struct mlx5e_tc_tunnel *tc_tunnel;
3588 };
3589
3590 static inline int cmp_encap_info(struct encap_key *a,
3591                                  struct encap_key *b)
3592 {
3593         return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
3594                a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
3595 }
3596
3597 static inline int cmp_decap_info(struct mlx5e_decap_key *a,
3598                                  struct mlx5e_decap_key *b)
3599 {
3600         return memcmp(&a->key, &b->key, sizeof(b->key));
3601 }
3602
3603 static inline int hash_encap_info(struct encap_key *key)
3604 {
3605         return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
3606                      key->tc_tunnel->tunnel_type);
3607 }
3608
3609 static inline int hash_decap_info(struct mlx5e_decap_key *key)
3610 {
3611         return jhash(&key->key, sizeof(key->key), 0);
3612 }
3613
3614 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3615                                   struct net_device *peer_netdev)
3616 {
3617         struct mlx5e_priv *peer_priv;
3618
3619         peer_priv = netdev_priv(peer_netdev);
3620
3621         return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3622                 mlx5e_eswitch_vf_rep(priv->netdev) &&
3623                 mlx5e_eswitch_vf_rep(peer_netdev) &&
3624                 same_hw_devs(priv, peer_priv));
3625 }
3626
3627 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
3628 {
3629         return refcount_inc_not_zero(&e->refcnt);
3630 }
3631
3632 static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
3633 {
3634         return refcount_inc_not_zero(&e->refcnt);
3635 }
3636
3637 static struct mlx5e_encap_entry *
3638 mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
3639                 uintptr_t hash_key)
3640 {
3641         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3642         struct mlx5e_encap_entry *e;
3643         struct encap_key e_key;
3644
3645         hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
3646                                    encap_hlist, hash_key) {
3647                 e_key.ip_tun_key = &e->tun_info->key;
3648                 e_key.tc_tunnel = e->tunnel;
3649                 if (!cmp_encap_info(&e_key, key) &&
3650                     mlx5e_encap_take(e))
3651                         return e;
3652         }
3653
3654         return NULL;
3655 }
3656
3657 static struct mlx5e_decap_entry *
3658 mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
3659                 uintptr_t hash_key)
3660 {
3661         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3662         struct mlx5e_decap_key r_key;
3663         struct mlx5e_decap_entry *e;
3664
3665         hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
3666                                    hlist, hash_key) {
3667                 r_key = e->key;
3668                 if (!cmp_decap_info(&r_key, key) &&
3669                     mlx5e_decap_take(e))
3670                         return e;
3671         }
3672         return NULL;
3673 }
3674
3675 static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
3676 {
3677         size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
3678
3679         return kmemdup(tun_info, tun_size, GFP_KERNEL);
3680 }
3681
3682 static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
3683                                       struct mlx5e_tc_flow *flow,
3684                                       int out_index,
3685                                       struct mlx5e_encap_entry *e,
3686                                       struct netlink_ext_ack *extack)
3687 {
3688         int i;
3689
3690         for (i = 0; i < out_index; i++) {
3691                 if (flow->encaps[i].e != e)
3692                         continue;
3693                 NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
3694                 netdev_err(priv->netdev, "can't duplicate encap action\n");
3695                 return true;
3696         }
3697
3698         return false;
3699 }
3700
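/* Look up or create the encap entry used by destination @out_index of the
 * flow. If another flow is concurrently creating the same entry, the table
 * lock is dropped and we wait on res_ready before attaching. *encap_valid
 * reflects whether the entry already has a resolved neighbour
 * (MLX5_ENCAP_ENTRY_VALID); handling the not-yet-valid case is left to the
 * caller.
 */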
3701 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
3702                               struct mlx5e_tc_flow *flow,
3703                               struct net_device *mirred_dev,
3704                               int out_index,
3705                               struct netlink_ext_ack *extack,
3706                               struct net_device **encap_dev,
3707                               bool *encap_valid)
3708 {
3709         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3710         struct mlx5e_tc_flow_parse_attr *parse_attr;
3711         struct mlx5_flow_attr *attr = flow->attr;
3712         const struct ip_tunnel_info *tun_info;
3713         struct encap_key key;
3714         struct mlx5e_encap_entry *e;
3715         unsigned short family;
3716         uintptr_t hash_key;
3717         int err = 0;
3718
3719         parse_attr = attr->parse_attr;
3720         tun_info = parse_attr->tun_info[out_index];
3721         family = ip_tunnel_info_af(tun_info);
3722         key.ip_tun_key = &tun_info->key;
3723         key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
3724         if (!key.tc_tunnel) {
3725                 NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
3726                 return -EOPNOTSUPP;
3727         }
3728
3729         hash_key = hash_encap_info(&key);
3730
3731         mutex_lock(&esw->offloads.encap_tbl_lock);
3732         e = mlx5e_encap_get(priv, &key, hash_key);
3733
3734         /* if an entry already exists, reuse it only after its setup has completed */
3735         if (e) {
3736                 /* Check that entry was not already attached to this flow */
3737                 if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
3738                         err = -EOPNOTSUPP;
3739                         goto out_err;
3740                 }
3741
3742                 mutex_unlock(&esw->offloads.encap_tbl_lock);
3743                 wait_for_completion(&e->res_ready);
3744
3745                 /* Protect against concurrent neigh update. */
3746                 mutex_lock(&esw->offloads.encap_tbl_lock);
3747                 if (e->compl_result < 0) {
3748                         err = -EREMOTEIO;
3749                         goto out_err;
3750                 }
3751                 goto attach_flow;
3752         }
3753
3754         e = kzalloc(sizeof(*e), GFP_KERNEL);
3755         if (!e) {
3756                 err = -ENOMEM;
3757                 goto out_err;
3758         }
3759
3760         refcount_set(&e->refcnt, 1);
3761         init_completion(&e->res_ready);
3762
3763         tun_info = dup_tun_info(tun_info);
3764         if (!tun_info) {
3765                 err = -ENOMEM;
3766                 goto out_err_init;
3767         }
3768         e->tun_info = tun_info;
3769         err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
3770         if (err)
3771                 goto out_err_init;
3772
3773         INIT_LIST_HEAD(&e->flows);
3774         hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
3775         mutex_unlock(&esw->offloads.encap_tbl_lock);
3776
3777         if (family == AF_INET)
3778                 err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
3779         else if (family == AF_INET6)
3780                 err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
3781
3782         /* Protect against concurrent neigh update. */
3783         mutex_lock(&esw->offloads.encap_tbl_lock);
3784         complete_all(&e->res_ready);
3785         if (err) {
3786                 e->compl_result = err;
3787                 goto out_err;
3788         }
3789         e->compl_result = 1;
3790
3791 attach_flow:
3792         flow->encaps[out_index].e = e;
3793         list_add(&flow->encaps[out_index].list, &e->flows);
3794         flow->encaps[out_index].index = out_index;
3795         *encap_dev = e->out_dev;
3796         if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
3797                 attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
3798                 attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
3799                 *encap_valid = true;
3800         } else {
3801                 *encap_valid = false;
3802         }
3803         mutex_unlock(&esw->offloads.encap_tbl_lock);
3804
3805         return err;
3806
3807 out_err:
3808         mutex_unlock(&esw->offloads.encap_tbl_lock);
3809         if (e)
3810                 mlx5e_encap_put(priv, e);
3811         return err;
3812
3813 out_err_init:
3814         mutex_unlock(&esw->offloads.encap_tbl_lock);
3815         kfree(tun_info);
3816         kfree(e);
3817         return err;
3818 }
3819
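/* Look up or create the shared decap entry keyed by the rebuilt Ethernet
 * header in parse_attr->eth, allocating the L3_TUNNEL_TO_L2 packet reformat
 * on first use; concurrent creators are serialized via res_ready.
 */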
3820 static int mlx5e_attach_decap(struct mlx5e_priv *priv,
3821                               struct mlx5e_tc_flow *flow,
3822                               struct netlink_ext_ack *extack)
3823 {
3824         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3825         struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3826         struct mlx5e_tc_flow_parse_attr *parse_attr;
3827         struct mlx5e_decap_entry *d;
3828         struct mlx5e_decap_key key;
3829         uintptr_t hash_key;
3830         int err = 0;
3831
3832         parse_attr = flow->attr->parse_attr;
3833         if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
3834                 NL_SET_ERR_MSG_MOD(extack,
3835                                    "encap header larger than max supported");
3836                 return -EOPNOTSUPP;
3837         }
3838
3839         key.key = parse_attr->eth;
3840         hash_key = hash_decap_info(&key);
3841         mutex_lock(&esw->offloads.decap_tbl_lock);
3842         d = mlx5e_decap_get(priv, &key, hash_key);
3843         if (d) {
3844                 mutex_unlock(&esw->offloads.decap_tbl_lock);
3845                 wait_for_completion(&d->res_ready);
3846                 mutex_lock(&esw->offloads.decap_tbl_lock);
3847                 if (d->compl_result) {
3848                         err = -EREMOTEIO;
3849                         goto out_free;
3850                 }
3851                 goto found;
3852         }
3853
3854         d = kzalloc(sizeof(*d), GFP_KERNEL);
3855         if (!d) {
3856                 err = -ENOMEM;
3857                 goto out_err;
3858         }
3859
3860         d->key = key;
3861         refcount_set(&d->refcnt, 1);
3862         init_completion(&d->res_ready);
3863         INIT_LIST_HEAD(&d->flows);
3864         hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
3865         mutex_unlock(&esw->offloads.decap_tbl_lock);
3866
3867         d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
3868                                                      MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
3869                                                      sizeof(parse_attr->eth),
3870                                                      &parse_attr->eth,
3871                                                      MLX5_FLOW_NAMESPACE_FDB);
3872         if (IS_ERR(d->pkt_reformat)) {
3873                 err = PTR_ERR(d->pkt_reformat);
3874                 d->compl_result = err;
3875         }
3876         mutex_lock(&esw->offloads.decap_tbl_lock);
3877         complete_all(&d->res_ready);
3878         if (err)
3879                 goto out_free;
3880
3881 found:
3882         flow->decap_reformat = d;
3883         attr->decap_pkt_reformat = d->pkt_reformat;
3884         list_add(&flow->l3_to_l2_reformat, &d->flows);
3885         mutex_unlock(&esw->offloads.decap_tbl_lock);
3886         return 0;
3887
3888 out_free:
3889         mutex_unlock(&esw->offloads.decap_tbl_lock);
3890         mlx5e_decap_put(priv, d);
3891         return err;
3892
3893 out_err:
3894         mutex_unlock(&esw->offloads.decap_tbl_lock);
3895         return err;
3896 }
3897
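/* Translate a TC vlan push/pop into eswitch VLAN actions. At most
 * MLX5_FS_VLAN_DEPTH VLAN headers are handled; a second push/pop level, or
 * pushing anything other than a plain 802.1Q tag with priority 0 at the
 * first level, requires the corresponding eswitch VLAN-actions capability.
 */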
3898 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3899                                 const struct flow_action_entry *act,
3900                                 struct mlx5_esw_flow_attr *attr,
3901                                 u32 *action)
3902 {
3903         u8 vlan_idx = attr->total_vlan;
3904
3905         if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3906                 return -EOPNOTSUPP;
3907
3908         switch (act->id) {
3909         case FLOW_ACTION_VLAN_POP:
3910                 if (vlan_idx) {
3911                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3912                                                                  MLX5_FS_VLAN_DEPTH))
3913                                 return -EOPNOTSUPP;
3914
3915                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3916                 } else {
3917                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3918                 }
3919                 break;
3920         case FLOW_ACTION_VLAN_PUSH:
3921                 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3922                 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3923                 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3924                 if (!attr->vlan_proto[vlan_idx])
3925                         attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3926
3927                 if (vlan_idx) {
3928                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3929                                                                  MLX5_FS_VLAN_DEPTH))
3930                                 return -EOPNOTSUPP;
3931
3932                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3933                 } else {
3934                         if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3935                             (act->vlan.proto != htons(ETH_P_8021Q) ||
3936                              act->vlan.prio))
3937                                 return -EOPNOTSUPP;
3938
3939                         *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3940                 }
3941                 break;
3942         default:
3943                 return -EINVAL;
3944         }
3945
3946         attr->total_vlan = vlan_idx + 1;
3947
3948         return 0;
3949 }
3950
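/* Resolve the actual FDB destination when LAG is involved: forwarding to a
 * bond on top of the uplink is translated to the uplink itself, and
 * forwarding to a bond of representors is translated to the currently
 * active slave (or NULL if that slave is not a representor on the same
 * parent device as the uplink).
 */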
3951 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3952                                           struct net_device *out_dev)
3953 {
3954         struct net_device *fdb_out_dev = out_dev;
3955         struct net_device *uplink_upper;
3956
3957         rcu_read_lock();
3958         uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3959         if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3960             uplink_upper == out_dev) {
3961                 fdb_out_dev = uplink_dev;
3962         } else if (netif_is_lag_master(out_dev)) {
3963                 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3964                 if (fdb_out_dev &&
3965                     (!mlx5e_eswitch_rep(fdb_out_dev) ||
3966                      !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3967                         fdb_out_dev = NULL;
3968         }
3969         rcu_read_unlock();
3970         return fdb_out_dev;
3971 }
3972
3973 static int add_vlan_push_action(struct mlx5e_priv *priv,
3974                                 struct mlx5_flow_attr *attr,
3975                                 struct net_device **out_dev,
3976                                 u32 *action)
3977 {
3978         struct net_device *vlan_dev = *out_dev;
3979         struct flow_action_entry vlan_act = {
3980                 .id = FLOW_ACTION_VLAN_PUSH,
3981                 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3982                 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3983                 .vlan.prio = 0,
3984         };
3985         int err;
3986
3987         err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3988         if (err)
3989                 return err;
3990
3991         *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3992                                         dev_get_iflink(vlan_dev));
3993         if (is_vlan_dev(*out_dev))
3994                 err = add_vlan_push_action(priv, attr, out_dev, action);
3995
3996         return err;
3997 }
3998
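/* The filter device may be a stack of VLAN devices on top of the mlx5e
 * netdev; emit one VLAN pop action per nesting level between the two.
 */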
3999 static int add_vlan_pop_action(struct mlx5e_priv *priv,
4000                                struct mlx5_flow_attr *attr,
4001                                u32 *action)
4002 {
4003         struct flow_action_entry vlan_act = {
4004                 .id = FLOW_ACTION_VLAN_POP,
4005         };
4006         int nest_level, err = 0;
4007
4008         nest_level = attr->parse_attr->filter_dev->lower_level -
4009                                                 priv->netdev->lower_level;
4010         while (nest_level--) {
4011                 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
4012                 if (err)
4013                         return err;
4014         }
4015
4016         return err;
4017 }
4018
4019 static bool same_hw_reps(struct mlx5e_priv *priv,
4020                          struct net_device *peer_netdev)
4021 {
4022         struct mlx5e_priv *peer_priv;
4023
4024         peer_priv = netdev_priv(peer_netdev);
4025
4026         return mlx5e_eswitch_rep(priv->netdev) &&
4027                mlx5e_eswitch_rep(peer_netdev) &&
4028                same_hw_devs(priv, peer_priv);
4029 }
4030
4031 static bool is_lag_dev(struct mlx5e_priv *priv,
4032                        struct net_device *peer_netdev)
4033 {
4034         return ((mlx5_lag_is_sriov(priv->mdev) ||
4035                  mlx5_lag_is_multipath(priv->mdev)) &&
4036                  same_hw_reps(priv, peer_netdev));
4037 }
4038
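     /* A forward destination is considered valid if it is a VF rep on the
      * same merged eswitch, a LAG device backed by the same HW reps, or an
      * eswitch rep that shares the port with this device.
      */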
4039 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4040                                     struct net_device *out_dev)
4041 {
4042         if (is_merged_eswitch_vfs(priv, out_dev))
4043                 return true;
4044
4045         if (is_lag_dev(priv, out_dev))
4046                 return true;
4047
4048         return mlx5e_eswitch_rep(out_dev) &&
4049                same_port_devs(priv, netdev_priv(out_dev));
4050 }
4051
4052 static bool is_duplicated_output_device(struct net_device *dev,
4053                                         struct net_device *out_dev,
4054                                         int *ifindexes, int if_count,
4055                                         struct netlink_ext_ack *extack)
4056 {
4057         int i;
4058
4059         for (i = 0; i < if_count; i++) {
4060                 if (ifindexes[i] == out_dev->ifindex) {
4061                         NL_SET_ERR_MSG_MOD(extack,
4062                                            "can't duplicate output to same device");
4063                         netdev_err(dev, "can't duplicate output to same device: %s\n",
4064                                    out_dev->name);
4065                         return true;
4066                 }
4067         }
4068
4069         return false;
4070 }
4071
4072 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
4073                                     struct mlx5e_tc_flow *flow,
4074                                     struct net_device *out_dev,
4075                                     struct netlink_ext_ack *extack)
4076 {
4077         struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4078         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4079         struct mlx5e_rep_priv *rep_priv;
4080
4081         /* Forwarding non-encapsulated traffic between
4082          * uplink ports is allowed only if the
4083          * termination_table_raw_traffic cap is set.
4084          *
4085          * The input vport was stored in attr->in_rep.
4086          * In the LAG case, *priv* is the private data of
4087          * the uplink, which may not be the input vport.
4088          */
4089         rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
4090
4091         if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
4092               mlx5e_eswitch_uplink_rep(out_dev)))
4093                 return 0;
4094
4095         if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
4096                                         termination_table_raw_traffic)) {
4097                 NL_SET_ERR_MSG_MOD(extack,
4098                                    "devices are both uplink, can't offload forwarding");
4099                 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
4100                        priv->netdev->name, out_dev->name);
4101                 return -EOPNOTSUPP;
4102         } else if (out_dev != rep_priv->netdev) {
4103                 NL_SET_ERR_MSG_MOD(extack,
4104                                    "devices are not the same uplink, can't offload forwarding");
4105                 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
4106                        priv->netdev->name, out_dev->name);
4107                 return -EOPNOTSUPP;
4108         }
4109         return 0;
4110 }
4111
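     /* Translate the flow_action list of an eswitch (FDB) rule into mlx5
      * flow attributes: drop/trap, mpls push/pop, pedit/csum, mirred and
      * redirect (including encap, vlan push/pop and uplink checks), tunnel
      * encap/decap, vlan mangle, goto chain and CT. The resulting action
      * set is then validated against device capabilities.
      */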
4112 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
4113                                 struct flow_action *flow_action,
4114                                 struct mlx5e_tc_flow *flow,
4115                                 struct netlink_ext_ack *extack,
4116                                 struct net_device *filter_dev)
4117 {
4118         struct pedit_headers_action hdrs[2] = {};
4119         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4120         struct mlx5e_tc_flow_parse_attr *parse_attr;
4121         struct mlx5e_rep_priv *rpriv = priv->ppriv;
4122         const struct ip_tunnel_info *info = NULL;
4123         struct mlx5_flow_attr *attr = flow->attr;
4124         int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
4125         bool ft_flow = mlx5e_is_ft_flow(flow);
4126         const struct flow_action_entry *act;
4127         struct mlx5_esw_flow_attr *esw_attr;
4128         bool encap = false, decap = false;
4129         u32 action = attr->action;
4130         int err, i, if_count = 0;
4131         bool mpls_push = false;
4132
4133         if (!flow_action_has_entries(flow_action))
4134                 return -EINVAL;
4135
4136         if (!flow_action_hw_stats_check(flow_action, extack,
4137                                         FLOW_ACTION_HW_STATS_DELAYED_BIT))
4138                 return -EOPNOTSUPP;
4139
4140         esw_attr = attr->esw_attr;
4141         parse_attr = attr->parse_attr;
4142
4143         flow_action_for_each(i, act, flow_action) {
4144                 switch (act->id) {
4145                 case FLOW_ACTION_DROP:
4146                         action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
4147                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
4148                         break;
4149                 case FLOW_ACTION_TRAP:
4150                         if (!flow_offload_has_one_action(flow_action)) {
4151                                 NL_SET_ERR_MSG_MOD(extack,
4152                                                    "action trap is supported as a sole action only");
4153                                 return -EOPNOTSUPP;
4154                         }
4155                         action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4156                                    MLX5_FLOW_CONTEXT_ACTION_COUNT);
4157                         attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
4158                         break;
4159                 case FLOW_ACTION_MPLS_PUSH:
4160                         if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
4161                                                         reformat_l2_to_l3_tunnel) ||
4162                             act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
4163                                 NL_SET_ERR_MSG_MOD(extack,
4164                                                    "mpls push is supported only for mpls_uc protocol");
4165                                 return -EOPNOTSUPP;
4166                         }
4167                         mpls_push = true;
4168                         break;
4169                 case FLOW_ACTION_MPLS_POP:
4170                         /* we only support mpls pop if it is the first action
4171                          * and the filter net device is bareudp. Subsequent
4172                          * actions can be pedit and the last can be mirred
4173                          * egress redirect.
4174                          */
4175                         if (i) {
4176                                 NL_SET_ERR_MSG_MOD(extack,
4177                                                    "mpls pop supported only as first action");
4178                                 return -EOPNOTSUPP;
4179                         }
4180                         if (!netif_is_bareudp(filter_dev)) {
4181                                 NL_SET_ERR_MSG_MOD(extack,
4182                                                    "mpls pop supported only on bareudp devices");
4183                                 return -EOPNOTSUPP;
4184                         }
4185
4186                         parse_attr->eth.h_proto = act->mpls_pop.proto;
4187                         action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
4188                         flow_flag_set(flow, L3_TO_L2_DECAP);
4189                         break;
4190                 case FLOW_ACTION_MANGLE:
4191                 case FLOW_ACTION_ADD:
4192                         err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
4193                                                     parse_attr, hdrs, flow, extack);
4194                         if (err)
4195                                 return err;
4196
4197                         if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
4198                                 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4199                                 esw_attr->split_count = esw_attr->out_count;
4200                         }
4201                         break;
4202                 case FLOW_ACTION_CSUM:
4203                         if (csum_offload_supported(priv, action,
4204                                                    act->csum_flags, extack))
4205                                 break;
4206
4207                         return -EOPNOTSUPP;
4208                 case FLOW_ACTION_REDIRECT:
4209                 case FLOW_ACTION_MIRRED: {
4210                         struct mlx5e_priv *out_priv;
4211                         struct net_device *out_dev;
4212
4213                         out_dev = act->dev;
4214                         if (!out_dev) {
4215                                 /* out_dev is NULL when filters with
4216                                  * non-existing mirred device are replayed to
4217                                  * the driver.
4218                                  */
4219                                 return -EINVAL;
4220                         }
4221
4222                         if (mpls_push && !netif_is_bareudp(out_dev)) {
4223                                 NL_SET_ERR_MSG_MOD(extack,
4224                                                    "mpls is supported only through a bareudp device");
4225                                 return -EOPNOTSUPP;
4226                         }
4227
4228                         if (ft_flow && out_dev == priv->netdev) {
4229                                 /* Ignore forward to self rules generated
4230                                  * by adding both mlx5 devs to the flow table
4231                                  * block on a normal nft offload setup.
4232                                  */
4233                                 return -EOPNOTSUPP;
4234                         }
4235
4236                         if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
4237                                 NL_SET_ERR_MSG_MOD(extack,
4238                                                    "can't support more output ports, can't offload forwarding");
4239                                 netdev_warn(priv->netdev,
4240                                             "can't support more than %d output ports, can't offload forwarding\n",
4241                                             esw_attr->out_count);
4242                                 return -EOPNOTSUPP;
4243                         }
4244
4245                         action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
4246                                   MLX5_FLOW_CONTEXT_ACTION_COUNT;
4247                         if (encap) {
4248                                 parse_attr->mirred_ifindex[esw_attr->out_count] =
4249                                         out_dev->ifindex;
4250                                 parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
4251                                 if (!parse_attr->tun_info[esw_attr->out_count])
4252                                         return -ENOMEM;
4253                                 encap = false;
4254                                 esw_attr->dests[esw_attr->out_count].flags |=
4255                                         MLX5_ESW_DEST_ENCAP;
4256                                 esw_attr->out_count++;
4257                                 /* attr->dests[].rep is resolved when we
4258                                  * handle encap
4259                                  */
4260                         } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
4261                                 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4262                                 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
4263
4264                                 if (is_duplicated_output_device(priv->netdev,
4265                                                                 out_dev,
4266                                                                 ifindexes,
4267                                                                 if_count,
4268                                                                 extack))
4269                                         return -EOPNOTSUPP;
4270
4271                                 ifindexes[if_count] = out_dev->ifindex;
4272                                 if_count++;
4273
4274                                 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
4275                                 if (!out_dev)
4276                                         return -ENODEV;
4277
4278                                 if (is_vlan_dev(out_dev)) {
4279                                         err = add_vlan_push_action(priv, attr,
4280                                                                    &out_dev,
4281                                                                    &action);
4282                                         if (err)
4283                                                 return err;
4284                                 }
4285
4286                                 if (is_vlan_dev(parse_attr->filter_dev)) {
4287                                         err = add_vlan_pop_action(priv, attr,
4288                                                                   &action);
4289                                         if (err)
4290                                                 return err;
4291                                 }
4292
4293                                 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
4294                                 if (err)
4295                                         return err;
4296
4297                                 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
4298                                         NL_SET_ERR_MSG_MOD(extack,
4299                                                            "devices are not on same switch HW, can't offload forwarding");
4300                                         return -EOPNOTSUPP;
4301                                 }
4302
4303                                 out_priv = netdev_priv(out_dev);
4304                                 rpriv = out_priv->ppriv;
4305                                 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
4306                                 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
4307                                 esw_attr->out_count++;
4308                         } else if (parse_attr->filter_dev != priv->netdev) {
4309                                 /* All mlx5 devices are called to configure
4310                                  * high level device filters. Therefore, the
4311                                  * *attempt* to install a filter on an invalid
4312                                  * eswitch should not trigger an explicit error.
4313                                  */
4314                                 return -EINVAL;
4315                         } else {
4316                                 NL_SET_ERR_MSG_MOD(extack,
4317                                                    "devices are not on same switch HW, can't offload forwarding");
4318                                 netdev_warn(priv->netdev,
4319                                             "devices %s %s not on same switch HW, can't offload forwarding\n",
4320                                             priv->netdev->name,
4321                                             out_dev->name);
4322                                 return -EINVAL;
4323                         }
4324                         }
4325                         break;
4326                 case FLOW_ACTION_TUNNEL_ENCAP:
4327                         info = act->tunnel;
4328                         if (info)
4329                                 encap = true;
4330                         else
4331                                 return -EOPNOTSUPP;
4332
4333                         break;
4334                 case FLOW_ACTION_VLAN_PUSH:
4335                 case FLOW_ACTION_VLAN_POP:
4336                         if (act->id == FLOW_ACTION_VLAN_PUSH &&
4337                             (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
4338                                 /* Replace vlan pop+push with vlan modify */
4339                                 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4340                                 err = add_vlan_rewrite_action(priv,
4341                                                               MLX5_FLOW_NAMESPACE_FDB,
4342                                                               act, parse_attr, hdrs,
4343                                                               &action, extack);
4344                         } else {
4345                                 err = parse_tc_vlan_action(priv, act, esw_attr, &action);
4346                         }
4347                         if (err)
4348                                 return err;
4349
4350                         esw_attr->split_count = esw_attr->out_count;
4351                         break;
4352                 case FLOW_ACTION_VLAN_MANGLE:
4353                         err = add_vlan_rewrite_action(priv,
4354                                                       MLX5_FLOW_NAMESPACE_FDB,
4355                                                       act, parse_attr, hdrs,
4356                                                       &action, extack);
4357                         if (err)
4358                                 return err;
4359
4360                         esw_attr->split_count = esw_attr->out_count;
4361                         break;
4362                 case FLOW_ACTION_TUNNEL_DECAP:
4363                         decap = true;
4364                         break;
4365                 case FLOW_ACTION_GOTO:
4366                         err = validate_goto_chain(priv, flow, act, action,
4367                                                   extack);
4368                         if (err)
4369                                 return err;
4370
4371                         action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
4372                         attr->dest_chain = act->chain_index;
4373                         break;
4374                 case FLOW_ACTION_CT:
4375                         err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
4376                         if (err)
4377                                 return err;
4378
4379                         flow_flag_set(flow, CT);
4380                         break;
4381                 default:
4382                         NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
4383                         return -EOPNOTSUPP;
4384                 }
4385         }
4386
4387         if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
4388             action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
4389                 /* For prio tag mode, replace vlan pop with vlan prio
4390                  * tag rewrite.
4391                  */
4392                 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
4393                 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
4394                                                        &action, extack);
4395                 if (err)
4396                         return err;
4397         }
4398
4399         if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
4400             hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
4401                 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
4402                                             parse_attr, hdrs, &action, extack);
4403                 if (err)
4404                         return err;
4405                 /* in case all pedit actions are skipped, remove the MOD_HDR
4406                  * flag. we might have set split_count either by pedit or
4407                  * pop/push. if there is no pop/push either, reset it too.
4408                  */
4409                 if (parse_attr->mod_hdr_acts.num_actions == 0) {
4410                         action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4411                         dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4412                         if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
4413                               (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
4414                                 esw_attr->split_count = 0;
4415                 }
4416         }
4417
4418         attr->action = action;
4419         if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
4420                 return -EOPNOTSUPP;
4421
4422         if (attr->dest_chain) {
4423                 if (decap) {
4424                         /* This could be supported by creating a mapping for
4425                          * the tunnel device only (without the tunnel), and
4426                          * setting that tunnel id on this decap flow.
4427                          *
4428                          * On restore (miss), we would just set the saved
4429                          * tunnel device.
4430                          */
4431
4432                         NL_SET_ERR_MSG(extack,
4433                                        "Decap with goto isn't supported");
4434                         netdev_warn(priv->netdev,
4435                                     "Decap with goto isn't supported\n");
4436                         return -EOPNOTSUPP;
4437                 }
4438
4439                 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
4440                         NL_SET_ERR_MSG_MOD(extack,
4441                                            "Mirroring goto chain rules isn't supported");
4442                         return -EOPNOTSUPP;
4443                 }
4444                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4445         }
4446
4447         if (!(attr->action &
4448               (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4449                 NL_SET_ERR_MSG_MOD(extack,
4450                                    "Rule must have at least one forward/drop action");
4451                 return -EOPNOTSUPP;
4452         }
4453
4454         if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4455                 NL_SET_ERR_MSG_MOD(extack,
4456                                    "current firmware doesn't support split rule for port mirroring");
4457                 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4458                 return -EOPNOTSUPP;
4459         }
4460
4461         return 0;
4462 }
4463
4464 static void get_flags(int flags, unsigned long *flow_flags)
4465 {
4466         unsigned long __flow_flags = 0;
4467
4468         if (flags & MLX5_TC_FLAG(INGRESS))
4469                 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4470         if (flags & MLX5_TC_FLAG(EGRESS))
4471                 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4472
4473         if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4474                 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4475         if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4476                 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4477         if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4478                 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4479
4480         *flow_flags = __flow_flags;
4481 }
4482
4483 static const struct rhashtable_params tc_ht_params = {
4484         .head_offset = offsetof(struct mlx5e_tc_flow, node),
4485         .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4486         .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4487         .automatic_shrinking = true,
4488 };
4489
4490 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4491                                     unsigned long flags)
4492 {
4493         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4494         struct mlx5e_rep_priv *uplink_rpriv;
4495
4496         if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4497                 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4498                 return &uplink_rpriv->uplink_priv.tc_ht;
4499         } else /* NIC offload */
4500                 return &priv->fs.tc.ht;
4501 }
4502
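     /* A duplicate rule is installed on the peer eswitch only when the
      * devcom pair is active and the device is in SR-IOV LAG or multipath
      * mode, with either rep ingress or an encap (packet reformat) action.
      */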
4503 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4504 {
4505         struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4506         struct mlx5_flow_attr *attr = flow->attr;
4507         bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4508                 flow_flag_test(flow, INGRESS);
4509         bool act_is_encap = !!(attr->action &
4510                                MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4511         bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4512                                                 MLX5_DEVCOM_ESW_OFFLOADS);
4513
4514         if (!esw_paired)
4515                 return false;
4516
4517         if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4518              mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4519             (is_rep_ingress || act_is_encap))
4520                 return true;
4521
4522         return false;
4523 }
4524
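     /* Allocate a flow attr with the namespace specific part (esw or nic
      * attr) appended to it in a single allocation.
      */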
4525 struct mlx5_flow_attr *
4526 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4527 {
4528         u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB)  ?
4529                                 sizeof(struct mlx5_esw_flow_attr) :
4530                                 sizeof(struct mlx5_nic_flow_attr);
4531         struct mlx5_flow_attr *attr;
4532
4533         return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4534 }
4535
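     /* Common allocation path for NIC and FDB flows: allocate the flow, its
      * parse attr and its flow attr, and initialize the encap/hairpin lists,
      * the refcount and the init_done completion.
      */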
4536 static int
4537 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4538                  struct flow_cls_offload *f, unsigned long flow_flags,
4539                  struct mlx5e_tc_flow_parse_attr **__parse_attr,
4540                  struct mlx5e_tc_flow **__flow)
4541 {
4542         struct mlx5e_tc_flow_parse_attr *parse_attr;
4543         struct mlx5_flow_attr *attr;
4544         struct mlx5e_tc_flow *flow;
4545         int err = -ENOMEM;
4546         int out_index;
4547
4548         flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4549         parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4550         if (!parse_attr || !flow)
4551                 goto err_free;
4552
4553         flow->flags = flow_flags;
4554         flow->cookie = f->cookie;
4555         flow->priv = priv;
4556
4557         attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4558         if (!attr)
4559                 goto err_free;
4560
4561         flow->attr = attr;
4562
4563         for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4564                 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4565         INIT_LIST_HEAD(&flow->hairpin);
4566         INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4567         refcount_set(&flow->refcnt, 1);
4568         init_completion(&flow->init_done);
4569
4570         *__flow = flow;
4571         *__parse_attr = parse_attr;
4572
4573         return 0;
4574
4575 err_free:
4576         kfree(flow);
4577         kvfree(parse_attr);
4578         return err;
4579 }
4580
4581 static void
4582 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4583                      struct mlx5e_tc_flow_parse_attr *parse_attr,
4584                      struct flow_cls_offload *f)
4585 {
4586         attr->parse_attr = parse_attr;
4587         attr->chain = f->common.chain_index;
4588         attr->prio = f->common.prio;
4589 }
4590
4591 static void
4592 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4593                          struct mlx5e_priv *priv,
4594                          struct mlx5e_tc_flow_parse_attr *parse_attr,
4595                          struct flow_cls_offload *f,
4596                          struct mlx5_eswitch_rep *in_rep,
4597                          struct mlx5_core_dev *in_mdev)
4598 {
4599         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4600         struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4601
4602         mlx5e_flow_attr_init(attr, parse_attr, f);
4603
4604         esw_attr->in_rep = in_rep;
4605         esw_attr->in_mdev = in_mdev;
4606
4607         if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4608             MLX5_COUNTER_SOURCE_ESWITCH)
4609                 esw_attr->counter_dev = in_mdev;
4610         else
4611                 esw_attr->counter_dev = priv->mdev;
4612 }
4613
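     /* Parse the flower match, CT matches and actions, then try to offload
      * the rule to the FDB. If the encap destination is unreachable under
      * multipath (-ENETUNREACH), keep the flow on the unready list to be
      * re-offloaded later instead of failing.
      */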
4614 static struct mlx5e_tc_flow *
4615 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4616                      struct flow_cls_offload *f,
4617                      unsigned long flow_flags,
4618                      struct net_device *filter_dev,
4619                      struct mlx5_eswitch_rep *in_rep,
4620                      struct mlx5_core_dev *in_mdev)
4621 {
4622         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4623         struct netlink_ext_ack *extack = f->common.extack;
4624         struct mlx5e_tc_flow_parse_attr *parse_attr;
4625         struct mlx5e_tc_flow *flow;
4626         int attr_size, err;
4627
4628         flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4629         attr_size  = sizeof(struct mlx5_esw_flow_attr);
4630         err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4631                                &parse_attr, &flow);
4632         if (err)
4633                 goto out;
4634
4635         parse_attr->filter_dev = filter_dev;
4636         mlx5e_flow_esw_attr_init(flow->attr,
4637                                  priv, parse_attr,
4638                                  f, in_rep, in_mdev);
4639
4640         err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4641                                f, filter_dev);
4642         if (err)
4643                 goto err_free;
4644
4645         /* actions validation depends on parsing the ct matches first */
4646         err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4647                                    &flow->attr->ct_attr, extack);
4648         if (err)
4649                 goto err_free;
4650
4651         err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
4652         if (err)
4653                 goto err_free;
4654
4655         err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4656         complete_all(&flow->init_done);
4657         if (err) {
4658                 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4659                         goto err_free;
4660
4661                 add_unready_flow(flow);
4662         }
4663
4664         return flow;
4665
4666 err_free:
4667         dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4668         mlx5e_flow_put(priv, flow);
4669 out:
4670         return ERR_PTR(err);
4671 }
4672
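     /* Install a duplicate of the rule on the peer eswitch (SR-IOV LAG or
      * multipath) and link the two flows (DUP) so the peer copy can be
      * found for stats and cleanup.
      */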
4673 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4674                                       struct mlx5e_tc_flow *flow,
4675                                       unsigned long flow_flags)
4676 {
4677         struct mlx5e_priv *priv = flow->priv, *peer_priv;
4678         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4679         struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4680         struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4681         struct mlx5e_tc_flow_parse_attr *parse_attr;
4682         struct mlx5e_rep_priv *peer_urpriv;
4683         struct mlx5e_tc_flow *peer_flow;
4684         struct mlx5_core_dev *in_mdev;
4685         int err = 0;
4686
4687         peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4688         if (!peer_esw)
4689                 return -ENODEV;
4690
4691         peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4692         peer_priv = netdev_priv(peer_urpriv->netdev);
4693
4694         /* in_mdev is the mdev the packet originated from.
4695          * Packets redirected to the uplink use the same mdev as the
4696          * original flow, and packets redirected from the uplink use
4697          * the peer mdev.
4698          */
4699         if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4700                 in_mdev = peer_priv->mdev;
4701         else
4702                 in_mdev = priv->mdev;
4703
4704         parse_attr = flow->attr->parse_attr;
4705         peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4706                                          parse_attr->filter_dev,
4707                                          attr->in_rep, in_mdev);
4708         if (IS_ERR(peer_flow)) {
4709                 err = PTR_ERR(peer_flow);
4710                 goto out;
4711         }
4712
4713         flow->peer_flow = peer_flow;
4714         flow_flag_set(flow, DUP);
4715         mutex_lock(&esw->offloads.peer_mutex);
4716         list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4717         mutex_unlock(&esw->offloads.peer_mutex);
4718
4719 out:
4720         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4721         return err;
4722 }
4723
4724 static int
4725 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4726                    struct flow_cls_offload *f,
4727                    unsigned long flow_flags,
4728                    struct net_device *filter_dev,
4729                    struct mlx5e_tc_flow **__flow)
4730 {
4731         struct mlx5e_rep_priv *rpriv = priv->ppriv;
4732         struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4733         struct mlx5_core_dev *in_mdev = priv->mdev;
4734         struct mlx5e_tc_flow *flow;
4735         int err;
4736
4737         flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4738                                     in_mdev);
4739         if (IS_ERR(flow))
4740                 return PTR_ERR(flow);
4741
4742         if (is_peer_flow_needed(flow)) {
4743                 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4744                 if (err) {
4745                         mlx5e_tc_del_fdb_flow(priv, flow);
4746                         goto out;
4747                 }
4748         }
4749
4750         *__flow = flow;
4751
4752         return 0;
4753
4754 out:
4755         return err;
4756 }
4757
4758 static int
4759 mlx5e_add_nic_flow(struct mlx5e_priv *priv,
4760                    struct flow_cls_offload *f,
4761                    unsigned long flow_flags,
4762                    struct net_device *filter_dev,
4763                    struct mlx5e_tc_flow **__flow)
4764 {
4765         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4766         struct netlink_ext_ack *extack = f->common.extack;
4767         struct mlx5e_tc_flow_parse_attr *parse_attr;
4768         struct mlx5e_tc_flow *flow;
4769         int attr_size, err;
4770
4771         if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
4772                 if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
4773                         return -EOPNOTSUPP;
4774         } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
4775                 return -EOPNOTSUPP;
4776         }
4777
4778         flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4779         attr_size  = sizeof(struct mlx5_nic_flow_attr);
4780         err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4781                                &parse_attr, &flow);
4782         if (err)
4783                 goto out;
4784
4785         parse_attr->filter_dev = filter_dev;
4786         mlx5e_flow_attr_init(flow->attr, parse_attr, f);
4787
4788         err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4789                                f, filter_dev);
4790         if (err)
4791                 goto err_free;
4792
4793         err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4794                                    &flow->attr->ct_attr, extack);
4795         if (err)
4796                 goto err_free;
4797
4798         err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
4799         if (err)
4800                 goto err_free;
4801
4802         err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
4803         if (err)
4804                 goto err_free;
4805
4806         flow_flag_set(flow, OFFLOADED);
4807         *__flow = flow;
4808
4809         return 0;
4810
4811 err_free:
4812         dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
4813         mlx5e_flow_put(priv, flow);
4814 out:
4815         return err;
4816 }
4817
4818 static int
4819 mlx5e_tc_add_flow(struct mlx5e_priv *priv,
4820                   struct flow_cls_offload *f,
4821                   unsigned long flags,
4822                   struct net_device *filter_dev,
4823                   struct mlx5e_tc_flow **flow)
4824 {
4825         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4826         unsigned long flow_flags;
4827         int err;
4828
4829         get_flags(flags, &flow_flags);
4830
4831         if (!tc_can_offload_extack(priv->netdev, f->common.extack))
4832                 return -EOPNOTSUPP;
4833
4834         if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
4835                 err = mlx5e_add_fdb_flow(priv, f, flow_flags,
4836                                          filter_dev, flow);
4837         else
4838                 err = mlx5e_add_nic_flow(priv, f, flow_flags,
4839                                          filter_dev, flow);
4840
4841         return err;
4842 }
4843
4844 static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
4845                                            struct mlx5e_rep_priv *rpriv)
4846 {
4847         /* An offloaded flow rule is allowed to be duplicated on a non-uplink
4848          * representor sharing a tc block with other slaves of a lag device.
4849          * rpriv can be NULL if this function is called from NIC mode.
4850          */
4851         return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
4852 }
4853
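     /* FLOW_CLS_REPLACE entry point: reject duplicate cookies (except the
      * allowed duplication on LAG slave reps sharing a tc block), build and
      * offload the flow, then insert it into the tc hash table keyed by the
      * flower cookie.
      */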
4854 int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
4855                            struct flow_cls_offload *f, unsigned long flags)
4856 {
4857         struct netlink_ext_ack *extack = f->common.extack;
4858         struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4859         struct mlx5e_rep_priv *rpriv = priv->ppriv;
4860         struct mlx5e_tc_flow *flow;
4861         int err = 0;
4862
4863         rcu_read_lock();
4864         flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4865         if (flow) {
4866                 /* Same flow rule offloaded to non-uplink representor sharing tc block,
4867                  * just return 0.
4868                  */
4869                 if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
4870                         goto rcu_unlock;
4871
4872                 NL_SET_ERR_MSG_MOD(extack,
4873                                    "flow cookie already exists, ignoring");
4874                 netdev_warn_once(priv->netdev,
4875                                  "flow cookie %lx already exists, ignoring\n",
4876                                  f->cookie);
4877                 err = -EEXIST;
4878                 goto rcu_unlock;
4879         }
4880 rcu_unlock:
4881         rcu_read_unlock();
4882         if (flow)
4883                 goto out;
4884
4885         trace_mlx5e_configure_flower(f);
4886         err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
4887         if (err)
4888                 goto out;
4889
4890         /* Flow rule offloaded to non-uplink representor sharing tc block,
4891          * set the flow's owner dev.
4892          */
4893         if (is_flow_rule_duplicate_allowed(dev, rpriv))
4894                 flow->orig_dev = dev;
4895
4896         err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
4897         if (err)
4898                 goto err_free;
4899
4900         return 0;
4901
4902 err_free:
4903         mlx5e_flow_put(priv, flow);
4904 out:
4905         return err;
4906 }
4907
4908 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
4909 {
4910         bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
4911         bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));
4912
4913         return flow_flag_test(flow, INGRESS) == dir_ingress &&
4914                 flow_flag_test(flow, EGRESS) == dir_egress;
4915 }
4916
4917 int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
4918                         struct flow_cls_offload *f, unsigned long flags)
4919 {
4920         struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4921         struct mlx5e_tc_flow *flow;
4922         int err;
4923
4924         rcu_read_lock();
4925         flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
4926         if (!flow || !same_flow_direction(flow, flags)) {
4927                 err = -EINVAL;
4928                 goto errout;
4929         }
4930
4931         /* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
4932          * set.
4933          */
4934         if (flow_flag_test_and_set(flow, DELETED)) {
4935                 err = -EINVAL;
4936                 goto errout;
4937         }
4938         rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
4939         rcu_read_unlock();
4940
4941         trace_mlx5e_delete_flower(f);
4942         mlx5e_flow_put(priv, flow);
4943
4944         return 0;
4945
4946 errout:
4947         rcu_read_unlock();
4948         return err;
4949 }
4950
4951 int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
4952                        struct flow_cls_offload *f, unsigned long flags)
4953 {
4954         struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4955         struct rhashtable *tc_ht = get_tc_ht(priv, flags);
4956         struct mlx5_eswitch *peer_esw;
4957         struct mlx5e_tc_flow *flow;
4958         struct mlx5_fc *counter;
4959         u64 lastuse = 0;
4960         u64 packets = 0;
4961         u64 bytes = 0;
4962         int err = 0;
4963
4964         rcu_read_lock();
4965         flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
4966                                                 tc_ht_params));
4967         rcu_read_unlock();
4968         if (IS_ERR(flow))
4969                 return PTR_ERR(flow);
4970
4971         if (!same_flow_direction(flow, flags)) {
4972                 err = -EINVAL;
4973                 goto errout;
4974         }
4975
4976         if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
4977                 counter = mlx5e_tc_get_counter(flow);
4978                 if (!counter)
4979                         goto errout;
4980
4981                 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
4982         }
4983
4984         /* Under multipath it's possible for one rule to be currently
4985          * un-offloaded while the other rule is offloaded.
4986          */
4987         peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4988         if (!peer_esw)
4989                 goto out;
4990
4991         if (flow_flag_test(flow, DUP) &&
4992             flow_flag_test(flow->peer_flow, OFFLOADED)) {
4993                 u64 bytes2;
4994                 u64 packets2;
4995                 u64 lastuse2;
4996
4997                 counter = mlx5e_tc_get_counter(flow->peer_flow);
4998                 if (!counter)
4999                         goto no_peer_counter;
5000                 mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
5001
5002                 bytes += bytes2;
5003                 packets += packets2;
5004                 lastuse = max_t(u64, lastuse, lastuse2);
5005         }
5006
5007 no_peer_counter:
5008         mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
5009 out:
5010         flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
5011                           FLOW_ACTION_HW_STATS_DELAYED);
5012         trace_mlx5e_stats_flower(f);
5013 errout:
5014         mlx5e_flow_put(priv, flow);
5015         return err;
5016 }
5017
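     /* Translate a matchall police rate (bytes/sec) into a per-vport ingress
      * rate limit in mbit/sec. Only eswitch ports connected to VFs are
      * supported.
      */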
5018 static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
5019                                struct netlink_ext_ack *extack)
5020 {
5021         struct mlx5e_rep_priv *rpriv = priv->ppriv;
5022         struct mlx5_eswitch *esw;
5023         u32 rate_mbps = 0;
5024         u16 vport_num;
5025         int err;
5026
5027         vport_num = rpriv->rep->vport;
5028         if (vport_num >= MLX5_VPORT_ECPF) {
5029                 NL_SET_ERR_MSG_MOD(extack,
5030                                    "Ingress rate limit is supported only for Eswitch ports connected to VFs");
5031                 return -EOPNOTSUPP;
5032         }
5033
5034         esw = priv->mdev->priv.eswitch;
5035         /* rate is given in bytes/sec.
5036          * First convert to bits/sec and then round to the nearest mbit/sec.
5037          * mbit means million bits.
5038          * Moreover, if rate is non-zero we choose to configure to a minimum
5039          * of 1 mbit/sec.
5040          */
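             /* For example, rate = 1250000 bytes/sec gives
              * (1250000 * 8 + 500000) / 1000000 = 10, i.e. 10 mbit/sec.
              */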
5041         if (rate) {
5042                 rate = (rate * BITS_PER_BYTE) + 500000;
5043                 do_div(rate, 1000000);
                     rate_mbps = max_t(u32, rate, 1);
5044         }
5045
5046         err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
5047         if (err)
5048                 NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");
5049
5050         return err;
5051 }
5052
5053 static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
5054                                         struct flow_action *flow_action,
5055                                         struct netlink_ext_ack *extack)
5056 {
5057         struct mlx5e_rep_priv *rpriv = priv->ppriv;
5058         const struct flow_action_entry *act;
5059         int err;
5060         int i;
5061
5062         if (!flow_action_has_entries(flow_action)) {
5063                 NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
5064                 return -EINVAL;
5065         }
5066
5067         if (!flow_offload_has_one_action(flow_action)) {
5068                 NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
5069                 return -EOPNOTSUPP;
5070         }
5071
5072         if (!flow_action_basic_hw_stats_check(flow_action, extack))
5073                 return -EOPNOTSUPP;
5074
5075         flow_action_for_each(i, act, flow_action) {
5076                 switch (act->id) {
5077                 case FLOW_ACTION_POLICE:
5078                         err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
5079                         if (err)
5080                                 return err;
5081
5082                         rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
5083                         break;
5084                 default:
5085                         NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
5086                         return -EOPNOTSUPP;
5087                 }
5088         }
5089
5090         return 0;
5091 }
5092
5093 int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
5094                                 struct tc_cls_matchall_offload *ma)
5095 {
5096         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
5097         struct netlink_ext_ack *extack = ma->common.extack;
5098
5099         if (!mlx5_esw_qos_enabled(esw)) {
5100                 NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
5101                 return -EOPNOTSUPP;
5102         }
5103
5104         if (ma->common.prio != 1) {
5105                 NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
5106                 return -EINVAL;
5107         }
5108
5109         return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
5110 }
5111
5112 int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
5113                              struct tc_cls_matchall_offload *ma)
5114 {
5115         struct netlink_ext_ack *extack = ma->common.extack;
5116
5117         return apply_police_params(priv, 0, extack);
5118 }
5119
5120 void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
5121                              struct tc_cls_matchall_offload *ma)
5122 {
5123         struct mlx5e_rep_priv *rpriv = priv->ppriv;
5124         struct rtnl_link_stats64 cur_stats;
5125         u64 dbytes;
5126         u64 dpkts;
5127
5128         cur_stats = priv->stats.vf_vport;
5129         dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
5130         dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
5131         rpriv->prev_vf_vport_stats = cur_stats;
5132         flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
5133                           FLOW_ACTION_HW_STATS_DELAYED);
5134 }
5135
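     /* When a netdev on the same HW is unregistered, mark every hairpin
      * entry paired with its vhca id as peer_gone so the hairpin pair is not
      * used against the dead peer.
      */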
5136 static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
5137                                               struct mlx5e_priv *peer_priv)
5138 {
5139         struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
5140         struct mlx5e_hairpin_entry *hpe, *tmp;
5141         LIST_HEAD(init_wait_list);
5142         u16 peer_vhca_id;
5143         int bkt;
5144
5145         if (!same_hw_devs(priv, peer_priv))
5146                 return;
5147
5148         peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
5149
5150         mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
5151         hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
5152                 if (refcount_inc_not_zero(&hpe->refcnt))
5153                         list_add(&hpe->dead_peer_wait_list, &init_wait_list);
5154         mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
5155
5156         list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
5157                 wait_for_completion(&hpe->res_ready);
5158                 if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
5159                         hpe->hp->pair->peer_gone = true;
5160
5161                 mlx5e_hairpin_put(priv, hpe);
5162         }
5163 }
5164
5165 static int mlx5e_tc_netdev_event(struct notifier_block *this,
5166                                  unsigned long event, void *ptr)
5167 {
5168         struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
5169         struct mlx5e_flow_steering *fs;
5170         struct mlx5e_priv *peer_priv;
5171         struct mlx5e_tc_table *tc;
5172         struct mlx5e_priv *priv;
5173
5174         if (ndev->netdev_ops != &mlx5e_netdev_ops ||
5175             event != NETDEV_UNREGISTER ||
5176             ndev->reg_state == NETREG_REGISTERED)
5177                 return NOTIFY_DONE;
5178
5179         tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
5180         fs = container_of(tc, struct mlx5e_flow_steering, tc);
5181         priv = container_of(fs, struct mlx5e_priv, fs);
5182         peer_priv = netdev_priv(ndev);
5183         if (priv == peer_priv ||
5184             !(priv->netdev->features & NETIF_F_HW_TC))
5185                 return NOTIFY_DONE;
5186
5187         mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
5188
5189         return NOTIFY_DONE;
5190 }
5191
5192 static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
5193 {
5194         int tc_grp_size, tc_tbl_size;
5195         u32 max_flow_counter;
5196
5197         max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
5198                             MLX5_CAP_GEN(dev, max_flow_counter_15_0);
5199
5200         tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
5201
5202         tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
5203                             BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
5204
5205         return tc_tbl_size;
5206 }
5207
5208 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
5209 {
5210         struct mlx5e_tc_table *tc = &priv->fs.tc;
5211         struct mlx5_core_dev *dev = priv->mdev;
5212         struct mlx5_chains_attr attr = {};
5213         int err;
5214
5215         mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
5216         mutex_init(&tc->t_lock);
5217         mutex_init(&tc->hairpin_tbl_lock);
5218         hash_init(tc->hairpin_tbl);
5219
5220         err = rhashtable_init(&tc->ht, &tc_ht_params);
5221         if (err)
5222                 return err;
5223
5224         if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
5225                 attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
5226                         MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
5227                 attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
5228         }
5229         attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
5230         attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
5231         attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
5232         attr.default_ft = priv->fs.vlan.ft.t;
5233
5234         tc->chains = mlx5_chains_create(dev, &attr);
5235         if (IS_ERR(tc->chains)) {
5236                 err = PTR_ERR(tc->chains);
5237                 goto err_chains;
5238         }
5239
5240         tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
5241                                  MLX5_FLOW_NAMESPACE_KERNEL);
5242         if (IS_ERR(tc->ct)) {
5243                 err = PTR_ERR(tc->ct);
5244                 goto err_ct;
5245         }
5246
5247         tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
5248         err = register_netdevice_notifier_dev_net(priv->netdev,
5249                                                   &tc->netdevice_nb,
5250                                                   &tc->netdevice_nn);
5251         if (err) {
5252                 tc->netdevice_nb.notifier_call = NULL;
5253                 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
5254                 goto err_reg;
5255         }
5256
5257         return 0;
5258
5259 err_reg:
5260         mlx5_tc_ct_clean(tc->ct);
5261 err_ct:
5262         mlx5_chains_destroy(tc->chains);
5263 err_chains:
5264         rhashtable_destroy(&tc->ht);
5265         return err;
5266 }
5267
5268 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
5269 {
5270         struct mlx5e_tc_flow *flow = ptr;
5271         struct mlx5e_priv *priv = flow->priv;
5272
5273         mlx5e_tc_del_flow(priv, flow);
5274         kfree(flow);
5275 }
5276
5277 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
5278 {
5279         struct mlx5e_tc_table *tc = &priv->fs.tc;
5280
5281         if (tc->netdevice_nb.notifier_call)
5282                 unregister_netdevice_notifier_dev_net(priv->netdev,
5283                                                       &tc->netdevice_nb,
5284                                                       &tc->netdevice_nn);
5285
5286         mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
5287         mutex_destroy(&tc->hairpin_tbl_lock);
5288
5289         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
5290
5291         if (!IS_ERR_OR_NULL(tc->t)) {
5292                 mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
5293                 tc->t = NULL;
5294         }
5295         mutex_destroy(&tc->t_lock);
5296
5297         mlx5_tc_ct_clean(tc->ct);
5298         mlx5_chains_destroy(tc->chains);
5299 }
5300
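     /* Initialize the per-uplink-rep TC state: CT offload context, tunnel
      * and tunnel-enc-opts mapping tables and the flow hash table.
      */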
5301 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
5302 {
5303         const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
5304         struct mlx5_rep_uplink_priv *uplink_priv;
5305         struct mlx5e_rep_priv *rpriv;
5306         struct mapping_ctx *mapping;
5307         struct mlx5_eswitch *esw;
5308         struct mlx5e_priv *priv;
5309         int err = 0;
5310
5311         uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5312         rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
5313         priv = netdev_priv(rpriv->netdev);
5314         esw = priv->mdev->priv.eswitch;
5315
5316         uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
5317                                                esw_chains(esw),
5318                                                &esw->offloads.mod_hdr,
5319                                                MLX5_FLOW_NAMESPACE_FDB);
5320         if (IS_ERR(uplink_priv->ct_priv)) {
5321                 err = PTR_ERR(uplink_priv->ct_priv);
                     goto err_ct;
             }
5322
5323         mapping = mapping_create(sizeof(struct tunnel_match_key),
5324                                  TUNNEL_INFO_BITS_MASK, true);
5325         if (IS_ERR(mapping)) {
5326                 err = PTR_ERR(mapping);
5327                 goto err_tun_mapping;
5328         }
5329         uplink_priv->tunnel_mapping = mapping;
5330
5331         mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
5332         if (IS_ERR(mapping)) {
5333                 err = PTR_ERR(mapping);
5334                 goto err_enc_opts_mapping;
5335         }
5336         uplink_priv->tunnel_enc_opts_mapping = mapping;
5337
5338         err = rhashtable_init(tc_ht, &tc_ht_params);
5339         if (err)
5340                 goto err_ht_init;
5341
5342         return err;
5343
5344 err_ht_init:
5345         mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5346 err_enc_opts_mapping:
5347         mapping_destroy(uplink_priv->tunnel_mapping);
5348 err_tun_mapping:
5349         mlx5_tc_ct_clean(uplink_priv->ct_priv);
5350 err_ct:
5351         netdev_warn(priv->netdev,
5352                     "Failed to initialize tc (eswitch), err: %d\n", err);
5353         return err;
5354 }
5355
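/* Reverse of mlx5e_tc_esw_init(): delete all offloaded flows, then the
 * tunnel mappings and the CT context.
 */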
5356 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
5357 {
5358         struct mlx5_rep_uplink_priv *uplink_priv;
5359
5360         rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
5361
5362         uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
5363
5364         mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
5365         mapping_destroy(uplink_priv->tunnel_mapping);
5366
5367         mlx5_tc_ct_clean(uplink_priv->ct_priv);
5368 }
5369
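/* Number of flows currently in the hashtable selected by @flags (NIC or
 * eswitch offload).
 */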
5370 int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
5371 {
5372         struct rhashtable *tc_ht = get_tc_ht(priv, flags);
5373
5374         return atomic_read(&tc_ht->nelems);
5375 }
5376
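/* Delete the peer copies of all FDB flows tracked on this eswitch's
 * peer_flows list.
 */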
5377 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
5378 {
5379         struct mlx5e_tc_flow *flow, *tmp;
5380
5381         list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
5382                 __mlx5e_tc_del_fdb_peer_flow(flow);
5383 }
5384
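/* Work handler: retry hardware offload for flows on the unready list;
 * flows that are now offloaded successfully are taken off the list.
 */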
5385 void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
5386 {
5387         struct mlx5_rep_uplink_priv *rpriv =
5388                 container_of(work, struct mlx5_rep_uplink_priv,
5389                              reoffload_flows_work);
5390         struct mlx5e_tc_flow *flow, *tmp;
5391
5392         mutex_lock(&rpriv->unready_flows_lock);
5393         list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
5394                 if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
5395                         unready_flow_del(flow);
5396         }
5397         mutex_unlock(&rpriv->unready_flows_lock);
5398 }
5399
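/* Dispatch a flower classifier command (replace, destroy or stats) to the
 * matching handler.
 */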
5400 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
5401                                      struct flow_cls_offload *cls_flower,
5402                                      unsigned long flags)
5403 {
5404         switch (cls_flower->command) {
5405         case FLOW_CLS_REPLACE:
5406                 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
5407                                               flags);
5408         case FLOW_CLS_DESTROY:
5409                 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
5410                                            flags);
5411         case FLOW_CLS_STATS:
5412                 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
5413                                           flags);
5414         default:
5415                 return -EOPNOTSUPP;
5416         }
5417 }
5418
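/* TC block callback for NIC offload on ingress; only flower classifiers
 * are supported.
 */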
5419 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5420                             void *cb_priv)
5421 {
5422         unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
5423         struct mlx5e_priv *priv = cb_priv;
5424
5425         switch (type) {
5426         case TC_SETUP_CLSFLOWER:
5427                 return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
5428         default:
5429                 return -EOPNOTSUPP;
5430         }
5431 }
5432
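/* On receive, restore TC state from the CQE metadata written by hardware:
 * map the chain tag back to a chain id, record it in the skb's TC
 * extension so software processing resumes from that chain, and let CT
 * restore the conntrack zone. Returns false if restoration fails.
 */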
5433 bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
5434                          struct sk_buff *skb)
5435 {
5436 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
5437         u32 chain = 0, chain_tag, reg_b, zone_restore_id;
5438         struct mlx5e_priv *priv = netdev_priv(skb->dev);
5439         struct mlx5e_tc_table *tc = &priv->fs.tc;
5440         struct tc_skb_ext *tc_skb_ext;
5441         int err;
5442
5443         reg_b = be32_to_cpu(cqe->ft_metadata);
5444
5445         chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
5446
5447         err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
5448         if (err) {
5449                 netdev_dbg(priv->netdev,
5450                            "Couldn't find chain for chain tag: %d, err: %d\n",
5451                            chain_tag, err);
5452                 return false;
5453         }
5454
5455         if (chain) {
5456                 tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
5457                 if (WARN_ON(!tc_skb_ext))
5458                         return false;
5459
5460                 tc_skb_ext->chain = chain;
5461
5462                 zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
5463                                   ZONE_RESTORE_MAX;
5464
5465                 if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
5466                                               zone_restore_id))
5467                         return false;
5468         }
5469 #endif /* CONFIG_NET_TC_SKB_EXT */
5470
5471         return true;
5472 }