/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
60 struct mlx5_nic_flow_attr {
66 struct mlx5_flow_table *hairpin_ft;
67 struct mlx5_fc *counter;
70 #define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
	MLX5E_TC_FLOW_FLAG_INGRESS	= MLX5E_TC_FLAG_INGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_EGRESS	= MLX5E_TC_FLAG_EGRESS_BIT,
	MLX5E_TC_FLOW_FLAG_ESWITCH	= MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_NIC		= MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
	MLX5E_TC_FLOW_FLAG_OFFLOADED	= MLX5E_TC_FLOW_BASE,
	MLX5E_TC_FLOW_FLAG_HAIRPIN	= MLX5E_TC_FLOW_BASE + 1,
	MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS	= MLX5E_TC_FLOW_BASE + 2,
	MLX5E_TC_FLOW_FLAG_SLOW		= MLX5E_TC_FLOW_BASE + 3,
	MLX5E_TC_FLOW_FLAG_DUP		= MLX5E_TC_FLOW_BASE + 4,
	MLX5E_TC_FLOW_FLAG_NOT_READY	= MLX5E_TC_FLOW_BASE + 5,
	MLX5E_TC_FLOW_FLAG_DELETED	= MLX5E_TC_FLOW_BASE + 6,
};
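/* Note: the first four values above alias the exported MLX5E_TC_FLAG_* bits,
 * so the same flags word can be tested against both sets; the driver-internal
 * flags (OFFLOADED and up) start at MLX5E_TC_FLOW_BASE, one past the last
 * exported bit.
 */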
#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *       |- Helper array
 *           [0] Helper item 0
 *               |- list_head item 0
 *               |- index (0)
 *           [1] Helper item 1
 *               |- list_head item 1
 *               |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    helper item =
 *            container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    containing struct =
 *            container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct mlx5e_encap_entry *e; /* attached encap instance */
	struct list_head list;
	int index;
};
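/* Illustrative example of the two-step lookup described above, matching how
 * the encaps[] array is used later in this file (e.g. in
 * mlx5e_tc_encap_flows_add()):
 *
 *	efi  = container_of(list_item, struct encap_flow_item, list);
 *	flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * efi->index selects which encaps[] slot the helper lives in, which is what
 * lets the second container_of() subtract the correct array-element offset.
 */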
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	unsigned long		flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow	*peer_flow;
	struct mlx5e_mod_hdr_entry *mh; /* attached mod header instance */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	refcount_t		refcnt;
	struct rcu_head		rcu_head;
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;

	refcount_t refcnt;
	struct completion res_ready;
	int compl_result;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

static void mlx5e_flow_put(struct mlx5e_priv *priv,
			   struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before setting bit. */
	smp_mb__before_atomic();
	set_bit(flag, &flow->flags);
}
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
				     unsigned long flag)
{
	/* test_and_set_bit() provides all necessary barriers */
	return test_and_set_bit(flag, &flow->flags);
}

#define flow_flag_test_and_set(flow, flag)			\
	__flow_flag_test_and_set(flow,				\
				 MLX5E_TC_FLOW_FLAG_##flag)

static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	/* Complete all memory stores before clearing bit. */
	smp_mb__before_atomic();
	clear_bit(flag, &flow->flags);
}

#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
						      MLX5E_TC_FLOW_FLAG_##flag)

static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
	bool ret = test_bit(flag, &flow->flags);

	/* Read fields of flow structure only after checking flags. */
	smp_mb__after_atomic();
	return ret;
}

#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
						    MLX5E_TC_FLOW_FLAG_##flag)
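/* Example: flow_flag_test(flow, SLOW) pastes the token into
 * __flow_flag_test(flow, MLX5E_TC_FLOW_FLAG_SLOW), i.e. an atomic test_bit()
 * on flow->flags followed by a read barrier. The same pattern applies to the
 * set/clear/test_and_set wrappers above.
 */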
static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
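/* Since the key is the raw array of modify-header actions, two flows that
 * request an identical rewrite sequence hash and compare equal here, and end
 * up sharing a single firmware mod_hdr_id (see mlx5e_attach_mod_hdr() below).
 */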
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, int namespace)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return namespace == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static struct mlx5e_mod_hdr_entry *
mlx5e_mod_hdr_get(struct mod_hdr_tbl *tbl, struct mod_hdr_key *key, u32 hash_key)
{
	struct mlx5e_mod_hdr_entry *mh, *found = NULL;

	hash_for_each_possible(tbl->hlist, mh, mod_hdr_hlist, hash_key) {
		if (!cmp_mod_hdr_info(&mh->key, key)) {
			refcount_inc(&mh->refcnt);
			found = mh;
			break;
		}
	}

	return found;
}

static void mlx5e_mod_hdr_put(struct mlx5e_priv *priv,
			      struct mlx5e_mod_hdr_entry *mh,
			      int namespace)
{
	struct mod_hdr_tbl *tbl = get_mod_hdr_table(priv, namespace);

	if (!refcount_dec_and_mutex_lock(&mh->refcnt, &tbl->lock))
		return;
	hash_del(&mh->mod_hdr_hlist);
	mutex_unlock(&tbl->lock);

	WARN_ON(!list_empty(&mh->flows));
	if (mh->compl_result > 0)
		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);

	kfree(mh);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_tbl *tbl;
	struct mod_hdr_key key;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	namespace = get_flow_name_space(flow);
	tbl = get_mod_hdr_table(priv, namespace);

	mutex_lock(&tbl->lock);
	mh = mlx5e_mod_hdr_get(tbl, &key, hash_key);
	if (mh) {
		mutex_unlock(&tbl->lock);
		wait_for_completion(&mh->res_ready);

		if (mh->compl_result < 0) {
			err = -EREMOTEIO;
			goto attach_header_err;
		}
		goto attach_flow;
	}

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh) {
		mutex_unlock(&tbl->lock);
		return -ENOMEM;
	}

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	spin_lock_init(&mh->flows_lock);
	INIT_LIST_HEAD(&mh->flows);
	refcount_set(&mh->refcnt, 1);
	init_completion(&mh->res_ready);

	hash_add(tbl->hlist, &mh->mod_hdr_hlist, hash_key);
	mutex_unlock(&tbl->lock);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err) {
		mh->compl_result = err;
		goto alloc_header_err;
	}
	mh->compl_result = 1;
	complete_all(&mh->res_ready);

attach_flow:
	flow->mh = mh;
	spin_lock(&mh->flows_lock);
	list_add(&flow->mod_hdr, &mh->flows);
	spin_unlock(&mh->flows_lock);
	if (mlx5e_is_eswitch_flow(flow))
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

alloc_header_err:
	complete_all(&mh->res_ready);
attach_header_err:
	mlx5e_mod_hdr_put(priv, mh, namespace);
	return err;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	spin_lock(&flow->mh->flows_lock);
	list_del(&flow->mod_hdr);
	spin_unlock(&flow->mh->flows_lock);

	mlx5e_mod_hdr_put(priv, flow->mh, get_flow_name_space(flow));
	flow->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
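/* e.g. peer_vhca_id = 2, prio = 3 hashes to (2 << 16) | 3 = 0x00020003 */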
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
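	/* Worked example (assuming link_speed is reported in Mb/s): a 100Gb/s
	 * port gives link_speed = 100000, so 100000 / 50000 = 2 hairpin
	 * channels; ports at or below 50Gb/s get a single channel because of
	 * the max_t() clamp above.
	 */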
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.reformat_id = 0,
		.flags    = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	int err, dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = attr->flow_tag;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;

		if (flow_flag_test(flow, HAIRPIN_RSS)) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	mutex_lock(&priv->fs.tc.t_lock);
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			mutex_unlock(&priv->fs.tc.t_lock);
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table\n");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			return PTR_ERR(priv->fs.tc.t);
		}
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);
	mutex_unlock(&priv->fs.tc.t_lock);

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);

	return 0;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	int err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		return -EOPNOTSUPP;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		return -EOPNOTSUPP;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			return err;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0]))
		return PTR_ERR(flow->rule[0]);
	else
		flow_flag_set(flow, OFFLOADED);

	return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow_flag_test(flow, NOT_READY)) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct encap_flow_item *efi, *tmp;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
		bool all_flow_encaps_valid = true;
		int i;

		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;

		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[efi->index].encap_id = e->encap_id;
		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			goto loop_cont;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			goto loop_cont;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->rule[0] = rule;
		/* was unset when slow path rule removed */
		flow_flag_set(flow, OFFLOADED);
loop_cont:
		mlx5e_flow_put(priv, flow);
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct encap_flow_item *efi, *tmp;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry_safe(efi, tmp, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		if (IS_ERR(mlx5e_flow_get(flow)))
			continue;

		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			goto loop_cont;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->rule[0] = rule;
		/* was unset when fast path rule removed */
		flow_flag_set(flow, OFFLOADED);
loop_cont:
		mlx5e_flow_put(priv, flow);
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow))
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;
	u64 lastuse;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		struct encap_flow_item *efi, *tmp;

		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID) ||
		    !mlx5e_encap_take(e))
			continue;

		list_for_each_entry_safe(efi, tmp, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (IS_ERR(mlx5e_flow_get(flow)))
				continue;

			if (mlx5e_is_offloaded_flow(flow)) {
				counter = mlx5e_tc_get_counter(flow);
				lastuse = mlx5_fc_query_lastuse(counter);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					mlx5e_flow_put(netdev_priv(e->out_dev), flow);
					neigh_used = true;
					break;
				}
			}

			mlx5e_flow_put(netdev_priv(e->out_dev), flow);
		}

		mlx5e_encap_put(netdev_priv(e->out_dev), e);
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	WARN_ON(!list_empty(&e->flows));
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

	kfree(e->encap_header);
	kfree(e);
}

void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
		return;
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	/* flow wasn't fully initialized */
	if (!e)
		return;

	mutex_lock(&esw->offloads.encap_tbl_lock);
	list_del(&flow->encaps[out_index].list);
	flow->encaps[out_index].e = NULL;
	if (!refcount_dec_and_test(&e->refcnt)) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		return;
	}
	hash_del_rcu(&e->encap_hlist);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	mlx5e_encap_dealloc(priv, e);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
	kvfree(flow->peer_flow);
	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev, u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	int err;

	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
				 headers_c, headers_v, match_level);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to parse tunnel attributes");
		return err;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			     inner_headers) :
		MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			     outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		MLX5_ADDR_OF(fte_match_param, spec->match_value,
			     inner_headers) :
		MLX5_ADDR_OF(fte_match_param, spec->match_value,
			     outer_headers);
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		if (parse_tunnel_attr(priv, spec, f, filter_dev,
				      outer_match_level))
			return -EOPNOTSUPP;

		/* At this point, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		match_level = inner_match_level;
		headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
						       spec);
		headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
						    spec);
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}
	/* ***  L3 attributes parsing up to here *** */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;
		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
{
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;
	int err;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, spec, f, filter_dev, &inner_match_level,
				 &outer_match_level);
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
				 outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (is_eswitch_flow) {
		flow->esw_attr->inner_match_level = inner_match_level;
		flow->esw_attr->outer_match_level = outer_match_level;
	} else {
		flow->nic_attr->match_level = non_tunnel_match_level;
	}

	return err;
}
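/* For orientation, an illustrative classifier (example command line, not from
 * this file) that ends up here:
 *
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		ip_proto tcp dst_port 80 action drop
 *
 * arrives as a struct flow_cls_offload; __parse_cls_flower() translates the
 * dissector keys (BASIC, PORTS, ...) into fte_match_param criteria/value
 * pairs, and parse_cls_flower() then checks the e-switch min-inline level
 * against the resulting match level.
 */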
struct pedit_headers {
	struct ethhdr  eth;
	struct vlan_hdr vlan;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

struct pedit_headers_action {
	struct pedit_headers	vals;
	struct pedit_headers	masks;
	u32			pedits;
};

static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers_action *hdrs)
{
	u32 *curr_pmask, *curr_pval;

	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
	curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

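/* Each entry in the fields[] table below ties one rewritable header field to
 * its HW modify-header id (MLX5_ACTION_IN_FIELD_OUT_*), its size and offset
 * inside struct pedit_headers, and the offset of the corresponding match
 * field in fte_match_set_lyr_2_4 - the latter so that a rewrite can be
 * skipped when the rule already matches on the exact value being written
 * (see cmp_val_mask()).
 */
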
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
	u32 match_offset;
};

#define OFFLOAD(fw_field, size, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}

/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, int size)
{
	bool same = false;

	switch (size) {
	case sizeof(u8):
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case sizeof(u16):
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case sizeof(u32):
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}

	return same;
}

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  2, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),

	OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,   0, tcp_dport),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0, udp_dport),
};

/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed
 * at max from the SW pedit action. On success, attr->num_mod_hdr_actions
 * says how many HW actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	void *headers_c = get_match_headers_criteria(*action_flags,
						     &parse_attr->spec);
	void *headers_v = get_match_headers_value(*action_flags,
						  &parse_attr->spec);
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions +
		 parse_attr->num_mod_hdr_actions * action_size;

	max_actions = parse_attr->max_mod_hdr_actions;
	nactions = parse_attr->num_mod_hdr_actions;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->size))
				skip = true;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			u32 zero = 0;

			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if (!memcmp(vals_p, &zero, f->size))
				skip = true;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}
		if (skip)
			continue;

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;

	return 0;
}

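/* The first/next_z/last check above accepts only masks with one contiguous
 * run of set bits, since HW takes a single offset/length pair per SET
 * action. For a 16 bit field, a mask of 0x0ff0 gives first = 4,
 * next_z = 12, last = 11 and is offloaded, while 0x0f0f gives first = 0,
 * next_z = 4, last = 11 and is rejected.
 */
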
static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
						  int namespace)
{
	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 struct pedit_headers_action *hdrs,
				 int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
		hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
	/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->max_mod_hdr_actions = max_actions;

	return 0;
}

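/* Sizing rationale (a sketch): one pedit key rewrites at most 32 bits, but a
 * single 32 bit SW key can straddle several HW fields - e.g. a write at
 * ethernet offset 4 touches both dmac_15_0 and part of smac_47_16 - so the
 * buffer is bounded by nkeys * 16 HW actions rather than nkeys alone,
 * matching the worst case noted in the comment above.
 */
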
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct flow_action_entry *act, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
	int err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 htype;

	htype = act->mangle.htype;
	err = -EOPNOTSUPP; /* can't be all optimistic */

	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
		goto out_err;
	}

	if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "The pedit offload action is not supported");
		goto out_err;
	}

	mask = act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;

	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
	if (err)
		goto out_err;

	hdrs[cmd].pedits++;

	return 0;
out_err:
	return err;
}

static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	int err;
	u8 cmd;

	if (!parse_attr->mod_hdr_actions) {
		err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
		if (err)
			goto out_err;
	}

	err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

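/* In iproute2 terms, csum actions are accepted only in combination with a
 * preceding rewrite, e.g.:
 *
 *   ... action pedit ex munge ip ttl set 63 pipe csum ip
 *
 * A standalone "action csum" is refused, since the HW only recalculates
 * checksums as part of a modify-header operation.
 */
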
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};

static bool is_action_keys_supported(const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			return true;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			return true;
		}
	}

	return false;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	bool modify_ip_header;
	void *headers_v;
	u16 ethertype;
	u8 ip_proto;
	int i;

	headers_v = get_match_headers_value(actions, spec);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		if (is_action_keys_supported(act)) {
			modify_ip_header = true;
			break;
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (mlx5e_is_eswitch_flow(flow))
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow_flag_test(flow, EGRESS) &&
	    !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
	      (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
	      (actions & MLX5_FLOW_CONTEXT_ACTION_DROP)))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action, actions,
						     extack);

	return true;
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}

static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
				   const struct flow_action_entry *act,
				   struct mlx5e_tc_flow_parse_attr *parse_attr,
				   struct pedit_headers_action *hdrs,
				   u32 *action, struct netlink_ext_ack *extack)
{
	u16 mask16 = VLAN_VID_MASK;
	u16 val16 = act->vlan.vid & VLAN_VID_MASK;
	const struct flow_action_entry pedit_act = {
		.id = FLOW_ACTION_MANGLE,
		.mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
		.mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
		.mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
		.mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
	};
	u8 match_prio_mask, match_prio_val;
	void *headers_c, *headers_v;
	int err;

	headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
	headers_v = get_match_headers_value(*action, &parse_attr->spec);

	if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
	      MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "VLAN rewrite action must have VLAN protocol match");
		return -EOPNOTSUPP;
	}

	match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
	match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing VLAN prio is not supported");
		return -EOPNOTSUPP;
	}

	err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
				    hdrs, NULL);
	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return err;
}

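/* "action vlan modify id X" is thus implemented as a 12 bit pedit of the
 * TCI halfword rather than as a HW pop+push pair. This only works when the
 * rule itself matches on the VLAN tag, which is why the cvlan_tag match is
 * required above.
 */
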
static int
add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 u32 *action, struct netlink_ext_ack *extack)
{
	const struct flow_action_entry prio_tag_act = {
		.vlan.vid = 0,
		.vlan.prio =
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_value(*action,
							 &parse_attr->spec),
				 first_prio) &
			MLX5_GET(fte_match_set_lyr_2_4,
				 get_match_headers_criteria(*action,
							    &parse_attr->spec),
				 first_prio),
	};

	return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
				       &prio_tag_act, parse_attr, hdrs, action,
				       extack);
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_KERNEL,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow_flag_set(flow, HAIRPIN);
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag.
		 */
		if (parse_attr->num_mod_hdr_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			kfree(parse_attr->mod_hdr_actions);
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

struct encap_key {
	const struct ip_tunnel_key *ip_tun_key;
	struct mlx5e_tc_tunnel *tc_tunnel;
};

static inline int cmp_encap_info(struct encap_key *a,
				 struct encap_key *b)
{
	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
	       a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
}

static inline int hash_encap_info(struct encap_key *key)
{
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tc_tunnel->tunnel_type);
}

static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_rep(priv->netdev) &&
		mlx5e_eswitch_rep(peer_netdev) &&
		same_hw_devs(priv, peer_priv));
}

bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
{
	return refcount_inc_not_zero(&e->refcnt);
}

static struct mlx5e_encap_entry *
mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
		uintptr_t hash_key)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_encap_entry *e;
	struct encap_key e_key;

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info->key;
		e_key.tc_tunnel = e->tunnel;
		if (!cmp_encap_info(&e_key, key) &&
		    mlx5e_encap_take(e))
			return e;
	}

	return NULL;
}

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	const struct ip_tunnel_info *tun_info;
	struct encap_key key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
	if (!key.tc_tunnel) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(&key);

	mutex_lock(&esw->offloads.encap_tbl_lock);
	e = mlx5e_encap_get(priv, &key, hash_key);

	/* must verify if encap is valid or not */
	if (e) {
		mutex_unlock(&esw->offloads.encap_tbl_lock);
		wait_for_completion(&e->res_ready);

		/* Protect against concurrent neigh update. */
		mutex_lock(&esw->offloads.encap_tbl_lock);
		if (e->compl_result) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		err = -ENOMEM;
		goto out_err;
	}

	refcount_set(&e->refcnt, 1);
	init_completion(&e->res_ready);

	e->tun_info = tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err) {
		kfree(e);
		e = NULL;
		goto out_err;
	}

	INIT_LIST_HEAD(&e->flows);
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	/* Protect against concurrent neigh update. */
	mutex_lock(&esw->offloads.encap_tbl_lock);
	complete_all(&e->res_ready);
	if (err) {
		e->compl_result = err;
		goto out_err;
	}

attach_flow:
	flow->encaps[out_index].e = e;
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].encap_id = e->encap_id;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		*encap_valid = false;
	}
	mutex_unlock(&esw->offloads.encap_tbl_lock);

	return err;

out_err:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	if (e)
		mlx5e_encap_put(priv, e);
	return err;
}

static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		return -EINVAL;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}

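/* Up to MLX5_FS_VLAN_DEPTH (two) tags can be pushed or popped per rule; the
 * second level maps to the *_2 action bits above and needs explicit device
 * support, queried via mlx5_eswitch_vlan_actions_supported().
 */
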
static int add_vlan_push_action(struct mlx5e_priv *priv,
				struct mlx5_esw_flow_attr *attr,
				struct net_device **out_dev,
				u32 *action)
{
	struct net_device *vlan_dev = *out_dev;
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_PUSH,
		.vlan.vid = vlan_dev_vlan_id(vlan_dev),
		.vlan.proto = vlan_dev_vlan_proto(vlan_dev),
		.vlan.prio = 0,
	};
	int err;

	err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
	if (err)
		return err;

	*out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
					dev_get_iflink(vlan_dev));
	if (is_vlan_dev(*out_dev))
		err = add_vlan_push_action(priv, attr, out_dev, action);

	return err;
}

static int add_vlan_pop_action(struct mlx5e_priv *priv,
			       struct mlx5_esw_flow_attr *attr,
			       u32 *action)
{
	int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
	struct flow_action_entry vlan_act = {
		.id = FLOW_ACTION_VLAN_POP,
	};
	int err = 0;

	while (nest_level--) {
		err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
		if (err)
			return err;
	}

	return err;
}

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev)
{
	if (is_merged_eswitch_dev(priv, out_dev))
		return true;

	return mlx5e_eswitch_rep(out_dev) &&
	       same_hw_devs(priv, netdev_priv(out_dev));
}

static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	const struct flow_action_entry *act;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper;

				rcu_read_lock();
				uplink_upper =
					netdev_master_upper_dev_get_rcu(uplink_dev);
				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;
				rcu_read_unlock();

				if (is_vlan_dev(out_dev)) {
					err = add_vlan_push_action(priv, attr,
								   &out_dev,
								   &action);
					if (err)
						return err;
				}

				if (is_vlan_dev(parse_attr->filter_dev)) {
					err = add_vlan_pop_action(priv, attr,
								  &action);
					if (err)
						return err;
				}

				if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
					NL_SET_ERR_MSG_MOD(extack,
							   "devices are not on same switch HW, can't offload forwarding");
					pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
					       priv->netdev->name, out_dev->name);
					return -EOPNOTSUPP;
				}

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = info;
				encap = false;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			if (act->id == FLOW_ACTION_VLAN_PUSH &&
			    (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
				/* Replace vlan pop+push with vlan modify */
				action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
				err = add_vlan_rewrite_action(priv,
							      MLX5_FLOW_NAMESPACE_FDB,
							      act, parse_attr, hdrs,
							      &action, extack);
			} else {
				err = parse_tc_vlan_action(priv, act, attr, &action);
			}
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_VLAN_MANGLE:
			err = add_vlan_rewrite_action(priv,
						      MLX5_FLOW_NAMESPACE_FDB,
						      act, parse_attr, hdrs,
						      &action, extack);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;
			break;
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
	}

	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
	    action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
		/* For prio tag mode, replace vlan pop with vlan prio
		 * tag rewrite.
		 */
		action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
						       &action, extack);
		if (err)
			return err;
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, &action, extack);
		if (err)
			return err;
		/* in case all pedit actions are skipped, remove the MOD_HDR
		 * flag. we might have set split_count either by pedit or
		 * pop/push. if there is no pop/push either, reset it too.
		 */
		if (parse_attr->num_mod_hdr_actions == 0) {
			action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			kfree(parse_attr->mod_hdr_actions);
			if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
			      (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
				attr->split_count = 0;
		}
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

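/* A typical FDB rule handled above (iproute2 syntax, hypothetical
 * representor names, VF rep to VF rep):
 *
 *   tc filter add dev <vf0_rep> ingress protocol ip flower skip_sw \
 *           action mirred egress redirect dev <vf1_rep>
 *
 * which becomes FWD_DEST + COUNT with the destination vport taken from the
 * peer rep, subject to the same-eswitch checks in
 * mlx5e_is_valid_eswitch_fwd_dev().
 */
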
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

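/* Offloaded flows are tracked in an rhashtable keyed by the stack-assigned
 * filter cookie, so the configure/delete/stats callbacks below can translate
 * a flow_cls_offload back to its mlx5e_tc_flow in O(1).
 */
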
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}

static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
			      flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int out_index, err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->mod_hdr);
	INIT_LIST_HEAD(&flow->hairpin);
	refcount_set(&flow->refcnt, 1);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = f->common.prio;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}

static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}

static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the mdev that the packet originated from. So packets
	 * redirected to the uplink use the same mdev as the original flow,
	 * and packets redirected from the uplink use the peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

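/* The three entry points below (configure/delete/stats) back the
 * FLOW_CLS_REPLACE, FLOW_CLS_DESTROY and FLOW_CLS_STATS commands of the
 * flow_cls_offload API. Lookups go through the cookie-keyed rhashtable
 * above under RCU, with per-flow refcounts guarding against concurrent
 * deletion.
 */
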
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	rcu_read_unlock();
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto out;
	}

	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return err;
}

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	mlx5e_flow_put(priv, flow);

	return 0;

errout:
	rcu_read_unlock();
	return err;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}

static int apply_police_params(struct mlx5e_priv *priv, u32 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps;
	u16 vport_num;
	int err;

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/secs.
	 * mbit means million bits.
	 * Moreover, if rate is non zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
	rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0;
	vport_num = rpriv->rep->vport;

	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}

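/* Matchall policing example (iproute2 syntax, hypothetical rep name):
 *
 *   tc filter add dev <vf_rep> ingress matchall skip_sw \
 *           action police rate 100mbit burst 10k
 *
 * The byte rate is converted above to mbit/sec (minimum 1 when non-zero)
 * and programmed as the vport rate limit of the representor.
 */
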
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;
	int prio = TC_H_MAJ(ma->common.prio) >> 16;

	if (prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}

void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dpkts, dbytes, jiffies);
}

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	mutex_init(&tc->t_lock);
	mutex_init(&tc->mod_hdr.lock);
	hash_init(tc->mod_hdr.hlist);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	mutex_destroy(&tc->mod_hdr.lock);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}

void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}