/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	struct mlx5_flow_table *hairpin_ft;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(3),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
};
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};
struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};
enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};
struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
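
/* Modify-header (pedit) contexts are de-duplicated: the packed array of
 * HW actions serves as the hash key, so flows that apply an identical
 * rewrite program end up sharing a single mod_hdr_id on the device.
 */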
static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return -1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}
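
/* Release a flow's reference on its shared mod_hdr entry. Note the
 * list_empty(next) idiom: after list_del(), if this flow was the last
 * user, 'next' points back at the list head embedded in the
 * mlx5e_mod_hdr_entry, so list_entry() recovers the entry and the HW
 * modify-header context can be freed.
 */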
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		return err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	return err;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
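
/* Hairpin RSS: spread hairpinned traffic over the pair's RQs using the
 * same default indirection table (and the bit-inversion trick for the
 * XOR hash function) as the regular RX path.
 */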
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
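
/* RSS init chains the objects built above: an indirection RQT over the
 * hairpin RQs, one TIR per traffic type on top of it, and a TTC table
 * that steers by L3/L4 type; the TTC table is later used as the flow
 * destination (attr->hairpin_ft) for multi-channel hairpins.
 */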
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
	return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		netdev_warn(priv->netdev,
			    "only PCP trust state supported for hairpin\n");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		netdev_warn(priv->netdev,
			    "masked priority match not supported for hairpin\n");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
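
/* Hairpin pairs are shared per (peer vhca_id, PCP prio). The data
 * buffer is sized at 2^15 bytes, clamped to the device's min/max WQ
 * data size caps, and one channel is opened per 50Gbs share of the
 * link: e.g. on a 100Gbs port link_speed64 = 100000/50000 = 2, so two
 * RQ/SQ hairpin pairs are used (which also enables hairpin RSS).
 */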
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int peer_ifindex = parse_attr->mirred_ifindex;
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		netdev_warn(priv->netdev, "hairpin is not supported\n");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_get_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}
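
/* NIC (non-eswitch) offload: build up to two destinations (the hairpin
 * TIR or TTC table, or the vlan table for plain forwarding, plus an
 * optional flow counter), attach a modify-header context if needed,
 * lazily create the auto-grouped TC flow table on first use, and only
 * then insert the rule.
 */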
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.has_flow_tag = true,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
		if (err) {
			rule = ERR_PTR(err);
			goto err_add_hairpin_flow;
		}
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter = counter;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return rule;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					 out_dev, &encap_dev, flow);
		if (err) {
			rule = ERR_PTR(err);
			if (err != -EAGAIN)
				goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep = rpriv->rep;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	/* we get here if (1) there's no error (rule being null) or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (rule != ERR_PTR(-EAGAIN)) {
		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
		if (IS_ERR(rule))
			goto err_add_rule;
	}
	return rule;

err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
	return rule;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
}
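
/* Neigh update path: once a neighbour used by cached encap entries
 * becomes valid, the pre-built encapsulation headers are programmed to
 * the HW and the waiting flows are (re)offloaded; when it becomes
 * invalid, the rules are removed and the encap_id is released, while
 * the flows stay cached for a later re-add.
 */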
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}
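
/* Feed HW activity back into the neigh state machine: if any offloaded
 * encap flow saw traffic since the last report (per the cached counter's
 * lastuse), poke the neighbour with neigh_event_send() so it isn't
 * garbage collected while the HW is still using its MAC address.
 */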
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour is already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
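
/* Tunnel match parsing: only VXLAN decap is offloaded, and only when
 * the filter specifies the full UDP destination port of an offloaded
 * VXLAN socket. Outer headers are matched here; the caller then
 * re-points headers_c/headers_v at the inner headers.
 */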
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		struct net_device *up_dev = uplink_rpriv->netdev;
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
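
/* min_inline tracks the deepest header the match touches (L2, IP or
 * TCP/UDP), so the caller can verify the eswitch inline mode is large
 * enough for the HW to see the matched fields on transmit.
 */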
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl))
			return -EOPNOTSUPP;

		if (mask->tos || mask->ttl)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;
		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}
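
/* Translation table from SW pedit offsets (within struct pedit_headers)
 * to HW modify-header fields: each entry gives the HW field id, the
 * field size in bytes and the SW offset. offload_pedit_fields() walks
 * this table to convert the accumulated masks/values into HW actions;
 * e.g. a filter action roughly like "pedit ex munge ip ttl set 63"
 * lands in the IP_TTL row.
 */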
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, it says how many HW actions were
 * actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
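
/* Worst-case allocation for the HW action array: each 32-bit pedit key
 * can expand into several table entries (hence the nkeys * 16 bound),
 * further clamped by the device's max_modify_header_actions capability.
 */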
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
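
/* Header rewrite combined with a match is only accepted when checksums
 * can stay correct: for non-IP ethertypes only MAC rewrites happen, so
 * that is always safe; for IP packets, rewrites of IP headers are
 * restricted to TCP/UDP/ICMP flows.
 */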
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts)
{
	const struct tc_action *a;
	bool modify_ip_header;
	LIST_HEAD(actions);
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (i = 0; i < nkeys; i++) {
			htype = tcf_pedit_htype(a, i);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts);

	return true;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u16 func_id, peer_id;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));

	return (func_id == peer_id);
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return 0;
}
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
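
/* Encap route lookups: resolve the tunnel destination and the egress
 * device; if the egress device does not share this netdev's e-switch
 * (switchdev_port_same_parent_id), fall back to the uplink representor
 * so the encapsulated packet still leaves through our HW.
 */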
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
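
/* Build and program an IPv4 VXLAN encap header: resolve the route and
 * neighbour, register the encap entry for neigh updates before
 * sampling the neigh state, then allocate the HW encap context only if
 * the neighbour is valid; otherwise kick neighbour resolution and
 * return -EAGAIN so the flow is offloaded later from the update path.
 */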
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

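/* Note: -EAGAIN above is the deferred-offload path, not a failure. When
 * the neighbour isn't NUD_VALID yet, neigh_event_send() kicks resolution,
 * the encap entry stays cached without MLX5_ENCAP_ENTRY_VALID, and the
 * flow is expected to be offloaded later from the representor's
 * neigh-update handling once the destination MAC resolves.
 */
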
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

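/* Mirrors the IPv4 builder above; the tunnel-key differences are the
 * flowi6 addressing and the flow label derived with ip6_make_flowinfo()
 * from the tunnel TOS and label.
 */
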
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
									   REP_ETH);
	struct net_device *up_dev = uplink_rpriv->netdev;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}

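/* For illustration, a rule that exercises this attach path could be
 * installed with (device names are placeholders):
 *
 *	tc filter add dev <vf_rep> protocol ip parent ffff: flower skip_sw \
 *		action tunnel_key set id 100 src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *			dst_port 4789 \
 *		action mirred egress redirect dev vxlan0
 *
 * dst_port must be set and src_port left unset, matching the checks at
 * the top of this function.
 */
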
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = tcf_mirred_dev(a);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				parse_attr->mirred_ifindex = out_dev->ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan_vid = tcf_vlan_push_vid(a);
				if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
					attr->vlan_prio = tcf_vlan_push_prio(a);
					attr->vlan_proto = tcf_vlan_push_proto(a);
					if (!attr->vlan_proto)
						attr->vlan_proto = htons(ETH_P_8021Q);
				} else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
					   tcf_vlan_push_prio(a)) {
					return -EOPNOTSUPP;
				}
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return err;
}

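/* Rough mapping from parsed TC actions to attr->action bits (illustrative,
 * not exhaustive):
 *
 *	gact drop                 -> DROP | COUNT
 *	mirred redirect (same HW) -> FWD_DEST | COUNT
 *	tunnel_key set + mirred   -> ENCAP | FWD_DEST | COUNT
 *	tunnel_key unset          -> DECAP
 *	vlan push/pop             -> VLAN_PUSH / VLAN_POP
 *
 * Any action not recognized by one of the is_tcf_*() helpers fails the
 * parse with -EINVAL and the rule is not offloaded.
 */
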
int mlx5e_configure_flower(struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		if (err != -EAGAIN)
			goto err_free;
	}

	if (err != -EAGAIN)
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err) {
		mlx5e_tc_del_flow(priv, flow);
		kfree(flow);
	}

	return err;

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

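/* A flow whose add returned -EAGAIN (encap pending neigh resolution) is
 * still inserted into tc->ht, only without MLX5E_TC_FLOW_OFFLOADED, so
 * mlx5e_stats_flower() below treats it as a no-op until the rule actually
 * lands in HW. Note err is overwritten by rhashtable_insert_fast(), so a
 * deferred add still returns success to the stack.
 */
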
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes, packets, lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
	return 0;
}

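/* The counters are read from the driver's cached bulk-query results
 * (mlx5_fc_query_cached) rather than via a synchronous firmware command,
 * which keeps periodic user-space polling such as
 * "tc -s filter show dev <dev> ingress" cheap.
 */
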
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

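/* Flows are hashed by the flower cookie (mlx5e_tc_flow_ht_params above);
 * mlx5e_delete_flower() and mlx5e_stats_flower() look the flow up with
 * the same cookie taken from struct tc_cls_flower_offload.
 */
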
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}