// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */

#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include "tc.h"
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;
	enum flow_block_binder_type binder_type;

	struct list_head list;
};
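
/* Tie an encap entry to the neigh hash entry of its destination, and take a
 * reference on the tunnel entropy configuration for the entry's reformat type
 * so it cannot change while the encap is in use. The neigh hash entry is
 * created on first use.
 */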
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e,
				 struct mlx5e_neigh *m_neigh,
				 struct net_device *neigh_dev)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;

	mutex_lock(&rpriv->neigh_update.encap_lock);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
		if (err) {
			mutex_unlock(&rpriv->neigh_update.encap_lock);
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}

	e->nhe = nhe;
	spin_lock(&nhe->encap_list_lock);
	list_add_rcu(&e->encap_list, &nhe->encap_list);
	spin_unlock(&nhe->encap_list_lock);
	mutex_unlock(&rpriv->neigh_update.encap_lock);

	return 0;
}
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;

	if (!e->nhe)
		return;

	spin_lock(&e->nhe->encap_list_lock);
	list_del_rcu(&e->encap_list);
	spin_unlock(&e->nhe->encap_list_lock);

	mlx5e_rep_neigh_entry_release(e->nhe);
	e->nhe = NULL;
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}
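
/* Called on neighbour updates for an encap entry: when the neighbour becomes
 * valid, the cached encap header is patched with the new destination (and
 * source) MAC and the flows are re-offloaded; when it becomes invalid, the
 * offloaded flows are removed so traffic falls back to software.
 */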
void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
			    struct mlx5e_encap_entry *e,
			    bool neigh_connected,
			    unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	bool encap_connected;
	LIST_HEAD(flow_list);

	ASSERT_RTNL();

	mutex_lock(&esw->offloads.encap_tbl_lock);
	encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
	if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
		goto unlock;

	mlx5e_take_all_encap_flows(e, &flow_list);

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e, &flow_list);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		struct net_device *route_dev;

		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, since the flows may be
		 * deleted and re-added when the encap source mac changes.
		 */
		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
		if (route_dev)
			ether_addr_copy(eth->h_source, route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
	}
unlock:
	mutex_unlock(&esw->offloads.encap_tbl_lock);
	mlx5e_put_flow_list(priv, &flow_list);
}
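
/* tc classifier offload dispatch: route FLOW_CLS_* commands from the flow
 * block callback to the mlx5e flower implementation.
 */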
static
int mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
				  struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
static void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u64 dbytes;
	u64 dpkts;

	dpkts = priv->stats.rep_stats.vport_rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = priv->stats.rep_stats.vport_rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats, &priv->stats.rep_stats);
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
static
int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
				    struct tc_cls_matchall_offload *ma)
{
	switch (ma->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlx5e_tc_configure_matchall(priv, ma);
	case TC_CLSMATCHALL_DESTROY:
		return mlx5e_tc_delete_matchall(priv, ma);
	case TC_CLSMATCHALL_STATS:
		mlx5e_tc_stats_matchall(priv, ma);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
	case TC_SETUP_CLSMATCHALL:
		return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct flow_cls_offload tmp, *f = type_data;
	struct mlx5e_priv *priv = cb_priv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err = 0;

	flags = MLX5_TC_FLAG(INGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);
	esw = priv->mdev->priv.eswitch;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		if (!mlx5_chains_prios_supported(esw_chains(esw)))
			return -EOPNOTSUPP;

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
			return -EOPNOTSUPP;
		if (tmp.common.chain_index != 0)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}
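
/* ndo_setup_tc entry point for the representor netdev. Both the tc block
 * (TC_SETUP_BLOCK) and the netfilter flowtable block (TC_SETUP_FT) are bound
 * through flow_block_cb_setup_simple() with per-type callback lists.
 */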
static LIST_HEAD(mlx5e_rep_block_tc_cb_list);
static LIST_HEAD(mlx5e_rep_block_ft_cb_list);

int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
		       void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct flow_block_offload *f = type_data;

	f->unlocked_driver_cb = true;

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_tc_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	case TC_SETUP_FT:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_ft_cb_list,
						  mlx5e_rep_setup_ft_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	int err;

	mutex_init(&uplink_priv->unready_flows_lock);
	INIT_LIST_HEAD(&uplink_priv->unready_flows);

	/* init shared tc flow table */
	err = mlx5e_tc_esw_init(uplink_priv);
	return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
	/* delete shared tc flow table */
	mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
	mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
void mlx5e_rep_tc_enable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);
}
void mlx5e_rep_tc_disable(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
}
int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

	return NOTIFY_OK;
}
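
/* Indirect block offload: tc blocks bound on tunnel, vlan, macvlan and OVS
 * devices stacked on top of this uplink representor are tracked in
 * tc_indr_block_priv_list, keyed by (netdev, binder_type).
 */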
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev,
				 enum flow_block_binder_type binder_type)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev &&
		    cb_priv->binder_type == binder_type)
			return cb_priv;

	return NULL;
}
static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv,
		       unsigned long flags)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int err = 0;

	if (!netif_device_present(indr_priv->rpriv->netdev))
		return -EOPNOTSUPP;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}
static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD);
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ?
		MLX5_TC_FLAG(EGRESS) :
		MLX5_TC_FLAG(INGRESS);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
					      flags);
	default:
		return -EOPNOTSUPP;
	}
}
static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
				      void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;
	struct flow_cls_offload *f = type_data;
	struct flow_cls_offload tmp;
	struct mlx5e_priv *mpriv;
	struct mlx5_eswitch *esw;
	unsigned long flags;
	int err;

	mpriv = netdev_priv(priv->rpriv->netdev);
	esw = mpriv->mdev->priv.eswitch;

	flags = MLX5_TC_FLAG(EGRESS) |
		MLX5_TC_FLAG(ESW_OFFLOAD) |
		MLX5_TC_FLAG(FT_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		memcpy(&tmp, f, sizeof(*f));

		/* Re-use tc offload path by moving the ft flow to the
		 * reserved ft chain.
		 *
		 * FT offload can use prio range [0, INT_MAX], so we normalize
		 * it to range [1, mlx5_esw_chains_get_prio_range(esw)]
		 * as with tc, where prio 0 isn't supported.
		 *
		 * We only support chain 0 of FT offload.
		 */
		if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
		    tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
		    tmp.common.chain_index)
			return -EOPNOTSUPP;

		tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
		tmp.common.prio++;
		err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
		memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
		return err;
	default:
		return -EOPNOTSUPP;
	}
}
static void mlx5e_rep_indr_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}

static LIST_HEAD(mlx5e_block_cb_list);
static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
{
	struct macvlan_dev *macvlan = netdev_priv(dev);

	return macvlan->mode == MACVLAN_MODE_PASSTHRU;
}
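
/* A stacked device qualifies for indirect offload when it is a tunnel device
 * this eswitch can offload, a vlan on top of this uplink representor, a
 * passthru macvlan whose real device is this uplink (or a bond master), or an
 * OVS internal port bound for egress when int-port offload is supported.
 */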
static bool
mlx5e_rep_check_indr_block_supported(struct mlx5e_rep_priv *rpriv,
				     struct net_device *netdev,
				     struct flow_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *macvlan_real_dev;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
		return false;

	if (mlx5e_tc_tun_device_to_offload(priv, netdev))
		return true;

	if (is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev)
		return true;

	if (netif_is_macvlan(netdev)) {
		if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
			netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
			return false;
		}

		macvlan_real_dev = macvlan_dev_real_dev(netdev);

		if (macvlan_real_dev == rpriv->netdev)
			return true;
		if (netif_is_bond_master(macvlan_real_dev))
			return true;
	}

	if (netif_is_ovs_master(netdev) && f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
	    mlx5e_tc_int_port_supported(esw))
		return true;

	return false;
}
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
			   struct mlx5e_rep_priv *rpriv,
			   struct flow_block_offload *f,
			   flow_setup_cb_t *setup_cb,
			   void *data,
			   void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (!mlx5e_rep_check_indr_block_supported(rpriv, netdev, f))
		return -EOPNOTSUPP;

	f->unlocked_driver_cb = true;
	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
		if (indr_priv)
			return -EEXIST;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		indr_priv->binder_type = f->binder_type;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_indr_block_cb_alloc(setup_cb, indr_priv, indr_priv,
						    mlx5e_rep_indr_block_unbind,
						    f, netdev, sch, data, rpriv,
						    cleanup);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block, setup_cb, indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_indr_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
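
/* tc action offload (FLOW_ACT_*): offload actions created independently of
 * any classifier rule. The namespace is FDB when the eswitch is in offloads
 * mode, otherwise the kernel (NIC) namespace.
 */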
static int
mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct flow_action_entry *action;
	struct mlx5e_tc_act *act;
	bool add = false;
	int i;

	/* There is currently no use case for more than one action (e.g. pedit).
	 * When there is, cleaning up multiple actions on error will need to be
	 * handled.
	 */
	if (!flow_offload_has_one_action(&fl_act->action))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	flow_action_for_each(i, action, &fl_act->action) {
		act = mlx5e_tc_act_get(action->id, ns_type);
		if (!act)
			continue;

		if (!act->offload_action)
			continue;

		if (!act->offload_action(priv, fl_act, action))
			add = true;
	}

	return add ? 0 : -EOPNOTSUPP;
}
static int
mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *act;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!act || !act->destroy_action)
		return -EOPNOTSUPP;

	return act->destroy_action(priv, fl_act);
}
static int
mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *act;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		ns_type = MLX5_FLOW_NAMESPACE_FDB;
	else
		ns_type = MLX5_FLOW_NAMESPACE_KERNEL;

	act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!act || !act->stats_action)
		return mlx5e_tc_fill_action_stats(priv, fl_act);

	return act->stats_action(priv, fl_act);
}
static int
mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)
{
	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return mlx5e_rep_indr_replace_act(rpriv, fl_act);
	case FLOW_ACT_DESTROY:
		return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
	case FLOW_ACT_STATS:
		return mlx5e_rep_indr_stats_act(rpriv, fl_act);
	default:
		return -EOPNOTSUPP;
	}
}
static int
mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
			    enum tc_setup_type type,
			    void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return mlx5e_rep_indr_setup_act(rpriv, data);
	default:
		return -EOPNOTSUPP;
	}
}
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
			    enum tc_setup_type type, void *type_data,
			    void *data,
			    void (*cleanup)(struct flow_block_cb *block_cb))
{
	if (!netdev)
		return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_tc_cb,
						  data, cleanup);
	case TC_SETUP_FT:
		return mlx5e_rep_indr_setup_block(netdev, sch, cb_priv, type_data,
						  mlx5e_rep_indr_setup_ft_cb,
						  data, cleanup);
	default:
		return -EOPNOTSUPP;
	}
}
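
/* Register with the core flow_indr infrastructure so that blocks bound on
 * other devices (tunnels, vlan, macvlan, OVS) are relayed to
 * mlx5e_rep_indr_setup_cb above.
 */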
int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;

	/* init indirect block notifications */
	INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);

	return flow_indr_dev_register(mlx5e_rep_indr_setup_cb, rpriv);
}
void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
{
	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
				 mlx5e_rep_indr_block_unbind);
}
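
/* RX restore path: reg_c0 carries the mapped object id (chain/flow) and
 * reg_c1 carries the zone restore id and tunnel id written by the eswitch on
 * miss. They are used here to restore skb metadata (chain, ct state, tunnel
 * info) before the skb is handed to the stack.
 */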
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
			  struct sk_buff *skb)
{
	u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
	struct mlx5e_tc_update_priv tc_priv = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mapping_ctx *mapping_ctx;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;

	reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
	if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
		goto forward;

	/* If mapped_obj_id is not equal to the default flow tag then skb->mark
	 * is not supported and must be reset back to 0.
	 */
	skb->mark = 0;

	priv = netdev_priv(skb->dev);
	esw = priv->mdev->priv.eswitch;
	mapping_ctx = esw->offloads.reg_c0_obj_pool;
	reg_c1 = be32_to_cpu(cqe->ft_metadata);
	zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
	tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;
	ct_priv = uplink_priv->ct_priv;

#ifdef CONFIG_MLX5_EN_IPSEC
	if (!(tunnel_id >> ESW_TUN_OPTS_BITS)) {
		u32 mapped_id;
		u32 metadata;

		mapped_id = tunnel_id & ESW_IPSEC_RX_MAPPED_ID_MASK;
		if (mapped_id &&
		    !mlx5_esw_ipsec_rx_make_metadata(priv, mapped_id, &metadata))
			mlx5e_ipsec_offload_handle_rx_skb(priv->netdev, skb, metadata);
	}
#endif

	if (!mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv,
				 zone_restore_id, tunnel_id, &tc_priv))
		goto free_skb;

forward:
	if (tc_priv.skb_done)
		goto free_skb;

	if (tc_priv.forward_tx)
		dev_queue_xmit(skb);
	else
		napi_gro_receive(rq->cq.napi, skb);

	dev_put(tc_priv.fwd_dev);

	return;

free_skb:
	dev_put(tc_priv.fwd_dev);
	dev_kfree_skb_any(skb);
}