// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"
/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}
/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi))
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}
/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains the queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is being cleared in ice_vsi_stop_tx_ring, so handle
	 * only QINT_RQCTL here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);
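	/* Writing 0 to GLINT_DYN_CTL masks the vector's interrupt
	 * generation; synchronize_irq() then waits out any handler that
	 * is already in flight before the queue pair is torn down.
	 */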
	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}
/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_ring *ring;

	ice_cfg_itr(hw, q_vector);

	ice_for_each_ring(ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_ring(ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}
/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains the queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}
/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;
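	/* ICE_CFG_BUSY acts as a simple lock that serializes queue-pair
	 * reconfiguration against other control-path operations on this
	 * VSI; poll for it with a bounded timeout rather than blocking.
	 */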
	while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}
/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	u16 size;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	size = struct_size(qg_buf, txqs, 1);
	qg_buf = kzalloc(size, GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;
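	/* qg_buf is the buffer ice_vsi_cfg_txq() uses to program the Tx
	 * queue into the hardware scheduler; struct_size() sizes it for a
	 * single queue, since queue pairs are restarted one at a time.
	 */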
	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, size);
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
	}

	err = ice_vsi_cfg_rxq(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
	if (err)
		goto free_buf;

	clear_bit(ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}
/**
 * ice_xsk_pool_disable - disable a buffer pool region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
	struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);

	if (!pool)
		return -EINVAL;

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);

	return 0;
}
/**
 * ice_xsk_pool_enable - enable a buffer pool region
 * @vsi: Current VSI
 * @pool: pointer to a requested buffer pool region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (qid >= vsi->netdev->real_num_rx_queues ||
	    qid >= vsi->netdev->real_num_tx_queues)
		return -EINVAL;
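	/* The whole pool is DMA-mapped once up front, so the hot path
	 * never maps or unmaps individual frames; descriptors are armed
	 * with addresses handed out by the pool.
	 */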
	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	return 0;
}
/**
 * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
 * @vsi: Current VSI
 * @pool: buffer pool to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
	bool if_running, pool_present = !!pool;
	int ret = 0, pool_failure = 0;

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
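	/* If the interface is up, the queue pair is quiesced first, the
	 * pool is (un)mapped, and the queue pair is then restarted, so
	 * the swap never races with in-flight traffic.
	 */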
	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
			goto xsk_pool_if_up;
		}
	}

	pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
				      ice_xsk_pool_disable(vsi, qid);

xsk_pool_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && pool_present)
			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
	}

	if (pool_failure) {
		netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
			   pool_present ? "en" : "dis", pool_failure);
		return pool_failure;
	}

	return ret;
}
/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns true if all allocations were successful, false if any fail.
 */
bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *rx_buf;
	bool ok = true;
	dma_addr_t dma;

	if (!count)
		return true;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	rx_buf = &rx_ring->rx_buf[ntu];

	do {
		rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!rx_buf->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
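		/* Zero status_error0 so that a stale DD bit left over from
		 * a previous use of this slot can never be mistaken for a
		 * fresh completion.
		 */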
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		rx_buf++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			rx_buf = rx_ring->rx_buf;
			ntu = 0;
		}
	} while (--count);

	if (rx_ring->next_to_use != ntu) {
		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;
		ice_release_rx_desc(rx_ring, ntu);
	}

	return ok;
}
/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}
/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
	unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
	unsigned int datasize_hard = rx_buf->xdp->data_end -
				     rx_buf->xdp->data_hard_start;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;
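	/* XDP_PASS is the slow path for AF_XDP: the payload is copied out
	 * of the UMEM so the zero-copy buffer can be returned to the pool
	 * immediately afterwards.
	 */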
	skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(rx_buf->xdp);
	rx_buf->xdp = NULL;
	return skb;
}
/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = ICE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ice_ring *xdp_ring;
	u32 act;

	/* ZC path is enabled only when an XDP program is set,
	 * so it cannot be NULL here
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);

	act = bpf_prog_run_xdp(xdp_prog, xdp);
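	/* XDP_REDIRECT is the verdict AF_XDP sockets rely on, so it is
	 * handled up front instead of falling through the switch below.
	 */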
	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		if (result == ICE_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	return result;
}
/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
				   ICE_RX_FLX_DESC_PKT_LEN_M;
		if (!size)
			break;

		rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
		rx_buf->xdp->data_end = rx_buf->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);

		xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(rx_buf->xdp);

			rx_buf->xdp = NULL;
			total_rx_bytes += size;
			total_rx_packets++;
			cleaned_count++;

			ice_bump_ntc(rx_ring);
			continue;
		}
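		/* Anything other than XDP_PASS was fully handled above;
		 * from here on the frame is destined for the network stack
		 * and must be copied into a freshly allocated skb.
		 */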
		skb = ice_construct_skb_zc(rx_ring, rx_buf);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		cleaned_count++;
		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
				       ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}
	if (cleaned_count >= ICE_RX_BUF_WRITE)
		failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);

	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
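	/* need_wakeup protocol: tell user space to kick the driver via
	 * poll()/sendto() only when Rx ran dry or a refill failed;
	 * otherwise NAPI keeps polling and no syscall is needed.
	 */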
	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}

	return failure ? budget : (int)total_rx_packets;
}
/**
 * ice_xmit_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if cleanup/transmission is done.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
	struct ice_tx_desc *tx_desc = NULL;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (likely(budget-- > 0)) {
		struct ice_tx_buf *tx_buf;

		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_buf->bytecount = desc.len;

		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}
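	/* The tail doorbell is rung once for the whole batch rather than
	 * per descriptor; xsk_tx_release() then publishes the consumed Tx
	 * ring entries back to user space.
	 */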
	if (tx_desc) {
		ice_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(xdp_ring->xsk_pool);
	}

	return budget > 0 && work_done;
}
/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}
/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
	int total_packets = 0, total_bytes = 0;
	s16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	u32 xsk_frames = 0;
	bool xmit_done;

	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
	tx_buf = &xdp_ring->tx_buf[ntc];
	ntc -= xdp_ring->count;
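	/* ntc is biased negative by the ring size so that "!ntc" below
	 * detects a wrap without a second comparison per iteration.
	 */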
	do {
		if (!(tx_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		total_bytes += tx_buf->bytecount;
		total_packets++;

		if (tx_buf->raw_buf) {
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			tx_buf->raw_buf = NULL;
		} else {
			xsk_frames++;
		}

		tx_desc->cmd_type_offset_bsz = 0;
		tx_buf++;
		tx_desc++;
		ntc++;

		if (unlikely(!ntc)) {
			ntc -= xdp_ring->count;
			tx_buf = xdp_ring->tx_buf;
			tx_desc = ICE_TX_DESC(xdp_ring, 0);
		}

		prefetch(tx_desc);

	} while (likely(--budget));

	ntc += xdp_ring->count;
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);

	if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
		xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);

	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

	return budget > 0 && xmit_done;
}
/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *ring;

	if (test_bit(ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_txq)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];
	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}
/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buff pool attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP buff pool attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	ice_for_each_rxq(vsi, i) {
		if (xsk_get_pool_from_qid(vsi->netdev, i))
			return true;
	}

	return false;
}
/**
 * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->xdp)
			continue;

		rx_buf->xdp = NULL;
	}
}
/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;
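	/* Two kinds of frames can sit on an XDP Tx ring: XDP_TX/redirect
	 * frames, which own a DMA mapping and are freed via raw_buf, and
	 * zero-copy AF_XDP frames, which only need completion accounting
	 * through xsk_tx_completed().
	 */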
	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}