// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include "i40e_txrx_common.h"
 * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
 * @umem: UMEM to DMA map
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_dma_map(struct i40e_vsi *vsi, struct xdp_umem *umem)
	struct i40e_pf *pf = vsi->back;
	for (i = 0; i < umem->npgs; i++) {
		dma = dma_map_page_attrs(dev, umem->pgs[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma))
		umem->pages[i].dma = dma;
	for (j = 0; j < i; j++) {
		dma_unmap_page_attrs(dev, umem->pages[j].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[j].dma = 0;
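/* Mapping note: every page backing the UMEM is mapped up front with
 * DMA_BIDIRECTIONAL and I40E_RX_DMA_ATTR, so the hot Rx/Tx paths only need
 * cheap dma_sync_*() calls. On a mapping failure, the unwind loop above
 * releases only the pages mapped so far (indices 0..i-1) before the error
 * is reported.
 */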
 * i40e_xsk_umem_dma_unmap - DMA unmaps all UMEM memory for the netdev
 * @umem: UMEM to DMA unmap
static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
	struct i40e_pf *pf = vsi->back;
	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, I40E_RX_DMA_ATTR);
		umem->pages[i].dma = 0;
 * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
 * @qid: Rx ring to associate UMEM to
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem_fq_reuse *reuseq;
	if (vsi->type != I40E_VSI_MAIN)
	if (qid >= vsi->num_queue_pairs)
	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));
	err = i40e_xsk_umem_dma_map(vsi, umem);
	set_bit(qid, vsi->af_xdp_zc_qps);
	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
		err = i40e_queue_pair_disable(vsi, qid);
		err = i40e_queue_pair_enable(vsi, qid);
		/* Kick-start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
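/* Enable sequence: validate the qid against both the VSI and the netdev
 * queue counts, hand a freshly prepared reuse queue to the UMEM, DMA map
 * it, mark the queue pair in af_xdp_zc_qps and, if the interface is up,
 * bounce the queue pair so it is reconfigured in zero-copy mode before
 * NAPI is kicked.
 */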
 * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
 * @qid: Rx ring to disassociate UMEM from
 * Returns 0 on success, <0 on failure
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
	struct net_device *netdev = vsi->netdev;
	struct xdp_umem *umem;
	umem = xdp_get_umem_from_qid(netdev, qid);
	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
		err = i40e_queue_pair_disable(vsi, qid);
	clear_bit(qid, vsi->af_xdp_zc_qps);
	i40e_xsk_umem_dma_unmap(vsi, umem);
		err = i40e_queue_pair_enable(vsi, qid);
		/* Kick-start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
 * i40e_xsk_umem_setup - Enable/disable a UMEM to/from a certain ring/qid
 * @umem: UMEM to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate UMEM to/from
 * This function enables or disables a UMEM on a certain ring.
 * Returns 0 on success, <0 on failure
int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
	return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
		i40e_xsk_umem_disable(vsi, qid);
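/* This is the entry point used by the XDP core: the driver's ndo_bpf
 * handler dispatches XDP_SETUP_XSK_UMEM here, roughly (a sketch of the
 * call site, which lives in i40e_main.c rather than in this file):
 *
 *	case XDP_SETUP_XSK_UMEM:
 *		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
 *					   xdp->xsk.queue_id);
 */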
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @xdp: xdp_buff used as input to the XDP program
 * This function runs the XDP program on the xdp_buff and returns the verdict.
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
	struct xdp_umem *umem = rx_ring->xsk_umem;
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	/* NB! xdp_prog will always be !NULL, because this path is only
	 * enabled by setting an XDP program.
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);
	offset = xdp->data - xdp->data_hard_start;
	xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		bpf_warn_invalid_xdp_action(act);
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping packet */
		result = I40E_XDP_CONSUMED;
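/* Verdict handling: XDP_PASS falls through to the copy/skb path, XDP_TX is
 * sent on this queue's dedicated XDP ring, XDP_REDIRECT goes through
 * xdp_do_redirect() (typically into the AF_XDP socket), and anything else
 * is treated as consumed so the buffer gets recycled. The
 * xsk_umem_adjust_offset() call re-encodes any headroom adjustment the
 * program made into the UMEM handle, which matters for unaligned chunks.
 */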
 * i40e_alloc_buffer_zc - Allocates an i40e_rx_buffer
 * @bi: Rx buffer to populate
 * This function allocates an Rx buffer. The buffer can come from the fill
 * queue, or via the recycle queue (next_to_alloc).
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
				 struct i40e_rx_buffer *bi)
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = bi->addr;
		rx_ring->rx_stats.page_reuse_count++;
	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
	hr = umem->headroom + XDP_PACKET_HEADROOM;
	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->addr = xdp_umem_get_data(umem, handle);
	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
	xsk_umem_discard_addr(umem);
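/* Fast-path allocation: a buffer that still carries an address is a
 * recycled one, so only the reuse counter is bumped. Otherwise an address
 * is peeked from the fill queue, translated into DMA/virtual addresses
 * with the configured headroom, and finally consumed with
 * xsk_umem_discard_addr().
 */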
 * i40e_alloc_buffer_slow_zc - Allocates an i40e_rx_buffer
 * @bi: Rx buffer to populate
 * This function allocates an Rx buffer. The buffer can come from the fill
 * queue, or via the reuse queue.
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi)
	struct xdp_umem *umem = rx_ring->xsk_umem;
	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
	handle &= rx_ring->xsk_umem->chunk_mask;
	hr = umem->headroom + XDP_PACKET_HEADROOM;
	bi->dma = xdp_umem_get_dma(umem, handle);
	bi->addr = xdp_umem_get_data(umem, handle);
	bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
	xsk_umem_discard_addr_rq(umem);
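/* The _rq variants (xsk_umem_peek_addr_rq()/xsk_umem_discard_addr_rq())
 * drain the kernel-side reuse queue registered at UMEM enable time before
 * touching the user-supplied fill queue. Addresses recycled that way may
 * still carry an offset, which is presumably why this path masks the
 * handle with chunk_mask first.
 */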
static __always_inline bool
__i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
			   bool alloc(struct i40e_ring *rx_ring,
				      struct i40e_rx_buffer *bi))
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct i40e_rx_buffer *bi;
	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_bi[ntu];
		if (!alloc(rx_ring, bi)) {
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma, 0,
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
		rx_desc->wb.qword1.status_error_len = 0;
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);
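/* Tail handling: i40e_release_rx_desc() is only called when next_to_use
 * actually moved, and it is what writes the new tail value to hardware.
 * Clearing status_error_len above is required so the cleaning loop does
 * not mistake a stale DD bit for a freshly written-back descriptor.
 *
 * The slow variant below is what the ring setup code calls when a ring is
 * (re)configured with a UMEM attached, roughly (a sketch; the call site is
 * outside this file):
 *
 *	ok = ring->xsk_umem ?
 *	     i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
 *	     i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
 */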
 * i40e_alloc_rx_buffers_zc - Allocates a number of Rx buffers
 * @count: The number of buffers to allocate
 * This function allocates a number of Rx buffers from the reuse queue
 * or fill ring and places them on the Rx ring.
 * Returns true for a successful allocation, false otherwise
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_slow_zc);
 * i40e_alloc_rx_buffers_fast_zc - Allocates a number of Rx buffers
 * @count: The number of buffers to allocate
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 * Returns true for a successful allocation, false otherwise
static bool i40e_alloc_rx_buffers_fast_zc(struct i40e_ring *rx_ring, u16 count)
	return __i40e_alloc_rx_buffers_zc(rx_ring, count,
					  i40e_alloc_buffer_zc);
 * i40e_get_rx_buffer_zc - Return the current Rx buffer
 * @size: The size of the rx buffer (read from descriptor)
 * This function returns the current, received Rx buffer, and also
 * does DMA synchronization of the Rx ring.
 * Returns the received Rx buffer
static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
						    const unsigned int size)
	struct i40e_rx_buffer *bi;
	bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
 * i40e_reuse_rx_buffer_zc - Recycle an Rx buffer
 * @old_bi: The Rx buffer to recycle
 * This function recycles a finished Rx buffer, and places it on the
 * recycle queue (next_to_alloc).
static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
				    struct i40e_rx_buffer *old_bi)
	struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
	u16 nta = rx_ring->next_to_alloc;
	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
	/* transfer page from old buffer to new buffer */
	new_bi->dma = old_bi->dma;
	new_bi->addr = old_bi->addr;
	new_bi->handle = old_bi->handle;
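/* The recycle queue is simply the rx_bi array indexed by next_to_alloc:
 * buffers that never left the driver (XDP_DROP, programming status
 * descriptors, the copy/skb path) are parked there so the next allocation
 * can skip the fill queue entirely.
 */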
 * i40e_zca_free - Free callback for MEM_TYPE_ZERO_COPY allocations
 * @alloc: Zero-copy allocator
 * @handle: Buffer handle
void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
	struct i40e_rx_buffer *bi;
	struct i40e_ring *rx_ring;
	rx_ring = container_of(alloc, struct i40e_ring, zca);
	hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	mask = rx_ring->xsk_umem->chunk_mask;
	nta = rx_ring->next_to_alloc;
	bi = &rx_ring->rx_bi[nta];
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
	bi->dma = xdp_umem_get_dma(rx_ring->xsk_umem, handle);
	bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
	bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
					    rx_ring->xsk_umem->headroom);
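/* This callback is hooked up when the Rx ring registers its memory model
 * as MEM_TYPE_ZERO_COPY (done outside this file), so buffers released by
 * the XDP core flow back into this ring's recycle slot instead of leaking
 * out of the UMEM.
 */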
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * This function allocates a new skb from a zero-copy Rx buffer.
 * Returns the skb, or NULL on failure.
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct i40e_rx_buffer *bi,
					     struct xdp_buff *xdp)
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	/* allocate an skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
		skb_metadata_set(skb, metasize);
	i40e_reuse_rx_buffer_zc(rx_ring, bi);
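/* Zero copy only applies to frames redirected into the AF_XDP socket;
 * XDP_PASS frames are copied into a freshly allocated skb here and the
 * UMEM buffer is immediately recycled, so the stack never holds on to
 * UMEM memory.
 */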
 * i40e_inc_ntc - Advance the next_to_clean index
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
	u32 ntc = rx_ring->next_to_clean + 1;
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(I40E_RX_DESC(rx_ring, ntc));
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @budget: NAPI budget
 * Returns amount of work completed
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	xdp.rxq = &rx_ring->xdp_rxq;
	while (likely(total_rx_packets < (unsigned int)budget)) {
		struct i40e_rx_buffer *bi;
		union i40e_rx_desc *rx_desc;
		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
				   !i40e_alloc_rx_buffers_fast_zc(rx_ring,
		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		bi = i40e_clean_programming_status(rx_ring, rx_desc,
			i40e_reuse_rx_buffer_zc(rx_ring, bi);
		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		bi = i40e_get_rx_buffer_zc(rx_ring, size);
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = bi->handle;
		xdp_res = i40e_run_xdp_zc(rx_ring, &xdp);
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
				i40e_reuse_rx_buffer_zc(rx_ring, bi);
			total_rx_bytes += size;
			i40e_inc_ntc(rx_ring);
		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (default not set).
		skb = i40e_construct_skb_zc(rx_ring, bi, &xdp);
			rx_ring->rx_stats.alloc_buff_failed++;
		i40e_inc_ntc(rx_ring);
		if (eth_skb_pad(skb))
		total_rx_bytes += skb->len;
		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
	if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
			xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
		return (int)total_rx_packets;
	return failure ? budget : (int)total_rx_packets;
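/* Rx need_wakeup protocol: the flag is set when the driver could not make
 * progress (allocation failure or an empty ring), telling user space that
 * a syscall is needed to get the fill queue serviced again. A minimal
 * user-space check (sketch only, using the libbpf xsk.h helpers, which are
 * not part of this driver):
 *
 *	if (xsk_ring_prod__needs_wakeup(&fq))
 *		recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
 *			 NULL, NULL);
 */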
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 * Returns true if the work is finished.
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	while (budget-- > 0) {
		if (unlikely(!I40E_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	/* Request an interrupt for the last frame and bump tail ptr. */
	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
					 I40E_TXD_QW1_CMD_SHIFT);
	i40e_xdp_ring_update_tail(xdp_ring);
	xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
		xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
	return !!budget && work_done;
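/* Tx batching: only the last descriptor of the batch gets the RS bit, so
 * the hardware raises a single writeback per batch, and the tail register
 * is bumped once after the loop rather than per frame.
 * xsk_umem_consume_tx_done() then notifies the AF_XDP socket(s) that their
 * Tx descriptors have been consumed.
 */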
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
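/* Only frames that originated from the regular XDP_TX/XDP_REDIRECT path
 * carry an xdpf pointer and need the unmap above; AF_XDP zero-copy frames
 * share the UMEM mapping and are completed purely by count (xsk_frames).
 */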
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 * @napi_budget: NAPI budget
 * Returns true if cleanup/transmission is done.
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
			   struct i40e_ring *tx_ring, int napi_budget)
	unsigned int ntc, total_bytes = 0, budget = vsi->work_limit;
	u32 i, completed_frames, frames_ready, xsk_frames = 0;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	u32 head_idx = i40e_get_head(tx_ring);
	bool work_done = true, xmit_done;
	struct i40e_tx_buffer *tx_bi;
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	frames_ready = head_idx - tx_ring->next_to_clean;
	if (frames_ready == 0) {
	} else if (frames_ready > budget) {
		completed_frames = budget;
		completed_frames = frames_ready;
	ntc = tx_ring->next_to_clean;
	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		total_bytes += tx_bi->bytecount;
		if (++ntc >= tx_ring->count)
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;
	xsk_umem_complete_tx(umem, xsk_frames);
	i40e_arm_wb(tx_ring, vsi, budget);
	i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
	if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
		if (tx_ring->next_to_clean == tx_ring->next_to_use)
			xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
			xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
	xmit_done = i40e_xmit_zc(tx_ring, budget);
	return work_done && xmit_done;
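/* Completion here is driven by the head writeback (i40e_get_head()) rather
 * than per-descriptor DD bits: everything between next_to_clean and the
 * reported head is done. AF_XDP frames are handed back to the socket's
 * completion ring via xsk_umem_complete_tx(), and the same NAPI pass then
 * pushes fresh descriptors through i40e_xmit_zc().
 */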
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 * Returns <0 for errors, 0 otherwise.
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_ring *ring;
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
	if (!i40e_enabled_xdp_vsi(vsi))
	if (queue_id >= vsi->num_queue_pairs)
	if (!vsi->xdp_rings[queue_id]->xsk_umem)
	ring = vsi->xdp_rings[queue_id];
	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);
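/* User space reaches this ndo through the AF_XDP socket: once need_wakeup
 * is negotiated, the application kicks Tx with sendto() (and Rx with
 * recvfrom()/poll()) whenever the corresponding flag is set. A minimal Tx
 * kick, as a sketch using the libbpf helpers (not part of this driver):
 *
 *	if (xsk_ring_prod__needs_wakeup(&tx))
 *		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
 */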
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
	for (i = 0; i < rx_ring->count; i++) {
		struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_bi->handle);
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xdp_umem *umem = tx_ring->xsk_umem;
	struct i40e_tx_buffer *tx_bi;
		tx_bi = &tx_ring->tx_bi[ntc];
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		if (ntc >= tx_ring->count)
	xsk_umem_complete_tx(umem, xsk_frames);
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
	struct net_device *netdev = vsi->netdev;
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xdp_get_umem_from_qid(netdev, i))