1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
6 #include <linux/prefetch.h>
8 #include <linux/bpf_trace.h>
10 #include "ice_txrx_lib.h"
13 #include "ice_dcb_lib.h"
16 #define ICE_RX_HDR_SIZE 256
19 * ice_unmap_and_free_tx_buf - Release a Tx buffer
20 * @ring: the ring that owns the buffer
21 * @tx_buf: the buffer to free
24 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
27 if (ice_ring_is_xdp(ring))
28 page_frag_free(tx_buf->raw_buf);
30 dev_kfree_skb_any(tx_buf->skb);
31 if (dma_unmap_len(tx_buf, len))
32 dma_unmap_single(ring->dev,
33 dma_unmap_addr(tx_buf, dma),
34 dma_unmap_len(tx_buf, len),
36 } else if (dma_unmap_len(tx_buf, len)) {
37 dma_unmap_page(ring->dev,
38 dma_unmap_addr(tx_buf, dma),
39 dma_unmap_len(tx_buf, len),
43 tx_buf->next_to_watch = NULL;
45 dma_unmap_len_set(tx_buf, len, 0);
46 /* tx_buf must be completely set up in the transmit path */
49 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
51 return netdev_get_tx_queue(ring->netdev, ring->q_index);
55 * ice_clean_tx_ring - Free all pending Tx buffers in a ring
56 * @tx_ring: ring to be cleaned
58 void ice_clean_tx_ring(struct ice_ring *tx_ring)
62 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
63 ice_xsk_clean_xdp_ring(tx_ring);
67 /* ring already cleared, nothing to do */
71 /* Free all the Tx ring sk_buffs */
72 for (i = 0; i < tx_ring->count; i++)
73 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
76 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
78 /* Zero out the descriptor ring */
79 memset(tx_ring->desc, 0, tx_ring->size);
81 tx_ring->next_to_use = 0;
82 tx_ring->next_to_clean = 0;
87 /* cleanup Tx queue statistics */
88 netdev_tx_reset_queue(txring_txq(tx_ring));
92 * ice_free_tx_ring - Free Tx resources per queue
93 * @tx_ring: Tx descriptor ring for a specific queue
95 * Free all transmit software resources
97 void ice_free_tx_ring(struct ice_ring *tx_ring)
99 ice_clean_tx_ring(tx_ring);
100 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
101 tx_ring->tx_buf = NULL;
104 dmam_free_coherent(tx_ring->dev, tx_ring->size,
105 tx_ring->desc, tx_ring->dma);
106 tx_ring->desc = NULL;
111 * ice_clean_tx_irq - Reclaim resources after transmit completes
112 * @tx_ring: Tx ring to clean
113 * @napi_budget: Used to determine if we are in netpoll
115 * Returns true if there's any budget left (i.e. the clean is finished)
117 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
119 unsigned int total_bytes = 0, total_pkts = 0;
120 unsigned int budget = ICE_DFLT_IRQ_WORK;
121 struct ice_vsi *vsi = tx_ring->vsi;
122 s16 i = tx_ring->next_to_clean;
123 struct ice_tx_desc *tx_desc;
124 struct ice_tx_buf *tx_buf;
126 tx_buf = &tx_ring->tx_buf[i];
127 tx_desc = ICE_TX_DESC(tx_ring, i);
130 prefetch(&vsi->state);
133 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
135 /* if next_to_watch is not set then there is no work pending */
139 smp_rmb(); /* prevent any other reads prior to eop_desc */
141 /* if the descriptor isn't done, no work yet to do */
142 if (!(eop_desc->cmd_type_offset_bsz &
143 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
146 /* clear next_to_watch to prevent false hangs */
147 tx_buf->next_to_watch = NULL;
149 /* update the statistics for this packet */
150 total_bytes += tx_buf->bytecount;
151 total_pkts += tx_buf->gso_segs;
153 if (ice_ring_is_xdp(tx_ring))
154 page_frag_free(tx_buf->raw_buf);
157 napi_consume_skb(tx_buf->skb, napi_budget);
159 /* unmap skb header data */
160 dma_unmap_single(tx_ring->dev,
161 dma_unmap_addr(tx_buf, dma),
162 dma_unmap_len(tx_buf, len),
165 /* clear tx_buf data */
167 dma_unmap_len_set(tx_buf, len, 0);
169 /* unmap remaining buffers */
170 while (tx_desc != eop_desc) {
176 tx_buf = tx_ring->tx_buf;
177 tx_desc = ICE_TX_DESC(tx_ring, 0);
180 /* unmap any remaining paged data */
181 if (dma_unmap_len(tx_buf, len)) {
182 dma_unmap_page(tx_ring->dev,
183 dma_unmap_addr(tx_buf, dma),
184 dma_unmap_len(tx_buf, len),
186 dma_unmap_len_set(tx_buf, len, 0);
190 /* move us one more past the eop_desc for start of next pkt */
196 tx_buf = tx_ring->tx_buf;
197 tx_desc = ICE_TX_DESC(tx_ring, 0);
202 /* update budget accounting */
204 } while (likely(budget));
207 tx_ring->next_to_clean = i;
209 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
211 if (ice_ring_is_xdp(tx_ring))
214 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
217 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
218 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
219 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
220 /* Make sure that anybody stopping the queue after this
221 * sees the new next_to_clean.
224 if (__netif_subqueue_stopped(tx_ring->netdev,
226 !test_bit(__ICE_DOWN, vsi->state)) {
227 netif_wake_subqueue(tx_ring->netdev,
229 ++tx_ring->tx_stats.restart_q;
237 * ice_setup_tx_ring - Allocate the Tx descriptors
238 * @tx_ring: the Tx ring to set up
240 * Return 0 on success, negative on error
242 int ice_setup_tx_ring(struct ice_ring *tx_ring)
244 struct device *dev = tx_ring->dev;
249 /* warn if we are about to overwrite the pointer */
250 WARN_ON(tx_ring->tx_buf);
252 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
254 if (!tx_ring->tx_buf)
257 /* round up to nearest page */
258 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
260 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
262 if (!tx_ring->desc) {
263 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
268 tx_ring->next_to_use = 0;
269 tx_ring->next_to_clean = 0;
270 tx_ring->tx_stats.prev_pkt = -1;
274 devm_kfree(dev, tx_ring->tx_buf);
275 tx_ring->tx_buf = NULL;
280 * ice_clean_rx_ring - Free Rx buffers
281 * @rx_ring: ring to be cleaned
283 void ice_clean_rx_ring(struct ice_ring *rx_ring)
285 struct device *dev = rx_ring->dev;
288 /* ring already cleared, nothing to do */
289 if (!rx_ring->rx_buf)
292 if (rx_ring->xsk_umem) {
293 ice_xsk_clean_rx_ring(rx_ring);
297 /* Free all the Rx ring sk_buffs */
298 for (i = 0; i < rx_ring->count; i++) {
299 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
302 dev_kfree_skb(rx_buf->skb);
308 /* Invalidate cache lines that may have been written to by
309 * device so that we avoid corrupting memory.
311 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
316 /* free resources associated with mapping */
317 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
318 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
319 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
322 rx_buf->page_offset = 0;
326 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
328 /* Zero out the descriptor ring */
329 memset(rx_ring->desc, 0, rx_ring->size);
331 rx_ring->next_to_alloc = 0;
332 rx_ring->next_to_clean = 0;
333 rx_ring->next_to_use = 0;
337 * ice_free_rx_ring - Free Rx resources
338 * @rx_ring: ring to clean the resources from
340 * Free all receive software resources
342 void ice_free_rx_ring(struct ice_ring *rx_ring)
344 ice_clean_rx_ring(rx_ring);
345 if (rx_ring->vsi->type == ICE_VSI_PF)
346 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
347 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
348 rx_ring->xdp_prog = NULL;
349 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
350 rx_ring->rx_buf = NULL;
353 dmam_free_coherent(rx_ring->dev, rx_ring->size,
354 rx_ring->desc, rx_ring->dma);
355 rx_ring->desc = NULL;
360 * ice_setup_rx_ring - Allocate the Rx descriptors
361 * @rx_ring: the Rx ring to set up
363 * Return 0 on success, negative on error
365 int ice_setup_rx_ring(struct ice_ring *rx_ring)
367 struct device *dev = rx_ring->dev;
372 /* warn if we are about to overwrite the pointer */
373 WARN_ON(rx_ring->rx_buf);
375 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
377 if (!rx_ring->rx_buf)
380 /* round up to nearest page */
381 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
383 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
385 if (!rx_ring->desc) {
386 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
391 rx_ring->next_to_use = 0;
392 rx_ring->next_to_clean = 0;
394 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
395 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
397 if (rx_ring->vsi->type == ICE_VSI_PF &&
398 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
399 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
405 devm_kfree(dev, rx_ring->rx_buf);
406 rx_ring->rx_buf = NULL;
411 * ice_rx_offset - Return expected offset into page to access data
412 * @rx_ring: Ring we are requesting offset of
414 * Returns the offset into the Rx buffer page at which packet data begins for this ring.
416 static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
418 if (ice_ring_uses_build_skb(rx_ring))
420 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
421 return XDP_PACKET_HEADROOM;
427 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
429 * @xdp: xdp_buff used as input to the XDP program
430 * @xdp_prog: XDP program to run
432 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
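 *
 * In the caller (ice_clean_rx_irq()) the result is interpreted roughly as:
 * ICE_XDP_PASS means "build an skb as usual", ICE_XDP_TX/ICE_XDP_REDIR mean
 * the page was consumed by XDP and only the page offset is advanced, and
 * ICE_XDP_CONSUMED drops the frame and gives the buffer back for reuse.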
435 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
436 struct bpf_prog *xdp_prog)
438 int err, result = ICE_XDP_PASS;
439 struct ice_ring *xdp_ring;
442 act = bpf_prog_run_xdp(xdp_prog, xdp);
447 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
448 result = ice_xmit_xdp_buff(xdp, xdp_ring);
451 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
452 result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
455 bpf_warn_invalid_xdp_action(act);
456 /* fallthrough -- not supported action */
458 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
459 /* fallthrough -- handle aborts by dropping frame */
461 result = ICE_XDP_CONSUMED;
469 * ice_xdp_xmit - submit packets to XDP ring for transmission
471 * @n: number of XDP frames to be transmitted
472 * @frames: XDP frames to be transmitted
473 * @flags: transmit flags
475 * Returns number of frames successfully sent. Frames that fail are
476 * freed via the XDP return API.
477 * For error cases, a negative errno code is returned and no frames
478 * are transmitted (the caller must handle freeing the frames).
481 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
484 struct ice_netdev_priv *np = netdev_priv(dev);
485 unsigned int queue_index = smp_processor_id();
486 struct ice_vsi *vsi = np->vsi;
487 struct ice_ring *xdp_ring;
490 if (test_bit(__ICE_DOWN, vsi->state))
493 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
496 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
499 xdp_ring = vsi->xdp_rings[queue_index];
500 for (i = 0; i < n; i++) {
501 struct xdp_frame *xdpf = frames[i];
504 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
505 if (err != ICE_XDP_TX) {
506 xdp_return_frame_rx_napi(xdpf);
511 if (unlikely(flags & XDP_XMIT_FLUSH))
512 ice_xdp_ring_update_tail(xdp_ring);
518 * ice_alloc_mapped_page - recycle or make a new page
519 * @rx_ring: ring to use
520 * @bi: rx_buf struct to modify
522 * Returns true if the page was successfully allocated or reused.
526 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
528 struct page *page = bi->page;
531 /* since we are recycling buffers we should seldom need to alloc */
533 rx_ring->rx_stats.page_reuse_count++;
537 /* alloc new page for storage */
538 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
539 if (unlikely(!page)) {
540 rx_ring->rx_stats.alloc_page_failed++;
544 /* map page for use */
545 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
546 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
548 /* if mapping failed free memory back to system since
549 * there isn't much point in holding memory we can't use
551 if (dma_mapping_error(rx_ring->dev, dma)) {
552 __free_pages(page, ice_rx_pg_order(rx_ring));
553 rx_ring->rx_stats.alloc_page_failed++;
559 bi->page_offset = ice_rx_offset(rx_ring);
560 page_ref_add(page, USHRT_MAX - 1);
561 bi->pagecnt_bias = USHRT_MAX;
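/* A short note on the scheme above: the driver takes USHRT_MAX references
 * up front and tracks its own share in pagecnt_bias, so
 * page_count(page) - pagecnt_bias is (roughly) how many references the
 * rest of the stack still holds; ice_can_reuse_rx_page() later checks, on
 * 4K-page systems, that this difference stays at most 1 before recycling
 * the page.
 */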
567 * ice_alloc_rx_bufs - Replace used receive buffers
568 * @rx_ring: ring to place buffers on
569 * @cleaned_count: number of buffers to replace
571 * Returns false if all allocations were successful, true if any failed. Returning
572 * true signals to the caller that we didn't replace cleaned_count buffers and
573 * there is more work to do.
575 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
576 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
577 * multiple tail writes per call.
579 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
581 union ice_32b_rx_flex_desc *rx_desc;
582 u16 ntu = rx_ring->next_to_use;
583 struct ice_rx_buf *bi;
585 /* do nothing if no valid netdev defined */
586 if (!rx_ring->netdev || !cleaned_count)
589 /* get the Rx descriptor and buffer based on next_to_use */
590 rx_desc = ICE_RX_DESC(rx_ring, ntu);
591 bi = &rx_ring->rx_buf[ntu];
594 /* if we fail here, we have work remaining */
595 if (!ice_alloc_mapped_page(rx_ring, bi))
598 /* sync the buffer for use by the device */
599 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
604 /* Refresh the desc even if buffer_addrs didn't change
605 * because each write-back erases this info.
607 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
612 if (unlikely(ntu == rx_ring->count)) {
613 rx_desc = ICE_RX_DESC(rx_ring, 0);
614 bi = rx_ring->rx_buf;
618 /* clear the status bits for the next_to_use descriptor */
619 rx_desc->wb.status_error0 = 0;
622 } while (cleaned_count);
624 if (rx_ring->next_to_use != ntu)
625 ice_release_rx_desc(rx_ring, ntu);
627 return !!cleaned_count;
631 * ice_page_is_reserved - check if reuse is possible
632 * @page: page struct to check
634 static bool ice_page_is_reserved(struct page *page)
636 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
640 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
641 * @rx_buf: Rx buffer to adjust
642 * @size: Size of adjustment
644 * Update the offset within page so that Rx buf will be ready to be reused.
645 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
646 * so the second half of page assigned to Rx buffer will be used, otherwise
647 * the offset is moved by the @size bytes
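 *
 * As a concrete illustration (assuming the default 2048 byte buffers on a
 * 4K-page system): page_offset ^= 2048 simply toggles between offset 0 and
 * offset 2048, so the two halves of the page are used alternately while the
 * page itself keeps getting recycled.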
650 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
652 #if (PAGE_SIZE < 8192)
653 /* flip page offset to other buffer */
654 rx_buf->page_offset ^= size;
656 /* move offset up to the next cache line */
657 rx_buf->page_offset += size;
662 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
663 * @rx_buf: buffer containing the page
665 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
666 * which will assign the current buffer to the buffer that next_to_alloc is
667 * pointing to; otherwise, the DMA mapping needs to be destroyed and
670 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
672 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
673 struct page *page = rx_buf->page;
675 /* avoid re-using remote pages */
676 if (unlikely(ice_page_is_reserved(page)))
679 #if (PAGE_SIZE < 8192)
680 /* if we are only owner of page we can reuse it */
681 if (unlikely((page_count(page) - pagecnt_bias) > 1))
684 #define ICE_LAST_OFFSET \
685 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
686 if (rx_buf->page_offset > ICE_LAST_OFFSET)
688 #endif /* PAGE_SIZE < 8192) */
690 /* If we have drained the page fragment pool we need to update
691 * the pagecnt_bias and page count so that we fully restock the
692 * number of references the driver holds.
694 if (unlikely(pagecnt_bias == 1)) {
695 page_ref_add(page, USHRT_MAX - 1);
696 rx_buf->pagecnt_bias = USHRT_MAX;
703 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
704 * @rx_ring: Rx descriptor ring to transact packets on
705 * @rx_buf: buffer containing page to add
706 * @skb: sk_buff to place the data into
707 * @size: packet length from rx_desc
709 * This function will add the data contained in rx_buf->page to the skb.
710 * It will just attach the page as a frag to the skb.
711 * The function will then update the page offset.
714 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
715 struct sk_buff *skb, unsigned int size)
717 #if (PAGE_SIZE >= 8192)
718 unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
720 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
725 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
726 rx_buf->page_offset, size, truesize);
728 /* page is being used so we must update the page offset */
729 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
733 * ice_reuse_rx_page - page flip buffer and store it back on the ring
734 * @rx_ring: Rx descriptor ring to store buffers on
735 * @old_buf: donor buffer to have page reused
737 * Synchronizes page for reuse by the adapter
740 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
742 u16 nta = rx_ring->next_to_alloc;
743 struct ice_rx_buf *new_buf;
745 new_buf = &rx_ring->rx_buf[nta];
747 /* update, and store next to alloc */
749 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
751 /* Transfer page from old buffer to new buffer.
752 * Move each member individually to avoid possible store
753 * forwarding stalls and unnecessary copy of skb.
755 new_buf->dma = old_buf->dma;
756 new_buf->page = old_buf->page;
757 new_buf->page_offset = old_buf->page_offset;
758 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
762 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
763 * @rx_ring: Rx descriptor ring to transact packets on
764 * @skb: skb to be used
765 * @size: size of buffer to add to skb
767 * This function will pull an Rx buffer from the ring and synchronize it
768 * for use by the CPU.
770 static struct ice_rx_buf *
771 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
772 const unsigned int size)
774 struct ice_rx_buf *rx_buf;
776 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
777 prefetchw(rx_buf->page);
782 /* we are reusing so sync this buffer for CPU use */
783 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
784 rx_buf->page_offset, size,
787 /* We have pulled a buffer for use, so decrement pagecnt_bias */
788 rx_buf->pagecnt_bias--;
794 * ice_build_skb - Build skb around an existing buffer
795 * @rx_ring: Rx descriptor ring to transact packets on
796 * @rx_buf: Rx buffer to pull data from
797 * @xdp: xdp_buff pointing to the data
799 * This function builds an skb around an existing Rx buffer, taking care
800 * to set up the skb correctly and avoid any memcpy overhead.
802 static struct sk_buff *
803 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
804 struct xdp_buff *xdp)
806 unsigned int metasize = xdp->data - xdp->data_meta;
807 #if (PAGE_SIZE < 8192)
808 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
810 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
811 SKB_DATA_ALIGN(xdp->data_end -
812 xdp->data_hard_start);
816 /* Prefetch first cache line of first page. If xdp->data_meta
817 * is unused, this points exactly to xdp->data, otherwise we
818 * likely have a consumer accessing first few bytes of meta
819 * data, and then actual data.
821 prefetch(xdp->data_meta);
822 #if L1_CACHE_BYTES < 128
823 prefetch((void *)(xdp->data + L1_CACHE_BYTES));
825 /* build an skb around the page buffer */
826 skb = build_skb(xdp->data_hard_start, truesize);
830 /* must record the Rx queue, otherwise OS features such as
831 * symmetric queue won't work
833 skb_record_rx_queue(skb, rx_ring->q_index);
835 /* update pointers within the skb to store the data */
836 skb_reserve(skb, xdp->data - xdp->data_hard_start);
837 __skb_put(skb, xdp->data_end - xdp->data);
839 skb_metadata_set(skb, metasize);
841 /* buffer is used by skb, update page_offset */
842 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
848 * ice_construct_skb - Allocate skb and populate it
849 * @rx_ring: Rx descriptor ring to transact packets on
850 * @rx_buf: Rx buffer to pull data from
851 * @xdp: xdp_buff pointing to the data
853 * This function allocates an skb. It then populates it with the page
854 * data from the current receive descriptor, taking care to set up the
855 * skb correctly.
857 static struct sk_buff *
858 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
859 struct xdp_buff *xdp)
861 unsigned int size = xdp->data_end - xdp->data;
862 unsigned int headlen;
865 /* prefetch first cache line of first page */
867 #if L1_CACHE_BYTES < 128
868 prefetch((void *)(xdp->data + L1_CACHE_BYTES));
869 #endif /* L1_CACHE_BYTES */
871 /* allocate a skb to store the frags */
872 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
873 GFP_ATOMIC | __GFP_NOWARN);
877 skb_record_rx_queue(skb, rx_ring->q_index);
878 /* Determine available headroom for copy */
880 if (headlen > ICE_RX_HDR_SIZE)
881 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
883 /* align pull length to size of long to optimize memcpy performance */
884 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
887 /* if we exhaust the linear part then add what is left as a frag */
890 #if (PAGE_SIZE >= 8192)
891 unsigned int truesize = SKB_DATA_ALIGN(size);
893 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
895 skb_add_rx_frag(skb, 0, rx_buf->page,
896 rx_buf->page_offset + headlen, size, truesize);
897 /* buffer is used by skb, update page_offset */
898 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
900 /* buffer is unused, reset bias back to rx_buf; data was copied
901 * onto skb's linear part so there's no need for adjusting
902 * page offset and we can reuse this buffer as-is
904 rx_buf->pagecnt_bias++;
911 * ice_put_rx_buf - Clean up used buffer and either recycle or free
912 * @rx_ring: Rx descriptor ring to transact packets on
913 * @rx_buf: Rx buffer to pull data from
915 * This function will update next_to_clean and then clean up the contents
916 * of the rx_buf. It will either recycle the buffer or unmap it and free
917 * the associated resources.
919 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
921 u32 ntc = rx_ring->next_to_clean + 1;
923 /* fetch, update, and store next to clean */
924 ntc = (ntc < rx_ring->count) ? ntc : 0;
925 rx_ring->next_to_clean = ntc;
930 if (ice_can_reuse_rx_page(rx_buf)) {
931 /* hand second half of page back to the ring */
932 ice_reuse_rx_page(rx_ring, rx_buf);
933 rx_ring->rx_stats.page_reuse_count++;
935 /* we are not reusing the buffer so unmap it */
936 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
937 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
939 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
942 /* clear contents of buffer_info */
948 * ice_is_non_eop - process handling of non-EOP buffers
949 * @rx_ring: Rx ring being processed
950 * @rx_desc: Rx descriptor for current buffer
951 * @skb: Current socket buffer containing buffer in progress
953 * If the buffer is an EOP buffer, this function exits returning false,
954 * otherwise return true indicating that this is in fact a non-EOP buffer.
957 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
960 /* if we are the last buffer then there is nothing else to do */
961 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
962 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
965 /* place skb in next buffer to be received */
966 rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
967 rx_ring->rx_stats.non_eop_descs++;
973 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
974 * @rx_ring: Rx descriptor ring to transact packets on
975 * @budget: Total limit on number of packets to process
977 * This function provides a "bounce buffer" approach to Rx interrupt
978 * processing. The advantage to this is that on systems that have
979 * expensive overhead for IOMMU access this provides a means of avoiding
980 * it by maintaining the mapping of the page to the system.
982 * Returns amount of work completed
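 *
 * Per packet the flow below is roughly: read the descriptor, sync the
 * matching page half for the CPU, run the XDP program if one is attached,
 * then either recycle the buffer (XDP_TX/XDP_REDIRECT) or attach the page
 * to an skb via ice_build_skb()/ice_construct_skb()/ice_add_rx_frag(), and
 * finally hand the completed skb to the stack with ice_receive_skb().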
984 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
986 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
987 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
988 unsigned int xdp_res, xdp_xmit = 0;
989 struct bpf_prog *xdp_prog = NULL;
993 xdp.rxq = &rx_ring->xdp_rxq;
995 /* start the loop to process Rx packets bounded by 'budget' */
996 while (likely(total_rx_pkts < (unsigned int)budget)) {
997 union ice_32b_rx_flex_desc *rx_desc;
998 struct ice_rx_buf *rx_buf;
1005 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1006 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1008 /* status_error_len will always be zero for unused descriptors
1009 * because it's cleared in cleanup, and overlaps with hdr_addr
1010 * which is always zero because packet split isn't used. If the
1011 * hardware wrote DD then it will be non-zero.
1013 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1014 if (!ice_test_staterr(rx_desc, stat_err_bits))
1017 /* This memory barrier is needed to keep us from reading
1018 * any other fields out of the rx_desc until we know the
1019 * DD bit is set.
1023 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1024 ICE_RX_FLX_DESC_PKT_LEN_M;
1026 /* retrieve a buffer from the ring */
1027 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1031 xdp.data_end = NULL;
1032 xdp.data_hard_start = NULL;
1033 xdp.data_meta = NULL;
1037 xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
1038 xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
1039 xdp.data_meta = xdp.data;
1040 xdp.data_end = xdp.data + size;
1043 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1049 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1053 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1054 unsigned int truesize;
1056 #if (PAGE_SIZE < 8192)
1057 truesize = ice_rx_pg_size(rx_ring) / 2;
1059 truesize = SKB_DATA_ALIGN(ice_rx_offset(rx_ring) +
1062 xdp_xmit |= xdp_res;
1063 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
1065 rx_buf->pagecnt_bias++;
1067 total_rx_bytes += size;
1071 ice_put_rx_buf(rx_ring, rx_buf);
1075 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1076 } else if (likely(xdp.data)) {
1077 if (ice_ring_uses_build_skb(rx_ring))
1078 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1080 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1082 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1084 /* exit if we failed to retrieve a buffer */
1086 rx_ring->rx_stats.alloc_buf_failed++;
1088 rx_buf->pagecnt_bias++;
1092 ice_put_rx_buf(rx_ring, rx_buf);
1095 /* skip if it is a non-EOP desc */
1096 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1099 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1100 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1101 dev_kfree_skb_any(skb);
1105 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1106 if (ice_test_staterr(rx_desc, stat_err_bits))
1107 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1109 /* pad the skb if needed, to make a valid ethernet frame */
1110 if (eth_skb_pad(skb)) {
1115 /* probably a little skewed due to removing CRC */
1116 total_rx_bytes += skb->len;
1118 /* populate checksum, VLAN, and protocol */
1119 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1120 ICE_RX_FLEX_DESC_PTYPE_M;
1122 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1124 /* send completed skb up the stack */
1125 ice_receive_skb(rx_ring, skb, vlan_tag);
1127 /* update budget accounting */
1131 /* return up to cleaned_count buffers to hardware */
1132 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1135 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1137 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1139 /* guarantee a trip back through this routine if there was a failure */
1140 return failure ? budget : (int)total_rx_pkts;
1144 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1145 * @port_info: port_info structure containing the current link speed
1146 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1147 * @itr: ITR value to update
1149 * Calculate how big of an increment should be applied to the ITR value passed
1150 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
1151 * link speed.
1153 * The following is a calculation derived from:
1154 * wmem_default / (size + overhead) = desired_pkts_per_int
1155 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
1156 * (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value
1158 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1159 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1160 * formula down to:
1161 *
1162 *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
1163 * ITR = -------------------------------------------- * --------------
1164 *                         rate                        pkt_size + 640
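 *
 * As a rough sanity check on the constants used below (assuming
 * wmem_default = 212992): 212992 * 8 * 1000000 / link_rate comes out to
 * about 17 at 100 Gbps, 34 at 50 Gbps, 43 at 40 Gbps, 68 at 25 Gbps,
 * 85 at 20 Gbps and 170 at 10 Gbps, which is where the multipliers in the
 * DIV_ROUND_UP() calls come from.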
1167 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1168 unsigned int avg_pkt_size,
1171 switch (port_info->phy.link_info.link_speed) {
1172 case ICE_AQ_LINK_SPEED_100GB:
1173 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1174 avg_pkt_size + 640);
1176 case ICE_AQ_LINK_SPEED_50GB:
1177 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1178 avg_pkt_size + 640);
1180 case ICE_AQ_LINK_SPEED_40GB:
1181 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1182 avg_pkt_size + 640);
1184 case ICE_AQ_LINK_SPEED_25GB:
1185 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1186 avg_pkt_size + 640);
1188 case ICE_AQ_LINK_SPEED_20GB:
1189 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1190 avg_pkt_size + 640);
1192 case ICE_AQ_LINK_SPEED_10GB:
1195 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1196 avg_pkt_size + 640);
1200 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1201 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1202 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1209 * ice_update_itr - update the adaptive ITR value based on statistics
1210 * @q_vector: structure containing interrupt and ring information
1211 * @rc: structure containing ring performance data
1213 * Stores a new ITR value based on packets and byte
1214 * counts during the last interrupt. The advantage of per interrupt
1215 * computation is faster updates and more accurate ITR for the current
1216 * traffic pattern. Constants in this function were computed
1217 * based on theoretical maximum wire speed and thresholds were set based
1218 * on testing data as well as attempting to minimize response time
1219 * while increasing bulk throughput.
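 *
 * In rough terms (see the checks below): fewer than 4 packets per
 * interrupt pushes toward latency mode, 4-56 packets nudge the delay up
 * slightly, 57-112 packets leave it as-is, 113-256 packets halve it, and
 * anything above 256 packets switches to bulk mode where the value is
 * derived from link speed and average packet size.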
1222 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1224 unsigned long next_update = jiffies;
1225 unsigned int packets, bytes, itr;
1226 bool container_is_rx;
1228 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1231 /* If itr_countdown is set it means we programmed an ITR within
1232 * the last 4 interrupt cycles. This has a side effect of us
1233 * potentially firing an early interrupt. In order to work around
1234 * this we need to throw out any data received for a few
1235 * interrupts following the update.
1237 if (q_vector->itr_countdown) {
1238 itr = rc->target_itr;
1242 container_is_rx = (&q_vector->rx == rc);
1243 /* For Rx we want to push the delay up and default to low latency.
1244 * For Tx we want to pull the delay down and default to high latency.
1246 itr = container_is_rx ?
1247 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1248 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1250 /* If we didn't update within up to 1 - 2 jiffies we can assume
1251 * that either packets are coming in so slow there hasn't been
1252 * any work, or that there is so much work that NAPI is dealing
1253 * with interrupt moderation and we don't need to do anything.
1255 if (time_after(next_update, rc->next_update))
1258 prefetch(q_vector->vsi->port_info);
1260 packets = rc->total_pkts;
1261 bytes = rc->total_bytes;
1263 if (container_is_rx) {
1264 * For Rx, if there are 1 to 4 packets and fewer than 9000 bytes,
1265 * assume there is insufficient data to use the bulk rate limiting
1266 * approach unless Tx is already in bulk rate limiting. We
1267 * are likely latency driven.
1269 if (packets && packets < 4 && bytes < 9000 &&
1270 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1271 itr = ICE_ITR_ADAPTIVE_LATENCY;
1272 goto adjust_by_size_and_speed;
1274 } else if (packets < 4) {
1275 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1276 * bulk mode and we are receiving 4 or fewer packets just
1277 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1278 * that the Rx can relax.
1280 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1281 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1282 ICE_ITR_ADAPTIVE_MAX_USECS)
1284 } else if (packets > 32) {
1285 /* If we have processed over 32 packets in a single interrupt
1286 * for Tx assume we need to switch over to "bulk" mode.
1288 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1291 /* We have no packets to actually measure against. This means
1292 * either one of the other queues on this vector is active or
1293 * we are a Tx queue doing TSO with too high of an interrupt rate.
1295 * Between 4 and 56 we can assume that our current interrupt delay
1296 * is only slightly too low. As such we should increase it by a small
1297 * fixed amount.
1300 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1301 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1302 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1303 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1308 if (packets <= 256) {
1309 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1310 itr &= ICE_ITR_MASK;
1312 /* Between 56 and 112 is our "goldilocks" zone where we are
1313 * working out "just right". Just report that our current
1314 * ITR is good for us.
1319 /* If packet count is 128 or greater we are likely looking
1320 * at a slight overrun of the delay we want. Try halving
1321 * our delay to see if that will cut the number of packets
1322 * in half per interrupt.
1325 itr &= ICE_ITR_MASK;
1326 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1327 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1332 /* The paths below assume we are dealing with a bulk ITR since
1333 * number of packets is greater than 256. We are just going to have
1334 * to compute a value and try to bring the count under control,
1335 * though for smaller packet sizes there isn't much we can do as
1336 * NAPI polling will likely be kicking in sooner rather than later.
1338 itr = ICE_ITR_ADAPTIVE_BULK;
1340 adjust_by_size_and_speed:
1342 /* based on checks above packets cannot be 0 so division is safe */
1343 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1344 bytes / packets, itr);
1347 /* write back value */
1348 rc->target_itr = itr;
1350 /* next update should occur within next jiffy */
1351 rc->next_update = next_update + 1;
1353 rc->total_bytes = 0;
1358 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1359 * @itr_idx: interrupt throttling index
1360 * @itr: interrupt throttling value in usecs
1362 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1364 /* The ITR value is reported in microseconds, and the register value is
1365 * recorded in 2 microsecond units. For this reason we only need to
1366 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1367 * granularity as a shift instead of division. The mask makes sure the
1368 * ITR value is never odd so we don't accidentally write into the field
1369 * prior to the ITR field.
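 *
 * As a small worked example: a masked (even) itr of 50 usecs ends up as
 * 25 in the INTERVAL field, i.e. 25 ticks of 2 usecs each.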
1371 itr &= ICE_ITR_MASK;
1373 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1374 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1375 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1378 /* The act of updating the ITR will cause it to immediately trigger. In order
1379 * to prevent this from throwing off adaptive update statistics we defer the
1380 * update so that it can only happen so often. So after either Tx or Rx are
1381 * updated we make the adaptive scheme wait until either the ITR completely
1382 * expires via the next_update expiration or we have been through at least
1385 #define ITR_COUNTDOWN_START 3
1388 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1389 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1391 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1393 struct ice_ring_container *tx = &q_vector->tx;
1394 struct ice_ring_container *rx = &q_vector->rx;
1395 struct ice_vsi *vsi = q_vector->vsi;
1398 * when exiting WB_ON_ITR let's set a low ITR value and trigger
1399 * interrupts to expire right away in case we have more work ready to go
1402 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1403 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1404 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1405 /* set target back to last user set value */
1406 rx->target_itr = rx->itr_setting;
1407 /* set current to what we just wrote and dynamic if needed */
1408 rx->current_itr = ICE_WB_ON_ITR_USECS |
1409 (rx->itr_setting & ICE_ITR_DYNAMIC);
1410 /* allow normal interrupt flow to start */
1411 q_vector->itr_countdown = 0;
1415 /* This will do nothing if dynamic updates are not enabled */
1416 ice_update_itr(q_vector, tx);
1417 ice_update_itr(q_vector, rx);
1419 /* This block of logic allows us to get away with only updating
1420 * one ITR value with each interrupt. The idea is to perform a
1421 * pseudo-lazy update with the following criteria.
1423 * 1. Rx is given higher priority than Tx if both are in same state
1424 * 2. If we must reduce an ITR, that reduction is given highest priority.
1425 * 3. We then give priority to increasing ITR based on amount.
1427 if (rx->target_itr < rx->current_itr) {
1428 /* Rx ITR needs to be reduced, this is highest priority */
1429 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1430 rx->current_itr = rx->target_itr;
1431 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1432 } else if ((tx->target_itr < tx->current_itr) ||
1433 ((rx->target_itr - rx->current_itr) <
1434 (tx->target_itr - tx->current_itr))) {
1435 /* Tx ITR needs to be reduced, this is second priority
1436 * Tx ITR needs to be increased more than Rx, fourth priority
1438 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1439 tx->current_itr = tx->target_itr;
1440 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1441 } else if (rx->current_itr != rx->target_itr) {
1442 /* Rx ITR needs to be increased, third priority */
1443 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1444 rx->current_itr = rx->target_itr;
1445 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1447 /* Still have to re-enable the interrupts */
1448 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1449 if (q_vector->itr_countdown)
1450 q_vector->itr_countdown--;
1453 if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
1454 wr32(&q_vector->vsi->back->hw,
1455 GLINT_DYN_CTL(q_vector->reg_idx),
1460 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1461 * @q_vector: q_vector to set WB_ON_ITR on
1463 * We need to tell hardware to write-back completed descriptors even when
1464 * interrupts are disabled. Descriptors will be written back on cache line
1465 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1466 * descriptors may not be written back if they don't fill a cache line until the
1467 * next interrupt.
1469 * This sets the write-back frequency to 2 microseconds as that is the minimum
1470 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1471 * make sure hardware knows we aren't meddling with the INTENA_M bit.
1473 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1475 struct ice_vsi *vsi = q_vector->vsi;
1477 /* already in WB_ON_ITR mode no need to change it */
1478 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1481 if (q_vector->num_ring_rx)
1482 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1483 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1486 if (q_vector->num_ring_tx)
1487 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1488 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1491 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1495 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1496 * @napi: napi struct with our devices info in it
1497 * @budget: amount of work driver is allowed to do this pass, in packets
1499 * This function will clean all queues associated with a q_vector.
1501 * Returns the amount of work done
1503 int ice_napi_poll(struct napi_struct *napi, int budget)
1505 struct ice_q_vector *q_vector =
1506 container_of(napi, struct ice_q_vector, napi);
1507 bool clean_complete = true;
1508 struct ice_ring *ring;
1509 int budget_per_ring;
1512 /* Since the actual Tx work is minimal, we can give the Tx a larger
1513 * budget and be more aggressive about cleaning up the Tx descriptors.
1515 ice_for_each_ring(ring, q_vector->tx) {
1516 bool wd = ring->xsk_umem ?
1517 ice_clean_tx_irq_zc(ring, budget) :
1518 ice_clean_tx_irq(ring, budget);
1521 clean_complete = false;
1524 /* Handle case where we are called by netpoll with a budget of 0 */
1525 if (unlikely(budget <= 0))
1528 /* normally we have 1 Rx ring per q_vector */
1529 if (unlikely(q_vector->num_ring_rx > 1))
1530 /* We attempt to distribute budget to each Rx queue fairly, but
1531 * don't allow the budget to go below 1 because that would exit
1532 * polling early.
1534 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1536 /* Max of 1 Rx ring in this q_vector so give it the budget */
1537 budget_per_ring = budget;
1539 ice_for_each_ring(ring, q_vector->rx) {
1542 /* A dedicated path for zero-copy allows making a single
1543 * comparison in the irq context instead of many inside the
1544 * ice_clean_rx_irq function and makes the codebase cleaner.
1546 cleaned = ring->xsk_umem ?
1547 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1548 ice_clean_rx_irq(ring, budget_per_ring);
1549 work_done += cleaned;
1550 /* if we clean as many as budgeted, we must not be done */
1551 if (cleaned >= budget_per_ring)
1552 clean_complete = false;
1555 /* If work not completed, return budget and polling will return */
1556 if (!clean_complete)
1559 /* Exit the polling mode, but don't re-enable interrupts if stack might
1560 * poll us due to busy-polling
1562 if (likely(napi_complete_done(napi, work_done)))
1563 ice_update_ena_itr(q_vector);
1565 ice_set_wb_on_itr(q_vector);
1567 return min_t(int, work_done, budget - 1);
1571 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1572 * @tx_ring: the ring to be checked
1573 * @size: the number of descriptors we want to assure are available
1575 * Returns -EBUSY if a stop is needed, else 0
1577 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1579 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1580 /* Memory barrier before checking head and tail */
1583 /* Check again in a case another CPU has just made room available. */
1584 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1587 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1588 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1589 ++tx_ring->tx_stats.restart_q;
1594 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1595 * @tx_ring: the ring to be checked
1596 * @size: the number of descriptors we want to assure are available
1598 * Returns 0 if stop is not needed
1600 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1602 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1605 return __ice_maybe_stop_tx(tx_ring, size);
1609 * ice_tx_map - Build the Tx descriptor
1610 * @tx_ring: ring to send buffer on
1611 * @first: first buffer info buffer to use
1612 * @off: pointer to struct that holds offload parameters
1614 * This function loops over the skb data pointed to by *first
1615 * and gets a physical address for each memory location and programs
1616 * it and the length into the transmit descriptor.
1619 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1620 struct ice_tx_offload_params *off)
1622 u64 td_offset, td_tag, td_cmd;
1623 u16 i = tx_ring->next_to_use;
1625 unsigned int data_len, size;
1626 struct ice_tx_desc *tx_desc;
1627 struct ice_tx_buf *tx_buf;
1628 struct sk_buff *skb;
1631 td_tag = off->td_l2tag1;
1632 td_cmd = off->td_cmd;
1633 td_offset = off->td_offset;
1636 data_len = skb->data_len;
1637 size = skb_headlen(skb);
1639 tx_desc = ICE_TX_DESC(tx_ring, i);
1641 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1642 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1643 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1644 ICE_TX_FLAGS_VLAN_S;
1647 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1651 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1652 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1654 if (dma_mapping_error(tx_ring->dev, dma))
1657 /* record length, and DMA address */
1658 dma_unmap_len_set(tx_buf, len, size);
1659 dma_unmap_addr_set(tx_buf, dma, dma);
1661 /* align size to end of page */
1662 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1663 tx_desc->buf_addr = cpu_to_le64(dma);
1665 /* account for data chunks larger than the hardware
1666 * can handle
1667 */
1668 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1669 tx_desc->cmd_type_offset_bsz =
1670 build_ctob(td_cmd, td_offset, max_data, td_tag);
1675 if (i == tx_ring->count) {
1676 tx_desc = ICE_TX_DESC(tx_ring, 0);
1683 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1684 tx_desc->buf_addr = cpu_to_le64(dma);
1687 if (likely(!data_len))
1690 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1696 if (i == tx_ring->count) {
1697 tx_desc = ICE_TX_DESC(tx_ring, 0);
1701 size = skb_frag_size(frag);
1704 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1707 tx_buf = &tx_ring->tx_buf[i];
1710 /* record bytecount for BQL */
1711 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1713 /* record SW timestamp if HW timestamp is not available */
1714 skb_tx_timestamp(first->skb);
1717 if (i == tx_ring->count)
1720 /* write last descriptor with RS and EOP bits */
1721 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1722 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size,
1725 /* Force memory writes to complete before letting h/w know there
1726 * are new descriptors to fetch.
1728 * We also use this memory barrier to make certain all of the
1729 * status bits have been updated before next_to_watch is written.
1733 /* set next_to_watch value indicating a packet is present */
1734 first->next_to_watch = tx_desc;
1736 tx_ring->next_to_use = i;
1738 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1740 /* notify HW of packet */
1741 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1742 writel(i, tx_ring->tail);
1748 /* clear DMA mappings for failed tx_buf map */
1750 tx_buf = &tx_ring->tx_buf[i];
1751 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1752 if (tx_buf == first)
1759 tx_ring->next_to_use = i;
1763 * ice_tx_csum - Enable Tx checksum offloads
1764 * @first: pointer to the first descriptor
1765 * @off: pointer to struct that holds offload parameters
1767 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
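 *
 * The td_offset value built here packs header lengths in the units the
 * hardware expects: MACLEN in 2-byte words, IPLEN and L4LEN in 4-byte
 * words. For example, a plain 14-byte Ethernet header, 20-byte IPv4 header
 * and 20-byte TCP header work out to MACLEN = 7, IPLEN = 5 and L4LEN = 5.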
1770 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1772 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1773 struct sk_buff *skb = first->skb;
1783 __be16 frag_off, protocol;
1784 unsigned char *exthdr;
1785 u32 offset, cmd = 0;
1788 if (skb->ip_summed != CHECKSUM_PARTIAL)
1791 ip.hdr = skb_network_header(skb);
1792 l4.hdr = skb_transport_header(skb);
1794 /* compute outer L2 header size */
1795 l2_len = ip.hdr - skb->data;
1796 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1798 if (skb->encapsulation)
1801 /* Enable IP checksum offloads */
1802 protocol = vlan_get_protocol(skb);
1803 if (protocol == htons(ETH_P_IP)) {
1804 l4_proto = ip.v4->protocol;
1805 /* the stack computes the IP header checksum already, the only time we
1806 * need the hardware to recompute it is in the case of TSO.
1808 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1809 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1811 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1813 } else if (protocol == htons(ETH_P_IPV6)) {
1814 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1815 exthdr = ip.hdr + sizeof(*ip.v6);
1816 l4_proto = ip.v6->nexthdr;
1817 if (l4.hdr != exthdr)
1818 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1824 /* compute inner L3 header size */
1825 l3_len = l4.hdr - ip.hdr;
1826 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1828 /* Enable L4 checksum offloads */
1831 /* enable checksum offloads */
1832 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1833 l4_len = l4.tcp->doff;
1834 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1837 /* enable UDP checksum offload */
1838 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1839 l4_len = (sizeof(struct udphdr) >> 2);
1840 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1843 /* enable SCTP checksum offload */
1844 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1845 l4_len = sizeof(struct sctphdr) >> 2;
1846 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1850 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1852 skb_checksum_help(skb);
1857 off->td_offset |= offset;
1862 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1863 * @tx_ring: ring to send buffer on
1864 * @first: pointer to struct ice_tx_buf
1866 * Checks the skb and sets up the corresponding generic transmit flags
1867 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1869 * Returns an error code if the frame should be dropped, otherwise
1870 * returns 0 to indicate the flags have been set properly.
1873 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1875 struct sk_buff *skb = first->skb;
1876 __be16 protocol = skb->protocol;
1878 if (protocol == htons(ETH_P_8021Q) &&
1879 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1880 /* when HW VLAN acceleration is turned off by the user the
1881 * stack sets the protocol to 8021q so that the driver
1882 * can take any steps required to support the SW only
1883 * VLAN handling. In our case the driver doesn't need
1884 * to take any further steps so just set the protocol
1885 * to the encapsulated ethertype.
1887 skb->protocol = vlan_get_protocol(skb);
1891 /* if we have a HW VLAN tag being added, default to the HW one */
1892 if (skb_vlan_tag_present(skb)) {
1893 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1894 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1895 } else if (protocol == htons(ETH_P_8021Q)) {
1896 struct vlan_hdr *vhdr, _vhdr;
1898 /* for SW VLAN, check the next protocol and store the tag */
1899 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1905 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1906 ICE_TX_FLAGS_VLAN_S;
1907 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1910 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1914 * ice_tso - computes mss and TSO length to prepare for TSO
1915 * @first: pointer to struct ice_tx_buf
1916 * @off: pointer to struct that holds offload parameters
1918 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1921 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1923 struct sk_buff *skb = first->skb;
1934 u64 cd_mss, cd_tso_len;
1935 u32 paylen, l4_start;
1938 if (skb->ip_summed != CHECKSUM_PARTIAL)
1941 if (!skb_is_gso(skb))
1944 err = skb_cow_head(skb, 0);
1948 /* cppcheck-suppress unreadVariable */
1949 ip.hdr = skb_network_header(skb);
1950 l4.hdr = skb_transport_header(skb);
1952 /* initialize outer IP header fields */
1953 if (ip.v4->version == 4) {
1957 ip.v6->payload_len = 0;
1960 /* determine offset of transport header */
1961 l4_start = l4.hdr - skb->data;
1963 /* remove payload length from checksum */
1964 paylen = skb->len - l4_start;
1966 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
1967 csum_replace_by_diff(&l4.udp->check,
1968 (__force __wsum)htonl(paylen));
1969 /* compute length of UDP segmentation header */
1970 off->header_len = sizeof(struct udphdr) + l4_start;
1972 csum_replace_by_diff(&l4.tcp->check,
1973 (__force __wsum)htonl(paylen));
1974 /* compute length of TCP segmentation header */
1975 off->header_len = (l4.tcp->doff * 4) + l4_start;
1978 /* update gso_segs and bytecount */
1979 first->gso_segs = skb_shinfo(skb)->gso_segs;
1980 first->bytecount += (first->gso_segs - 1) * off->header_len;
1982 cd_tso_len = skb->len - off->header_len;
1983 cd_mss = skb_shinfo(skb)->gso_size;
1985 /* record cdesc_qw1 with TSO parameters */
1986 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1987 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1988 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1989 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1990 first->tx_flags |= ICE_TX_FLAGS_TSO;
1995 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1996 * @size: transmit request size in bytes
1998 * Due to hardware alignment restrictions (4K alignment), we need to
1999 * assume that we can have no more than 12K of data per descriptor, even
2000 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2001 * Thus, we need to divide by 12K. But division is slow! Instead,
2002 * we decompose the operation into shifts and one relatively cheap
2003 * multiply operation.
2005 * To divide by 12K, we first divide by 4K, then divide by 3:
2006 * To divide by 4K, shift right by 12 bits
2007 * To divide by 3, multiply by 85, then divide by 256
2008 * (Divide by 256 is done by shifting right by 8 bits)
2009 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2010 * 3, we'll underestimate near each multiple of 12K. This is actually more
2011 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2012 * segment. For our purposes this is accurate out to 1M which is orders of
2013 * magnitude greater than our largest possible GSO size.
2015 * This would then be implemented as:
2016 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2018 * Since multiplication and division are commutative, we can reorder
2019 * operations into:
2020 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
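 *
 * A quick numeric check of the shortcut: a 65536 byte chunk needs
 * ceil(65536 / 12288) = 6 descriptors, and ((65536 * 85) >> 20) + 1 = 6,
 * so the shifted multiply agrees with the exact division here.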
2022 static unsigned int ice_txd_use_count(unsigned int size)
2024 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2028 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2031 * Returns number of data descriptors needed for this skb.
2033 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2035 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2036 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2037 unsigned int count = 0, size = skb_headlen(skb);
2040 count += ice_txd_use_count(size);
2045 size = skb_frag_size(frag++);
2052 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2055 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2056 * and so we need to figure out the cases where we need to linearize the skb.
2058 * For TSO we need to count the TSO header and segment payload separately.
2059 * As such we need to check cases where we have 7 fragments or more as we
2060 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2061 * the segment payload in the first descriptor, and another 7 for the
2062 * frags.
2064 static bool __ice_chk_linearize(struct sk_buff *skb)
2066 const skb_frag_t *frag, *stale;
2069 /* no need to check if number of frags is less than 7 */
2070 nr_frags = skb_shinfo(skb)->nr_frags;
2071 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2074 /* We need to walk through the list and validate that each group
2075 * of 6 fragments totals at least gso_size.
2077 nr_frags -= ICE_MAX_BUF_TXD - 2;
2078 frag = &skb_shinfo(skb)->frags[0];
2080 * Initialize sum to the negative value of gso_size minus 1. We
2081 * use this as the worst case scenario in which the frag ahead
2082 * of us only provides one byte which is why we are limited to 6
2083 * descriptors for a single transmit as the header and previous
2084 * fragment are already consuming 2 descriptors.
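 *
 * A small worked example with made-up sizes: for gso_size = 2000 and the
 * first five frags at 500 bytes each, sum starts out at
 * 1 - 2000 + 5 * 500 = 501; as long as adding the next frag and later
 * dropping the stale one keeps sum non-negative, every window of 6
 * consecutive frags carries roughly a full segment of payload and no
 * linearization is needed.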
2086 sum = 1 - skb_shinfo(skb)->gso_size;
2088 /* Add size of frags 0 through 4 to create our initial sum */
2089 sum += skb_frag_size(frag++);
2090 sum += skb_frag_size(frag++);
2091 sum += skb_frag_size(frag++);
2092 sum += skb_frag_size(frag++);
2093 sum += skb_frag_size(frag++);
2095 /* Walk through fragments adding latest fragment, testing it, and
2096 * then removing stale fragments from the sum.
2098 stale = &skb_shinfo(skb)->frags[0];
2100 sum += skb_frag_size(frag++);
2102 /* if sum is negative we failed to make sufficient progress */
2109 sum -= skb_frag_size(stale++);
2116 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2118 * @count: number of buffers used
2120 * Note: Our HW can't scatter-gather more than 8 fragments to build
2121 * a packet on the wire and so we need to figure out the cases where we
2122 * need to linearize the skb.
2124 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2126 /* Both TSO and single send will work if count is less than 8 */
2127 if (likely(count < ICE_MAX_BUF_TXD))
2130 if (skb_is_gso(skb))
2131 return __ice_chk_linearize(skb);
2133 /* we can support up to 8 data buffers for a single send */
2134 return count != ICE_MAX_BUF_TXD;
2138 * ice_xmit_frame_ring - Sends buffer on Tx ring
2140 * @tx_ring: ring to send buffer on
2142 * Returns NETDEV_TX_OK if sent, else an error code
2145 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2147 struct ice_tx_offload_params offload = { 0 };
2148 struct ice_vsi *vsi = tx_ring->vsi;
2149 struct ice_tx_buf *first;
2153 count = ice_xmit_desc_count(skb);
2154 if (ice_chk_linearize(skb, count)) {
2155 if (__skb_linearize(skb))
2157 count = ice_txd_use_count(skb->len);
2158 tx_ring->tx_stats.tx_linearize++;
2161 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2162 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2163 * + 4 desc gap to avoid the cache line where head is,
2164 * + 1 desc for context descriptor,
2165 * otherwise try next time
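 *
 * As a rough example: a linear 1500 byte frame needs count = 1 from
 * ice_xmit_desc_count(), so this check asks for
 * 1 + ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_CTX_DESC free
 * descriptors before committing the frame to the ring.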
2167 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2168 ICE_DESCS_FOR_CTX_DESC)) {
2169 tx_ring->tx_stats.tx_busy++;
2170 return NETDEV_TX_BUSY;
2173 offload.tx_ring = tx_ring;
2175 /* record the location of the first descriptor for this packet */
2176 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2178 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2179 first->gso_segs = 1;
2180 first->tx_flags = 0;
2182 /* prepare the VLAN tagging flags for Tx */
2183 if (ice_tx_prepare_vlan_flags(tx_ring, first))
2186 /* set up TSO offload */
2187 tso = ice_tso(first, &offload);
2191 /* always set up Tx checksum offload */
2192 csum = ice_tx_csum(first, &offload);
2196 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2197 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2198 vsi->type == ICE_VSI_PF &&
2199 vsi->port_info->is_sw_lldp))
2200 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2201 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2202 ICE_TXD_CTX_QW1_CMD_S);
2204 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2205 struct ice_tx_ctx_desc *cdesc;
2206 int i = tx_ring->next_to_use;
2208 /* grab the next descriptor */
2209 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2211 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2213 /* setup context descriptor */
2214 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2215 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2216 cdesc->rsvd = cpu_to_le16(0);
2217 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2220 ice_tx_map(tx_ring, first, &offload);
2221 return NETDEV_TX_OK;
2224 dev_kfree_skb_any(skb);
2225 return NETDEV_TX_OK;
2229 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2231 * @netdev: network interface device structure
2233 * Returns NETDEV_TX_OK if sent, else an error code
2235 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2237 struct ice_netdev_priv *np = netdev_priv(netdev);
2238 struct ice_vsi *vsi = np->vsi;
2239 struct ice_ring *tx_ring;
2241 tx_ring = vsi->tx_rings[skb->queue_mapping];
2243 /* hardware can't handle really short frames, hardware padding works
2244 * beyond this point
2245 */
2246 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2247 return NETDEV_TX_OK;
2249 return ice_xmit_frame_ring(skb, tx_ring);