1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
6 #include <linux/prefetch.h>
8 #include <linux/bpf_trace.h>
10 #include "ice_txrx_lib.h"
13 #include "ice_dcb_lib.h"
16 #define ICE_RX_HDR_SIZE 256
18 #define FDIR_DESC_RXDID 0x40
19 #define ICE_FDIR_CLEAN_DELAY 10
22 * ice_prgm_fdir_fltr - Program a Flow Director filter
23 * @vsi: VSI to send dummy packet
24 * @fdir_desc: flow director descriptor
25 * @raw_packet: allocated buffer for flow director
28 ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
31 struct ice_tx_buf *tx_buf, *first;
32 struct ice_fltr_desc *f_desc;
33 struct ice_tx_desc *tx_desc;
34 struct ice_ring *tx_ring;
43 tx_ring = vsi->tx_rings[0];
44 if (!tx_ring || !tx_ring->desc)
48 /* we are using two descriptors to add/del a filter and we can wait */
49 for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
52 msleep_interruptible(1);
55 dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
58 if (dma_mapping_error(dev, dma))
61 /* grab the next descriptor */
62 i = tx_ring->next_to_use;
63 first = &tx_ring->tx_buf[i];
64 f_desc = ICE_TX_FDIRDESC(tx_ring, i);
65 memcpy(f_desc, fdir_desc, sizeof(*f_desc));
68 i = (i < tx_ring->count) ? i : 0;
69 tx_desc = ICE_TX_DESC(tx_ring, i);
70 tx_buf = &tx_ring->tx_buf[i];
73 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
75 memset(tx_buf, 0, sizeof(*tx_buf));
76 dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
77 dma_unmap_addr_set(tx_buf, dma, dma);
79 tx_desc->buf_addr = cpu_to_le64(dma);
80 td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
83 tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
84 tx_buf->raw_buf = raw_packet;
86 tx_desc->cmd_type_offset_bsz =
87 ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);
89 /* Force memory write to complete before letting h/w know
90 * there are new descriptors to fetch.
94 /* mark the data descriptor to be watched */
95 first->next_to_watch = tx_desc;
97 writel(tx_ring->next_to_use, tx_ring->tail);
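/* Note: the tail bump above hands both descriptors to hardware at once:
 * the filter programming descriptor copied from @fdir_desc and the data
 * descriptor that points at the dummy @raw_packet buffer, which is why
 * this function waits for two free descriptors before starting.
 */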
103 * ice_unmap_and_free_tx_buf - Release a Tx buffer
104 * @ring: the ring that owns the buffer
105 * @tx_buf: the buffer to free
108 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
111 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
112 devm_kfree(ring->dev, tx_buf->raw_buf);
113 else if (ice_ring_is_xdp(ring))
114 page_frag_free(tx_buf->raw_buf);
116 dev_kfree_skb_any(tx_buf->skb);
117 if (dma_unmap_len(tx_buf, len))
118 dma_unmap_single(ring->dev,
119 dma_unmap_addr(tx_buf, dma),
120 dma_unmap_len(tx_buf, len),
122 } else if (dma_unmap_len(tx_buf, len)) {
123 dma_unmap_page(ring->dev,
124 dma_unmap_addr(tx_buf, dma),
125 dma_unmap_len(tx_buf, len),
129 tx_buf->next_to_watch = NULL;
131 dma_unmap_len_set(tx_buf, len, 0);
132 /* tx_buf must be completely set up in the transmit path */
135 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
137 return netdev_get_tx_queue(ring->netdev, ring->q_index);
141 * ice_clean_tx_ring - Free any empty Tx buffers
142 * @tx_ring: ring to be cleaned
144 void ice_clean_tx_ring(struct ice_ring *tx_ring)
148 if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
149 ice_xsk_clean_xdp_ring(tx_ring);
153 /* ring already cleared, nothing to do */
154 if (!tx_ring->tx_buf)
157 /* Free all the Tx ring sk_buffs */
158 for (i = 0; i < tx_ring->count; i++)
159 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
162 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
164 /* Zero out the descriptor ring */
165 memset(tx_ring->desc, 0, tx_ring->size);
167 tx_ring->next_to_use = 0;
168 tx_ring->next_to_clean = 0;
170 if (!tx_ring->netdev)
173 /* cleanup Tx queue statistics */
174 netdev_tx_reset_queue(txring_txq(tx_ring));
178 * ice_free_tx_ring - Free Tx resources per queue
179 * @tx_ring: Tx descriptor ring for a specific queue
181 * Free all transmit software resources
183 void ice_free_tx_ring(struct ice_ring *tx_ring)
185 ice_clean_tx_ring(tx_ring);
186 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
187 tx_ring->tx_buf = NULL;
190 dmam_free_coherent(tx_ring->dev, tx_ring->size,
191 tx_ring->desc, tx_ring->dma);
192 tx_ring->desc = NULL;
197 * ice_clean_tx_irq - Reclaim resources after transmit completes
198 * @tx_ring: Tx ring to clean
199 * @napi_budget: Used to determine if we are in netpoll
201 * Returns true if there's any budget left (e.g. the clean is finished)
203 static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
205 unsigned int total_bytes = 0, total_pkts = 0;
206 unsigned int budget = ICE_DFLT_IRQ_WORK;
207 struct ice_vsi *vsi = tx_ring->vsi;
208 s16 i = tx_ring->next_to_clean;
209 struct ice_tx_desc *tx_desc;
210 struct ice_tx_buf *tx_buf;
212 tx_buf = &tx_ring->tx_buf[i];
213 tx_desc = ICE_TX_DESC(tx_ring, i);
216 prefetch(&vsi->state);
219 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
221 /* if next_to_watch is not set then there is no work pending */
225 smp_rmb(); /* prevent any other reads prior to eop_desc */
227 /* if the descriptor isn't done, no work yet to do */
228 if (!(eop_desc->cmd_type_offset_bsz &
229 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
232 /* clear next_to_watch to prevent false hangs */
233 tx_buf->next_to_watch = NULL;
235 /* update the statistics for this packet */
236 total_bytes += tx_buf->bytecount;
237 total_pkts += tx_buf->gso_segs;
239 if (ice_ring_is_xdp(tx_ring))
240 page_frag_free(tx_buf->raw_buf);
243 napi_consume_skb(tx_buf->skb, napi_budget);
245 /* unmap skb header data */
246 dma_unmap_single(tx_ring->dev,
247 dma_unmap_addr(tx_buf, dma),
248 dma_unmap_len(tx_buf, len),
251 /* clear tx_buf data */
253 dma_unmap_len_set(tx_buf, len, 0);
255 /* unmap remaining buffers */
256 while (tx_desc != eop_desc) {
262 tx_buf = tx_ring->tx_buf;
263 tx_desc = ICE_TX_DESC(tx_ring, 0);
266 /* unmap any remaining paged data */
267 if (dma_unmap_len(tx_buf, len)) {
268 dma_unmap_page(tx_ring->dev,
269 dma_unmap_addr(tx_buf, dma),
270 dma_unmap_len(tx_buf, len),
272 dma_unmap_len_set(tx_buf, len, 0);
276 /* move us one more past the eop_desc for start of next pkt */
282 tx_buf = tx_ring->tx_buf;
283 tx_desc = ICE_TX_DESC(tx_ring, 0);
288 /* update budget accounting */
290 } while (likely(budget));
293 tx_ring->next_to_clean = i;
295 ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
297 if (ice_ring_is_xdp(tx_ring))
300 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
303 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
304 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
305 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
306 /* Make sure that anybody stopping the queue after this
307 * sees the new next_to_clean.
310 if (__netif_subqueue_stopped(tx_ring->netdev,
312 !test_bit(__ICE_DOWN, vsi->state)) {
313 netif_wake_subqueue(tx_ring->netdev,
315 ++tx_ring->tx_stats.restart_q;
323 * ice_setup_tx_ring - Allocate the Tx descriptors
324 * @tx_ring: the Tx ring to set up
326 * Return 0 on success, negative on error
328 int ice_setup_tx_ring(struct ice_ring *tx_ring)
330 struct device *dev = tx_ring->dev;
335 /* warn if we are about to overwrite the pointer */
336 WARN_ON(tx_ring->tx_buf);
338 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
340 if (!tx_ring->tx_buf)
343 /* round up to nearest page */
344 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
346 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
348 if (!tx_ring->desc) {
349 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
354 tx_ring->next_to_use = 0;
355 tx_ring->next_to_clean = 0;
356 tx_ring->tx_stats.prev_pkt = -1;
360 devm_kfree(dev, tx_ring->tx_buf);
361 tx_ring->tx_buf = NULL;
366 * ice_clean_rx_ring - Free Rx buffers
367 * @rx_ring: ring to be cleaned
369 void ice_clean_rx_ring(struct ice_ring *rx_ring)
371 struct device *dev = rx_ring->dev;
374 /* ring already cleared, nothing to do */
375 if (!rx_ring->rx_buf)
379 dev_kfree_skb(rx_ring->skb);
383 if (rx_ring->xsk_pool) {
384 ice_xsk_clean_rx_ring(rx_ring);
388 /* Free all the Rx ring sk_buffs */
389 for (i = 0; i < rx_ring->count; i++) {
390 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
395 /* Invalidate cache lines that may have been written to by
396 * device so that we avoid corrupting memory.
398 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
403 /* free resources associated with mapping */
404 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
405 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
406 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
409 rx_buf->page_offset = 0;
413 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
415 /* Zero out the descriptor ring */
416 memset(rx_ring->desc, 0, rx_ring->size);
418 rx_ring->next_to_alloc = 0;
419 rx_ring->next_to_clean = 0;
420 rx_ring->next_to_use = 0;
424 * ice_free_rx_ring - Free Rx resources
425 * @rx_ring: ring to clean the resources from
427 * Free all receive software resources
429 void ice_free_rx_ring(struct ice_ring *rx_ring)
431 ice_clean_rx_ring(rx_ring);
432 if (rx_ring->vsi->type == ICE_VSI_PF)
433 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
434 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
435 rx_ring->xdp_prog = NULL;
436 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
437 rx_ring->rx_buf = NULL;
440 dmam_free_coherent(rx_ring->dev, rx_ring->size,
441 rx_ring->desc, rx_ring->dma);
442 rx_ring->desc = NULL;
447 * ice_rx_offset - Return expected offset into page to access data
448 * @rx_ring: Ring we are requesting offset of
450 * Returns the offset value for ring into the data buffer.
452 static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
454 if (ice_ring_uses_build_skb(rx_ring))
456 else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
457 return XDP_PACKET_HEADROOM;
463 * ice_setup_rx_ring - Allocate the Rx descriptors
464 * @rx_ring: the Rx ring to set up
466 * Return 0 on success, negative on error
468 int ice_setup_rx_ring(struct ice_ring *rx_ring)
470 struct device *dev = rx_ring->dev;
475 /* warn if we are about to overwrite the pointer */
476 WARN_ON(rx_ring->rx_buf);
478 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
480 if (!rx_ring->rx_buf)
483 /* round up to nearest page */
484 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
486 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
488 if (!rx_ring->desc) {
489 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
494 rx_ring->next_to_use = 0;
495 rx_ring->next_to_clean = 0;
496 rx_ring->rx_offset = ice_rx_offset(rx_ring);
498 if (ice_is_xdp_ena_vsi(rx_ring->vsi))
499 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
501 if (rx_ring->vsi->type == ICE_VSI_PF &&
502 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
503 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
504 rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
509 devm_kfree(dev, rx_ring->rx_buf);
510 rx_ring->rx_buf = NULL;
515 ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
517 unsigned int truesize;
519 #if (PAGE_SIZE < 8192)
520 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
522 truesize = rx_ring->rx_offset ?
523 SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
524 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
525 SKB_DATA_ALIGN(size);
531 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
533 * @xdp: xdp_buff used as input to the XDP program
534 * @xdp_prog: XDP program to run
536 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
539 ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
540 struct bpf_prog *xdp_prog)
542 struct ice_ring *xdp_ring;
546 act = bpf_prog_run_xdp(xdp_prog, xdp);
551 xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
552 return ice_xmit_xdp_buff(xdp, xdp_ring);
554 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
555 return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
557 bpf_warn_invalid_xdp_action(act);
560 trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
563 return ICE_XDP_CONSUMED;
568 * ice_xdp_xmit - submit packets to XDP ring for transmission
570 * @n: number of XDP frames to be transmitted
571 * @frames: XDP frames to be transmitted
572 * @flags: transmit flags
 * Returns the number of frames successfully sent. Frames that fail are
 * freed via the XDP return API.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (the caller must handle freeing the frames).
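 *
 * This is the driver's .ndo_xdp_xmit callback. A minimal sketch of how such a
 * handler is wired into the netdev ops table (illustrative only, the ops
 * table itself lives outside this file):
 *
 *	static const struct net_device_ops ice_netdev_ops = {
 *		...
 *		.ndo_xdp_xmit	= ice_xdp_xmit,
 *	};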
580 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
583 struct ice_netdev_priv *np = netdev_priv(dev);
584 unsigned int queue_index = smp_processor_id();
585 struct ice_vsi *vsi = np->vsi;
586 struct ice_ring *xdp_ring;
589 if (test_bit(__ICE_DOWN, vsi->state))
592 if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
595 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
598 xdp_ring = vsi->xdp_rings[queue_index];
599 for (i = 0; i < n; i++) {
600 struct xdp_frame *xdpf = frames[i];
603 err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
604 if (err != ICE_XDP_TX) {
605 xdp_return_frame_rx_napi(xdpf);
610 if (unlikely(flags & XDP_XMIT_FLUSH))
611 ice_xdp_ring_update_tail(xdp_ring);
617 * ice_alloc_mapped_page - recycle or make a new page
618 * @rx_ring: ring to use
619 * @bi: rx_buf struct to modify
 * Returns true if the page was successfully allocated or reused.
625 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
627 struct page *page = bi->page;
630 /* since we are recycling buffers we should seldom need to alloc */
634 /* alloc new page for storage */
635 page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
636 if (unlikely(!page)) {
637 rx_ring->rx_stats.alloc_page_failed++;
641 /* map page for use */
642 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
643 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
645 /* if mapping failed free memory back to system since
646 * there isn't much point in holding memory we can't use
648 if (dma_mapping_error(rx_ring->dev, dma)) {
649 __free_pages(page, ice_rx_pg_order(rx_ring));
650 rx_ring->rx_stats.alloc_page_failed++;
656 bi->page_offset = rx_ring->rx_offset;
657 page_ref_add(page, USHRT_MAX - 1);
658 bi->pagecnt_bias = USHRT_MAX;
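/* Taking USHRT_MAX - 1 page references up front and tracking the driver's
 * share in pagecnt_bias lets the hot path hand buffers to the stack with a
 * cheap local decrement instead of an atomic page_ref operation per frame;
 * the bias is only topped back up when it runs low (see
 * ice_can_reuse_rx_page()).
 */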
664 * ice_alloc_rx_bufs - Replace used receive buffers
665 * @rx_ring: ring to place buffers on
666 * @cleaned_count: number of buffers to replace
668 * Returns false if all allocations were successful, true if any fail. Returning
669 * true signals to the caller that we didn't replace cleaned_count buffers and
670 * there is more work to do.
672 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
673 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
674 * multiple tail writes per call.
676 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
678 union ice_32b_rx_flex_desc *rx_desc;
679 u16 ntu = rx_ring->next_to_use;
680 struct ice_rx_buf *bi;
682 /* do nothing if no valid netdev defined */
683 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
687 /* get the Rx descriptor and buffer based on next_to_use */
688 rx_desc = ICE_RX_DESC(rx_ring, ntu);
689 bi = &rx_ring->rx_buf[ntu];
692 /* if we fail here, we have work remaining */
693 if (!ice_alloc_mapped_page(rx_ring, bi))
696 /* sync the buffer for use by the device */
697 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
702 /* Refresh the desc even if buffer_addrs didn't change
703 * because each write-back erases this info.
705 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
710 if (unlikely(ntu == rx_ring->count)) {
711 rx_desc = ICE_RX_DESC(rx_ring, 0);
712 bi = rx_ring->rx_buf;
716 /* clear the status bits for the next_to_use descriptor */
717 rx_desc->wb.status_error0 = 0;
720 } while (cleaned_count);
722 if (rx_ring->next_to_use != ntu)
723 ice_release_rx_desc(rx_ring, ntu);
725 return !!cleaned_count;
729 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
730 * @rx_buf: Rx buffer to adjust
731 * @size: Size of adjustment
733 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so that the second half of the page assigned to the Rx buffer is used;
 * otherwise the offset is moved forward by "size" bytes.
739 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
741 #if (PAGE_SIZE < 8192)
742 /* flip page offset to other buffer */
743 rx_buf->page_offset ^= size;
745 /* move offset up to the next cache line */
746 rx_buf->page_offset += size;
751 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
752 * @rx_buf: buffer containing the page
753 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
755 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
756 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and the page freed.
761 ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
763 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
764 struct page *page = rx_buf->page;
766 /* avoid re-using remote and pfmemalloc pages */
767 if (!dev_page_is_reusable(page))
770 #if (PAGE_SIZE < 8192)
771 /* if we are only owner of page we can reuse it */
772 if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
775 #define ICE_LAST_OFFSET \
776 (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
777 if (rx_buf->page_offset > ICE_LAST_OFFSET)
#endif /* PAGE_SIZE < 8192 */
781 /* If we have drained the page fragment pool we need to update
782 * the pagecnt_bias and page count so that we fully restock the
783 * number of references the driver holds.
785 if (unlikely(pagecnt_bias == 1)) {
786 page_ref_add(page, USHRT_MAX - 1);
787 rx_buf->pagecnt_bias = USHRT_MAX;
794 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
795 * @rx_ring: Rx descriptor ring to transact packets on
796 * @rx_buf: buffer containing page to add
797 * @skb: sk_buff to place the data into
798 * @size: packet length from rx_desc
800 * This function will add the data contained in rx_buf->page to the skb.
801 * It will just attach the page as a frag to the skb.
802 * The function will then update the page offset.
805 ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
806 struct sk_buff *skb, unsigned int size)
808 #if (PAGE_SIZE >= 8192)
809 unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
811 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
816 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
817 rx_buf->page_offset, size, truesize);
819 /* page is being used so we must update the page offset */
820 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
824 * ice_reuse_rx_page - page flip buffer and store it back on the ring
825 * @rx_ring: Rx descriptor ring to store buffers on
826 * @old_buf: donor buffer to have page reused
828 * Synchronizes page for reuse by the adapter
831 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
833 u16 nta = rx_ring->next_to_alloc;
834 struct ice_rx_buf *new_buf;
836 new_buf = &rx_ring->rx_buf[nta];
838 /* update, and store next to alloc */
840 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
842 /* Transfer page from old buffer to new buffer.
843 * Move each member individually to avoid possible store
844 * forwarding stalls and unnecessary copy of skb.
846 new_buf->dma = old_buf->dma;
847 new_buf->page = old_buf->page;
848 new_buf->page_offset = old_buf->page_offset;
849 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
853 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
854 * @rx_ring: Rx descriptor ring to transact packets on
855 * @size: size of buffer to add to skb
856 * @rx_buf_pgcnt: rx_buf page refcount
858 * This function will pull an Rx buffer from the ring and synchronize it
859 * for use by the CPU.
861 static struct ice_rx_buf *
862 ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
865 struct ice_rx_buf *rx_buf;
867 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
869 #if (PAGE_SIZE < 8192)
870 page_count(rx_buf->page);
874 prefetchw(rx_buf->page);
878 /* we are reusing so sync this buffer for CPU use */
879 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
880 rx_buf->page_offset, size,
883 /* We have pulled a buffer for use, so decrement pagecnt_bias */
884 rx_buf->pagecnt_bias--;
890 * ice_build_skb - Build skb around an existing buffer
891 * @rx_ring: Rx descriptor ring to transact packets on
892 * @rx_buf: Rx buffer to pull data from
893 * @xdp: xdp_buff pointing to the data
895 * This function builds an skb around an existing Rx buffer, taking care
896 * to set up the skb correctly and avoid any memcpy overhead.
898 static struct sk_buff *
899 ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
900 struct xdp_buff *xdp)
902 u8 metasize = xdp->data - xdp->data_meta;
903 #if (PAGE_SIZE < 8192)
904 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
906 unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
907 SKB_DATA_ALIGN(xdp->data_end -
908 xdp->data_hard_start);
/* Prefetch first cache line of first page. If xdp->data_meta
 * is unused, this points to the same address as xdp->data;
 * otherwise we likely have a consumer accessing the first few
 * bytes of metadata, and then the actual data.
917 net_prefetch(xdp->data_meta);
918 /* build an skb around the page buffer */
919 skb = build_skb(xdp->data_hard_start, truesize);
/* we must record the Rx queue, otherwise OS features such as
 * symmetric queues won't work
926 skb_record_rx_queue(skb, rx_ring->q_index);
928 /* update pointers within the skb to store the data */
929 skb_reserve(skb, xdp->data - xdp->data_hard_start);
930 __skb_put(skb, xdp->data_end - xdp->data);
932 skb_metadata_set(skb, metasize);
934 /* buffer is used by skb, update page_offset */
935 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
941 * ice_construct_skb - Allocate skb and populate it
942 * @rx_ring: Rx descriptor ring to transact packets on
943 * @rx_buf: Rx buffer to pull data from
944 * @xdp: xdp_buff pointing to the data
946 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the skb correctly.
950 static struct sk_buff *
951 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
952 struct xdp_buff *xdp)
954 unsigned int size = xdp->data_end - xdp->data;
955 unsigned int headlen;
958 /* prefetch first cache line of first page */
959 net_prefetch(xdp->data);
961 /* allocate a skb to store the frags */
962 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
963 GFP_ATOMIC | __GFP_NOWARN);
967 skb_record_rx_queue(skb, rx_ring->q_index);
968 /* Determine available headroom for copy */
970 if (headlen > ICE_RX_HDR_SIZE)
971 headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
973 /* align pull length to size of long to optimize memcpy performance */
974 memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
977 /* if we exhaust the linear part then add what is left as a frag */
980 #if (PAGE_SIZE >= 8192)
981 unsigned int truesize = SKB_DATA_ALIGN(size);
983 unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
985 skb_add_rx_frag(skb, 0, rx_buf->page,
986 rx_buf->page_offset + headlen, size, truesize);
987 /* buffer is used by skb, update page_offset */
988 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
990 /* buffer is unused, reset bias back to rx_buf; data was copied
991 * onto skb's linear part so there's no need for adjusting
992 * page offset and we can reuse this buffer as-is
994 rx_buf->pagecnt_bias++;
1001 * ice_put_rx_buf - Clean up used buffer and either recycle or free
1002 * @rx_ring: Rx descriptor ring to transact packets on
1003 * @rx_buf: Rx buffer to pull data from
1004 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
1006 * This function will update next_to_clean and then clean up the contents
1007 * of the rx_buf. It will either recycle the buffer or unmap it and free
1008 * the associated resources.
1011 ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
1014 u16 ntc = rx_ring->next_to_clean + 1;
1016 /* fetch, update, and store next to clean */
1017 ntc = (ntc < rx_ring->count) ? ntc : 0;
1018 rx_ring->next_to_clean = ntc;
1023 if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
1024 /* hand second half of page back to the ring */
1025 ice_reuse_rx_page(rx_ring, rx_buf);
1027 /* we are not reusing the buffer so unmap it */
1028 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
1029 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1031 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
1034 /* clear contents of buffer_info */
1035 rx_buf->page = NULL;
1039 * ice_is_non_eop - process handling of non-EOP buffers
1040 * @rx_ring: Rx ring being processed
1041 * @rx_desc: Rx descriptor for current buffer
 * If the buffer is an EOP buffer, this function exits returning false;
 * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
1047 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
1049 /* if we are the last buffer then there is nothing else to do */
1050 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
1051 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
1054 rx_ring->rx_stats.non_eop_descs++;
1060 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
1061 * @rx_ring: Rx descriptor ring to transact packets on
1062 * @budget: Total limit on number of packets to process
1064 * This function provides a "bounce buffer" approach to Rx interrupt
1065 * processing. The advantage to this is that on systems that have
1066 * expensive overhead for IOMMU access this provides a means of avoiding
1067 * it by maintaining the mapping of the page to the system.
1069 * Returns amount of work completed
1071 int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1073 unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
1074 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1075 unsigned int offset = rx_ring->rx_offset;
1076 unsigned int xdp_res, xdp_xmit = 0;
1077 struct sk_buff *skb = rx_ring->skb;
1078 struct bpf_prog *xdp_prog = NULL;
1079 struct xdp_buff xdp;
/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
1083 #if (PAGE_SIZE < 8192)
1084 frame_sz = ice_rx_frame_truesize(rx_ring, 0);
1086 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
1088 /* start the loop to process Rx packets bounded by 'budget' */
1089 while (likely(total_rx_pkts < (unsigned int)budget)) {
1090 union ice_32b_rx_flex_desc *rx_desc;
1091 struct ice_rx_buf *rx_buf;
1092 unsigned char *hard_start;
1099 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1100 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
 * because it's cleared in cleanup, and overlaps with hdr_addr
 * which is always zero because packet split isn't used; if the
 * hardware wrote DD then it will be non-zero
1107 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1108 if (!ice_test_staterr(rx_desc, stat_err_bits))
1111 /* This memory barrier is needed to keep us from reading
 * any other fields out of the rx_desc until we know the DD bit is set.
1117 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
1118 ice_put_rx_buf(rx_ring, NULL, 0);
1123 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1124 ICE_RX_FLX_DESC_PKT_LEN_M;
1126 /* retrieve a buffer from the ring */
1127 rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
1131 xdp.data_end = NULL;
1132 xdp.data_hard_start = NULL;
1133 xdp.data_meta = NULL;
1137 hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
1139 xdp_prepare_buff(&xdp, hard_start, offset, size, true);
1140 #if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depends on the frame length */
1142 xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
1146 xdp_prog = READ_ONCE(rx_ring->xdp_prog);
1152 xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
1156 if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
1157 xdp_xmit |= xdp_res;
1158 ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
1160 rx_buf->pagecnt_bias++;
1162 total_rx_bytes += size;
1166 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1170 ice_add_rx_frag(rx_ring, rx_buf, skb, size);
1171 } else if (likely(xdp.data)) {
1172 if (ice_ring_uses_build_skb(rx_ring))
1173 skb = ice_build_skb(rx_ring, rx_buf, &xdp);
1175 skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
1177 /* exit if we failed to retrieve a buffer */
1179 rx_ring->rx_stats.alloc_buf_failed++;
1181 rx_buf->pagecnt_bias++;
1185 ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
1188 /* skip if it is NOP desc */
1189 if (ice_is_non_eop(rx_ring, rx_desc))
1192 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1193 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1194 dev_kfree_skb_any(skb);
1198 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1199 if (ice_test_staterr(rx_desc, stat_err_bits))
1200 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1202 /* pad the skb if needed, to make a valid ethernet frame */
1203 if (eth_skb_pad(skb)) {
1208 /* probably a little skewed due to removing CRC */
1209 total_rx_bytes += skb->len;
1211 /* populate checksum, VLAN, and protocol */
1212 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1213 ICE_RX_FLEX_DESC_PTYPE_M;
1215 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1217 /* send completed skb up the stack */
1218 ice_receive_skb(rx_ring, skb, vlan_tag);
1221 /* update budget accounting */
1225 /* return up to cleaned_count buffers to hardware */
1226 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1229 ice_finalize_xdp_rx(rx_ring, xdp_xmit);
1232 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
1234 /* guarantee a trip back through this routine if there was a failure */
1235 return failure ? budget : (int)total_rx_pkts;
1239 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1240 * @port_info: port_info structure containing the current link speed
1241 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1242 * @itr: ITR value to update
1244 * Calculate how big of an increment should be applied to the ITR value passed
 * in based on wmem_default, SKB overhead, ethernet overhead, and the current link speed.
1248 * The following is a calculation derived from:
1249 * wmem_default / (size + overhead) = desired_pkts_per_int
1250 * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
1251 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
 * Assuming wmem_default is 212992 and overhead is 640 bytes per
 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
 * formula down to:
 *
 *       wmem_default * bits_per_byte * usecs_per_sec     pkt_size + 24
 * ITR = --------------------------------------------- * ---------------
 *                          rate                          pkt_size + 640
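 *
 * As an illustrative check of the constants used below (not part of the
 * original comment): at 25 Gbit/s the first factor is
 * 212992 * 8 / 25000 bits-per-usec ~= 68 usecs, which is the "68" used in
 * the 25GB case; the same arithmetic gives 17, 34, 43, 85 and 170 for the
 * 100GB, 50GB, 40GB, 20GB and 10GB cases.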
1262 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1263 unsigned int avg_pkt_size,
1266 switch (port_info->phy.link_info.link_speed) {
1267 case ICE_AQ_LINK_SPEED_100GB:
1268 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1269 avg_pkt_size + 640);
1271 case ICE_AQ_LINK_SPEED_50GB:
1272 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1273 avg_pkt_size + 640);
1275 case ICE_AQ_LINK_SPEED_40GB:
1276 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1277 avg_pkt_size + 640);
1279 case ICE_AQ_LINK_SPEED_25GB:
1280 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1281 avg_pkt_size + 640);
1283 case ICE_AQ_LINK_SPEED_20GB:
1284 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1285 avg_pkt_size + 640);
1287 case ICE_AQ_LINK_SPEED_10GB:
1289 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1290 avg_pkt_size + 640);
1294 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1295 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1296 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1303 * ice_update_itr - update the adaptive ITR value based on statistics
1304 * @q_vector: structure containing interrupt and ring information
1305 * @rc: structure containing ring performance data
1307 * Stores a new ITR value based on packets and byte
1308 * counts during the last interrupt. The advantage of per interrupt
1309 * computation is faster updates and more accurate ITR for the current
1310 * traffic pattern. Constants in this function were computed
1311 * based on theoretical maximum wire speed and thresholds were set based
1312 * on testing data as well as attempting to minimize response time
1313 * while increasing bulk throughput.
1316 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1318 unsigned long next_update = jiffies;
1319 unsigned int packets, bytes, itr;
1320 bool container_is_rx;
1322 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1325 /* If itr_countdown is set it means we programmed an ITR within
1326 * the last 4 interrupt cycles. This has a side effect of us
1327 * potentially firing an early interrupt. In order to work around
1328 * this we need to throw out any data received for a few
1329 * interrupts following the update.
1331 if (q_vector->itr_countdown) {
1332 itr = rc->target_itr;
1336 container_is_rx = (&q_vector->rx == rc);
1337 /* For Rx we want to push the delay up and default to low latency.
 * For Tx we want to pull the delay down and default to high latency.
1340 itr = container_is_rx ?
1341 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1342 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1344 /* If we didn't update within up to 1 - 2 jiffies we can assume
 * that either packets are coming in so slowly that there hasn't been
1346 * any work, or that there is so much work that NAPI is dealing
1347 * with interrupt moderation and we don't need to do anything.
1349 if (time_after(next_update, rc->next_update))
1352 prefetch(q_vector->vsi->port_info);
1354 packets = rc->total_pkts;
1355 bytes = rc->total_bytes;
1357 if (container_is_rx) {
/* If this is Rx and there are 1 to 4 packets and bytes are less than
 * 9000, assume insufficient data to use the bulk rate limiting
 * approach unless Tx is already in bulk rate limiting. We
 * are likely latency driven.
1363 if (packets && packets < 4 && bytes < 9000 &&
1364 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1365 itr = ICE_ITR_ADAPTIVE_LATENCY;
1366 goto adjust_by_size_and_speed;
1368 } else if (packets < 4) {
1369 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1370 * bulk mode and we are receiving 4 or fewer packets just
1371 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1372 * that the Rx can relax.
1374 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1375 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1376 ICE_ITR_ADAPTIVE_MAX_USECS)
1378 } else if (packets > 32) {
1379 /* If we have processed over 32 packets in a single interrupt
1380 * for Tx assume we need to switch over to "bulk" mode.
1382 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1385 /* We have no packets to actually measure against. This means
1386 * either one of the other queues on this vector is active or
1387 * we are a Tx queue doing TSO with too high of an interrupt rate.
 * Between 4 and 56 we can assume that our current interrupt delay
 * is only slightly too low. As such we should increase it by a small fixed amount.
1394 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1395 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1396 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1397 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1402 if (packets <= 256) {
1403 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1404 itr &= ICE_ITR_MASK;
1406 /* Between 56 and 112 is our "goldilocks" zone where we are
1407 * working out "just right". Just report that our current
1408 * ITR is good for us.
1413 /* If packet count is 128 or greater we are likely looking
1414 * at a slight overrun of the delay we want. Try halving
1415 * our delay to see if that will cut the number of packets
1416 * in half per interrupt.
1419 itr &= ICE_ITR_MASK;
1420 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1421 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1426 /* The paths below assume we are dealing with a bulk ITR since
1427 * number of packets is greater than 256. We are just going to have
1428 * to compute a value and try to bring the count under control,
1429 * though for smaller packet sizes there isn't much we can do as
1430 * NAPI polling will likely be kicking in sooner rather than later.
1432 itr = ICE_ITR_ADAPTIVE_BULK;
1434 adjust_by_size_and_speed:
1436 /* based on checks above packets cannot be 0 so division is safe */
1437 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1438 bytes / packets, itr);
1441 /* write back value */
1442 rc->target_itr = itr;
1444 /* next update should occur within next jiffy */
1445 rc->next_update = next_update + 1;
1447 rc->total_bytes = 0;
1452 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1453 * @itr_idx: interrupt throttling index
1454 * @itr: interrupt throttling value in usecs
1456 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1458 /* The ITR value is reported in microseconds, and the register value is
1459 * recorded in 2 microsecond units. For this reason we only need to
1460 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1461 * granularity as a shift instead of division. The mask makes sure the
1462 * ITR value is never odd so we don't accidentally write into the field
1463 * prior to the ITR field.
1465 itr &= ICE_ITR_MASK;
1467 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1468 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1469 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
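/* For example, ice_buildreg_itr(ICE_ITR_NONE, 0) (used further below) simply
 * sets INTENA and CLEARPBA to re-enable the interrupt without selecting an
 * ITR interval to update.
 */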
1472 /* The act of updating the ITR will cause it to immediately trigger. In order
1473 * to prevent this from throwing off adaptive update statistics we defer the
1474 * update so that it can only happen so often. So after either Tx or Rx are
1475 * updated we make the adaptive scheme wait until either the ITR completely
 * expires via the next_update expiration or we have been through at least 3 interrupts.
1479 #define ITR_COUNTDOWN_START 3
1482 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1483 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1485 static void ice_update_ena_itr(struct ice_q_vector *q_vector)
1487 struct ice_ring_container *tx = &q_vector->tx;
1488 struct ice_ring_container *rx = &q_vector->rx;
1489 struct ice_vsi *vsi = q_vector->vsi;
1492 /* when exiting WB_ON_ITR just reset the countdown and let ITR
 * resume its normal "interrupts-enabled" path
1495 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1496 q_vector->itr_countdown = 0;
1498 /* This will do nothing if dynamic updates are not enabled */
1499 ice_update_itr(q_vector, tx);
1500 ice_update_itr(q_vector, rx);
1502 /* This block of logic allows us to get away with only updating
1503 * one ITR value with each interrupt. The idea is to perform a
1504 * pseudo-lazy update with the following criteria.
 * 1. Rx is given higher priority than Tx if both are in the same state
 * 2. If we must reduce an ITR, that reduction is given the highest priority
 * 3. We then give priority to increasing the ITR based on the amount of change
1510 if (rx->target_itr < rx->current_itr) {
1511 /* Rx ITR needs to be reduced, this is highest priority */
1512 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1513 rx->current_itr = rx->target_itr;
1514 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1515 } else if ((tx->target_itr < tx->current_itr) ||
1516 ((rx->target_itr - rx->current_itr) <
1517 (tx->target_itr - tx->current_itr))) {
1518 /* Tx ITR needs to be reduced, this is second priority
1519 * Tx ITR needs to be increased more than Rx, fourth priority
1521 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1522 tx->current_itr = tx->target_itr;
1523 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1524 } else if (rx->current_itr != rx->target_itr) {
1525 /* Rx ITR needs to be increased, third priority */
1526 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1527 rx->current_itr = rx->target_itr;
1528 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1530 /* Still have to re-enable the interrupts */
1531 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1532 if (q_vector->itr_countdown)
1533 q_vector->itr_countdown--;
1536 if (!test_bit(__ICE_DOWN, vsi->state))
1537 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1541 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1542 * @q_vector: q_vector to set WB_ON_ITR on
1544 * We need to tell hardware to write-back completed descriptors even when
1545 * interrupts are disabled. Descriptors will be written back on cache line
1546 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1547 * descriptors may not be written back if they don't fill a cache line until
1548 * the next interrupt.
1550 * This sets the write-back frequency to whatever was set previously for the
1551 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
1552 * aren't meddling with the INTENA_M bit.
1554 static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
1556 struct ice_vsi *vsi = q_vector->vsi;
1558 /* already in wb_on_itr mode no need to change it */
1559 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1562 /* use previously set ITR values for all of the ITR indices by
1563 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
1564 * be static in non-adaptive mode (user configured)
1566 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1567 ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
1568 GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
1569 GLINT_DYN_CTL_WB_ON_ITR_M);
1571 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1575 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1576 * @napi: napi struct with our devices info in it
1577 * @budget: amount of work driver is allowed to do this pass, in packets
1579 * This function will clean all queues associated with a q_vector.
1581 * Returns the amount of work done
1583 int ice_napi_poll(struct napi_struct *napi, int budget)
1585 struct ice_q_vector *q_vector =
1586 container_of(napi, struct ice_q_vector, napi);
1587 bool clean_complete = true;
1588 struct ice_ring *ring;
1589 int budget_per_ring;
1592 /* Since the actual Tx work is minimal, we can give the Tx a larger
1593 * budget and be more aggressive about cleaning up the Tx descriptors.
1595 ice_for_each_ring(ring, q_vector->tx) {
1596 bool wd = ring->xsk_pool ?
1597 ice_clean_tx_irq_zc(ring, budget) :
1598 ice_clean_tx_irq(ring, budget);
1601 clean_complete = false;
1604 /* Handle case where we are called by netpoll with a budget of 0 */
1605 if (unlikely(budget <= 0))
1608 /* normally we have 1 Rx ring per q_vector */
1609 if (unlikely(q_vector->num_ring_rx > 1))
1610 /* We attempt to distribute budget to each Rx queue fairly, but
 * don't allow the budget to go below 1 because that would exit polling early.
1614 budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
1616 /* Max of 1 Rx ring in this q_vector so give it the budget */
1617 budget_per_ring = budget;
1619 ice_for_each_ring(ring, q_vector->rx) {
1622 /* A dedicated path for zero-copy allows making a single
1623 * comparison in the irq context instead of many inside the
1624 * ice_clean_rx_irq function and makes the codebase cleaner.
1626 cleaned = ring->xsk_pool ?
1627 ice_clean_rx_irq_zc(ring, budget_per_ring) :
1628 ice_clean_rx_irq(ring, budget_per_ring);
1629 work_done += cleaned;
1630 /* if we clean as many as budgeted, we must not be done */
1631 if (cleaned >= budget_per_ring)
1632 clean_complete = false;
1635 /* If work not completed, return budget and polling will return */
1636 if (!clean_complete) {
1637 /* Set the writeback on ITR so partial completions of
1638 * cache-lines will still continue even if we're polling.
1640 ice_set_wb_on_itr(q_vector);
1644 /* Exit the polling mode, but don't re-enable interrupts if stack might
1645 * poll us due to busy-polling
1647 if (likely(napi_complete_done(napi, work_done)))
1648 ice_update_ena_itr(q_vector);
1650 ice_set_wb_on_itr(q_vector);
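/* Cap the value returned below at budget - 1: a NAPI poll routine that has
 * called napi_complete_done() must not report the full budget back to the
 * core, since that would signal there is still work pending.
 */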
1652 return min_t(int, work_done, budget - 1);
1656 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1657 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to ensure are available
1660 * Returns -EBUSY if a stop is needed, else 0
1662 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1664 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1665 /* Memory barrier before checking head and tail */
/* Check again in case another CPU has just made room available. */
1669 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1672 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1673 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1674 ++tx_ring->tx_stats.restart_q;
1679 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1680 * @tx_ring: the ring to be checked
 * @size: the number of descriptors we want to ensure are available
1683 * Returns 0 if stop is not needed
1685 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1687 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1690 return __ice_maybe_stop_tx(tx_ring, size);
1694 * ice_tx_map - Build the Tx descriptor
1695 * @tx_ring: ring to send buffer on
1696 * @first: first buffer info buffer to use
1697 * @off: pointer to struct that holds offload parameters
1699 * This function loops over the skb data pointed to by *first
1700 * and gets a physical address for each memory location and programs
1701 * it and the length into the transmit descriptor.
1704 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1705 struct ice_tx_offload_params *off)
1707 u64 td_offset, td_tag, td_cmd;
1708 u16 i = tx_ring->next_to_use;
1709 unsigned int data_len, size;
1710 struct ice_tx_desc *tx_desc;
1711 struct ice_tx_buf *tx_buf;
1712 struct sk_buff *skb;
1716 td_tag = off->td_l2tag1;
1717 td_cmd = off->td_cmd;
1718 td_offset = off->td_offset;
1721 data_len = skb->data_len;
1722 size = skb_headlen(skb);
1724 tx_desc = ICE_TX_DESC(tx_ring, i);
1726 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1727 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1728 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1729 ICE_TX_FLAGS_VLAN_S;
1732 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1736 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1737 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1739 if (dma_mapping_error(tx_ring->dev, dma))
1742 /* record length, and DMA address */
1743 dma_unmap_len_set(tx_buf, len, size);
1744 dma_unmap_addr_set(tx_buf, dma, dma);
1746 /* align size to end of page */
1747 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1748 tx_desc->buf_addr = cpu_to_le64(dma);
/* account for data chunks larger than the hardware can handle */
1753 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1754 tx_desc->cmd_type_offset_bsz =
1755 ice_build_ctob(td_cmd, td_offset, max_data,
1761 if (i == tx_ring->count) {
1762 tx_desc = ICE_TX_DESC(tx_ring, 0);
1769 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1770 tx_desc->buf_addr = cpu_to_le64(dma);
1773 if (likely(!data_len))
1776 tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
1782 if (i == tx_ring->count) {
1783 tx_desc = ICE_TX_DESC(tx_ring, 0);
1787 size = skb_frag_size(frag);
1790 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1793 tx_buf = &tx_ring->tx_buf[i];
1796 /* record bytecount for BQL */
1797 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1799 /* record SW timestamp if HW timestamp is not available */
1800 skb_tx_timestamp(first->skb);
1803 if (i == tx_ring->count)
1806 /* write last descriptor with RS and EOP bits */
1807 td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
1808 tx_desc->cmd_type_offset_bsz =
1809 ice_build_ctob(td_cmd, td_offset, size, td_tag);
1811 /* Force memory writes to complete before letting h/w know there
1812 * are new descriptors to fetch.
1814 * We also use this memory barrier to make certain all of the
1815 * status bits have been updated before next_to_watch is written.
1819 /* set next_to_watch value indicating a packet is present */
1820 first->next_to_watch = tx_desc;
1822 tx_ring->next_to_use = i;
1824 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1826 /* notify HW of packet */
1827 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
1828 writel(i, tx_ring->tail);
1833 /* clear DMA mappings for failed tx_buf map */
1835 tx_buf = &tx_ring->tx_buf[i];
1836 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1837 if (tx_buf == first)
1844 tx_ring->next_to_use = i;
1848 * ice_tx_csum - Enable Tx checksum offloads
1849 * @first: pointer to the first descriptor
1850 * @off: pointer to struct that holds offload parameters
1852 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1855 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1857 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1858 struct sk_buff *skb = first->skb;
1868 __be16 frag_off, protocol;
1869 unsigned char *exthdr;
1870 u32 offset, cmd = 0;
1873 if (skb->ip_summed != CHECKSUM_PARTIAL)
1876 ip.hdr = skb_network_header(skb);
1877 l4.hdr = skb_transport_header(skb);
1879 /* compute outer L2 header size */
1880 l2_len = ip.hdr - skb->data;
1881 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
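/* MACLEN is expressed in 2-byte words, which is why l2_len is divided by 2
 * here; the IPLEN and L4LEN fields filled in below are in 4-byte words.
 */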
1883 protocol = vlan_get_protocol(skb);
1885 if (protocol == htons(ETH_P_IP))
1886 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1887 else if (protocol == htons(ETH_P_IPV6))
1888 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1890 if (skb->encapsulation) {
1891 bool gso_ena = false;
1894 /* define outer network header type */
1895 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1896 tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
1897 ICE_TX_CTX_EIPT_IPV4 :
1898 ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
1899 l4_proto = ip.v4->protocol;
1900 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1903 tunnel |= ICE_TX_CTX_EIPT_IPV6;
1904 exthdr = ip.hdr + sizeof(*ip.v6);
1905 l4_proto = ip.v6->nexthdr;
1906 ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
1907 &l4_proto, &frag_off);
1912 /* define outer transport */
1915 tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
1916 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1919 tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
1920 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1924 first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
1925 l4.hdr = skb_inner_network_header(skb);
1928 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1931 skb_checksum_help(skb);
1935 /* compute outer L3 header size */
1936 tunnel |= ((l4.hdr - ip.hdr) / 4) <<
1937 ICE_TXD_CTX_QW0_EIPLEN_S;
1939 /* switch IP header pointer from outer to inner header */
1940 ip.hdr = skb_inner_network_header(skb);
1942 /* compute tunnel header size */
1943 tunnel |= ((ip.hdr - l4.hdr) / 2) <<
1944 ICE_TXD_CTX_QW0_NATLEN_S;
1946 gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
1947 /* indicate if we need to offload outer UDP header */
1948 if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
1949 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
1950 tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;
1952 /* record tunnel offload values */
1953 off->cd_tunnel_params |= tunnel;
/* set DTYP=1 to indicate that it's a Tx context descriptor
1956 * in IPsec tunnel mode with Tx offloads in Quad word 1
1958 off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;
1960 /* switch L4 header pointer from outer to inner */
1961 l4.hdr = skb_inner_transport_header(skb);
1964 /* reset type as we transition from outer to inner headers */
1965 first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
1966 if (ip.v4->version == 4)
1967 first->tx_flags |= ICE_TX_FLAGS_IPV4;
1968 if (ip.v6->version == 6)
1969 first->tx_flags |= ICE_TX_FLAGS_IPV6;
1972 /* Enable IP checksum offloads */
1973 if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
1974 l4_proto = ip.v4->protocol;
1975 /* the stack computes the IP header already, the only time we
1976 * need the hardware to recompute it is in the case of TSO.
1978 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1979 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1981 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1983 } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
1984 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1985 exthdr = ip.hdr + sizeof(*ip.v6);
1986 l4_proto = ip.v6->nexthdr;
1987 if (l4.hdr != exthdr)
1988 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1994 /* compute inner L3 header size */
1995 l3_len = l4.hdr - ip.hdr;
1996 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1998 /* Enable L4 checksum offloads */
/* enable TCP checksum offload */
2002 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
2003 l4_len = l4.tcp->doff;
2004 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2007 /* enable UDP checksum offload */
2008 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
2009 l4_len = (sizeof(struct udphdr) >> 2);
2010 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2013 /* enable SCTP checksum offload */
2014 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
2015 l4_len = sizeof(struct sctphdr) >> 2;
2016 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
2020 if (first->tx_flags & ICE_TX_FLAGS_TSO)
2022 skb_checksum_help(skb);
2027 off->td_offset |= offset;
2032 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
2033 * @tx_ring: ring to send buffer on
2034 * @first: pointer to struct ice_tx_buf
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN, DCB, etc.
2040 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
2042 struct sk_buff *skb = first->skb;
2044 /* nothing left to do, software offloaded VLAN */
2045 if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
2048 /* currently, we always assume 802.1Q for VLAN insertion as VLAN
2049 * insertion for 802.1AD is not supported
2051 if (skb_vlan_tag_present(skb)) {
2052 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
2053 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
2056 ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
2060 * ice_tso - computes mss and TSO length to prepare for TSO
2061 * @first: pointer to struct ice_tx_buf
2062 * @off: pointer to struct that holds offload parameters
2064 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
2067 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
2069 struct sk_buff *skb = first->skb;
2080 u64 cd_mss, cd_tso_len;
2085 if (skb->ip_summed != CHECKSUM_PARTIAL)
2088 if (!skb_is_gso(skb))
2091 err = skb_cow_head(skb, 0);
2095 /* cppcheck-suppress unreadVariable */
2096 ip.hdr = skb_network_header(skb);
2097 l4.hdr = skb_transport_header(skb);
2099 /* initialize outer IP header fields */
2100 if (ip.v4->version == 4) {
2104 ip.v6->payload_len = 0;
2107 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
2111 SKB_GSO_UDP_TUNNEL |
2112 SKB_GSO_UDP_TUNNEL_CSUM)) {
2113 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
2114 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
2117 /* determine offset of outer transport header */
2118 l4_start = (u8)(l4.hdr - skb->data);
2120 /* remove payload length from outer checksum */
2121 paylen = skb->len - l4_start;
2122 csum_replace_by_diff(&l4.udp->check,
2123 (__force __wsum)htonl(paylen));
2126 /* reset pointers to inner headers */
2128 /* cppcheck-suppress unreadVariable */
2129 ip.hdr = skb_inner_network_header(skb);
2130 l4.hdr = skb_inner_transport_header(skb);
2132 /* initialize inner IP header fields */
2133 if (ip.v4->version == 4) {
2137 ip.v6->payload_len = 0;
2141 /* determine offset of transport header */
2142 l4_start = (u8)(l4.hdr - skb->data);
2144 /* remove payload length from checksum */
2145 paylen = skb->len - l4_start;
2147 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
2148 csum_replace_by_diff(&l4.udp->check,
2149 (__force __wsum)htonl(paylen));
2150 /* compute length of UDP segmentation header */
off->header_len = (u8)sizeof(struct udphdr) + l4_start;
2153 csum_replace_by_diff(&l4.tcp->check,
2154 (__force __wsum)htonl(paylen));
2155 /* compute length of TCP segmentation header */
2156 off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
2159 /* update gso_segs and bytecount */
2160 first->gso_segs = skb_shinfo(skb)->gso_segs;
2161 first->bytecount += (first->gso_segs - 1) * off->header_len;
2163 cd_tso_len = skb->len - off->header_len;
2164 cd_mss = skb_shinfo(skb)->gso_size;
2166 /* record cdesc_qw1 with TSO parameters */
2167 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2168 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
2169 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
2170 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
2171 first->tx_flags |= ICE_TX_FLAGS_TSO;
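/* Worked example (illustrative numbers only): for a TCP skb with
 * skb->len = 14654, 54 bytes of Ethernet + IPv4 + TCP headers and
 * gso_size = 1460, the block above produces
 *
 *	off->header_len  = 54
 *	cd_tso_len       = 14654 - 54 = 14600
 *	cd_mss           = 1460
 *	first->gso_segs  = 10
 *	first->bytecount += 9 * 54	(headers repeated for the nine extra
 *					 segments, ten 1514-byte frames total)
 *
 * and those values are packed into the context descriptor's qw1 together
 * with ICE_TX_CTX_DESC_TSO.
 */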
2176 * ice_txd_use_count - estimate the number of descriptors needed for Tx
2177 * @size: transmit request size in bytes
2179 * Due to hardware alignment restrictions (4K alignment), we need to
2180 * assume that we can have no more than 12K of data per descriptor, even
2181 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
2182 * Thus, we need to divide by 12K. But division is slow! Instead,
2183 * we decompose the operation into shifts and one relatively cheap
2184 * multiply operation.
2186 * To divide by 12K, we first divide by 4K, then divide by 3:
2187 * To divide by 4K, shift right by 12 bits
2188 * To divide by 3, multiply by 85, then divide by 256
2189 * (Divide by 256 is done by shifting right by 8 bits)
2190 * Finally, we add one to round up. Because 256 isn't an exact multiple of
2191 * 3, we'll underestimate near each multiple of 12K. This is actually more
2192 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
2193 * segment. For our purposes this is accurate out to 1M which is orders of
2194 * magnitude greater than our largest possible GSO size.
2196 * This would then be implemented as:
2197 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
2199 * Since multiplication and division can be reordered (A * B / C = A / C * B),
 * the whole calculation collapses into:
2201 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
2203 static unsigned int ice_txd_use_count(unsigned int size)
2205 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
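/* Worked examples for ice_txd_use_count() (illustrative, assuming
 * ICE_DESCS_FOR_SKB_DATA_PTR is 1):
 *
 *	size =  4096: (4096 * 85) >> 20  = 0	-> 1 descriptor
 *	size = 12288: (12288 * 85) >> 20 = 0	-> 1 descriptor
 *	size = 32768: (32768 * 85) >> 20 = 2	-> 3 descriptors
 *	size = 60000: (60000 * 85) >> 20 = 4	-> 5 descriptors
 *
 * The 12K case shows the intentional underestimate near multiples of 12K;
 * it is still safe because a single descriptor can carry up to 16K - 1
 * bytes of suitably aligned data.
 */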
2209 * ice_xmit_desc_count - calculate number of Tx descriptors needed
2212 * Returns number of data descriptors needed for this skb.
2214 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
2216 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
2217 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2218 unsigned int count = 0, size = skb_headlen(skb);
2221 count += ice_txd_use_count(size);
2226 size = skb_frag_size(frag++);
2233 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2236 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2237 * and so we need to figure out the cases where we need to linearize the skb.
2239 * For TSO we need to count the TSO header and segment payload separately.
2240 * As such we need to check cases where we have 7 fragments or more as we
2241 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2242 * the segment payload in the first descriptor, and another 7 for the fragments.
2245 static bool __ice_chk_linearize(struct sk_buff *skb)
2247 const skb_frag_t *frag, *stale;
2250 /* no need to check if number of frags is less than 7 */
2251 nr_frags = skb_shinfo(skb)->nr_frags;
2252 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2255 /* We need to walk through the list and validate that each group
2256 * of 6 fragments totals at least gso_size.
2258 nr_frags -= ICE_MAX_BUF_TXD - 2;
2259 frag = &skb_shinfo(skb)->frags[0];
2261 /* Initialize sum to the negative value of (gso_size - 1). We
2262 * use this as the worst case scenario in which the frag ahead
2263 * of us only provides one byte which is why we are limited to 6
2264 * descriptors for a single transmit as the header and previous
2265 * fragment are already consuming 2 descriptors.
2267 sum = 1 - skb_shinfo(skb)->gso_size;
2269 /* Add size of frags 0 through 4 to create our initial sum */
2270 sum += skb_frag_size(frag++);
2271 sum += skb_frag_size(frag++);
2272 sum += skb_frag_size(frag++);
2273 sum += skb_frag_size(frag++);
2274 sum += skb_frag_size(frag++);
2276 /* Walk through fragments adding latest fragment, testing it, and
2277 * then removing stale fragments from the sum.
2279 for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
2280 int stale_size = skb_frag_size(stale);
2282 sum += skb_frag_size(frag++);
2284 /* The stale fragment may present us with a smaller
2285 * descriptor than the actual fragment size. To account
2286 * for that we need to remove all the data on the front and
2287 * figure out what the remainder would be in the last
2288 * descriptor associated with the fragment.
2290 if (stale_size > ICE_MAX_DATA_PER_TXD) {
2291 int align_pad = -(skb_frag_off(stale)) &
2292 (ICE_MAX_READ_REQ_SIZE - 1);
2295 stale_size -= align_pad;
2298 sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2299 stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
2300 } while (stale_size > ICE_MAX_DATA_PER_TXD);
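/* Worked example for the adjustment above (hypothetical sizes, assuming
 * ICE_MAX_READ_REQ_SIZE is 4096 and ICE_MAX_DATA_PER_TXD_ALIGNED is 12K):
 * a 20000-byte stale fragment starting at page offset 256 gives
 * align_pad = -(256) & 4095 = 3840, so 3840 + 12288 = 16128 bytes are
 * dropped from the running sum and only the 3872 bytes that would land in
 * the fragment's last descriptor keep contributing to it.
 */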
2303 /* if sum is negative we failed to make sufficient progress */
2317 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2319 * @count: number of buffers used
2321 * Note: Our HW can't scatter-gather more than 8 fragments to build
2322 * a packet on the wire and so we need to figure out the cases where we
2323 * need to linearize the skb.
2325 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2327 /* Both TSO and single send will work if count is less than 8 */
2328 if (likely(count < ICE_MAX_BUF_TXD))
2331 if (skb_is_gso(skb))
2332 return __ice_chk_linearize(skb);
2334 /* we can support up to 8 data buffers for a single send */
2335 return count != ICE_MAX_BUF_TXD;
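/* Illustrative example (hypothetical fragment layout): a TSO skb with
 * gso_size = 2048 built from eight 256-byte fragments would need eight
 * fragment buffers plus the TSO header buffer to transmit one segment,
 * nine DMA buffers in total where the hardware supports at most eight,
 * so __ice_chk_linearize() returns true and the skb gets linearized.
 */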
2339 * ice_xmit_frame_ring - Sends buffer on Tx ring
2341 * @tx_ring: ring to send buffer on
2343 * Returns NETDEV_TX_OK if sent, else an error code
2346 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2348 struct ice_tx_offload_params offload = { 0 };
2349 struct ice_vsi *vsi = tx_ring->vsi;
2350 struct ice_tx_buf *first;
2354 count = ice_xmit_desc_count(skb);
2355 if (ice_chk_linearize(skb, count)) {
2356 if (__skb_linearize(skb))
2358 count = ice_txd_use_count(skb->len);
2359 tx_ring->tx_stats.tx_linearize++;
2362 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2363 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2364 * + 4 desc gap to avoid the cache line where head is,
2365 * + 1 desc for context descriptor,
2366 * otherwise try next time
2368 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2369 ICE_DESCS_FOR_CTX_DESC)) {
2370 tx_ring->tx_stats.tx_busy++;
2371 return NETDEV_TX_BUSY;
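/* Worked example for the budget above (illustrative numbers, assuming
 * ICE_DESCS_PER_CACHE_LINE is 4 and ICE_DESCS_FOR_CTX_DESC is 1): an skb
 * with a 254-byte linear head and three 32K fragments needs
 * count = 1 + 3 * 3 = 10 data descriptors, so at least 10 + 4 + 1 = 15
 * descriptors must be free or the queue is stopped and NETDEV_TX_BUSY is
 * returned.
 */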
2374 offload.tx_ring = tx_ring;
2376 /* record the location of the first descriptor for this packet */
2377 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2379 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2380 first->gso_segs = 1;
2381 first->tx_flags = 0;
2383 /* prepare the VLAN tagging flags for Tx */
2384 ice_tx_prepare_vlan_flags(tx_ring, first);
2386 /* set up TSO offload */
2387 tso = ice_tso(first, &offload);
2391 /* always set up Tx checksum offload */
2392 csum = ice_tx_csum(first, &offload);
2396 /* allow CONTROL frames egress from main VSI if FW LLDP disabled */
2397 if (unlikely(skb->priority == TC_PRIO_CONTROL &&
2398 vsi->type == ICE_VSI_PF &&
2399 vsi->port_info->qos_cfg.is_sw_lldp))
2400 offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
2401 ICE_TX_CTX_DESC_SWTCH_UPLINK <<
2402 ICE_TXD_CTX_QW1_CMD_S);
2404 if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
2405 struct ice_tx_ctx_desc *cdesc;
2406 u16 i = tx_ring->next_to_use;
2408 /* grab the next descriptor */
2409 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2411 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2413 /* setup context descriptor */
2414 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2415 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2416 cdesc->rsvd = cpu_to_le16(0);
2417 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2420 ice_tx_map(tx_ring, first, &offload);
2421 return NETDEV_TX_OK;
2424 dev_kfree_skb_any(skb);
2425 return NETDEV_TX_OK;
2429 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2431 * @netdev: network interface device structure
2433 * Returns NETDEV_TX_OK if sent, else an error code
2435 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2437 struct ice_netdev_priv *np = netdev_priv(netdev);
2438 struct ice_vsi *vsi = np->vsi;
2439 struct ice_ring *tx_ring;
2441 tx_ring = vsi->tx_rings[skb->queue_mapping];
2443 /* hardware can't handle really short frames; hardware padding works beyond this point */
2446 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2447 return NETDEV_TX_OK;
2449 return ice_xmit_frame_ring(skb, tx_ring);
2453 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
2454 * @tx_ring: tx_ring to clean
2456 void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
2458 struct ice_vsi *vsi = tx_ring->vsi;
2459 s16 i = tx_ring->next_to_clean;
2460 int budget = ICE_DFLT_IRQ_WORK;
2461 struct ice_tx_desc *tx_desc;
2462 struct ice_tx_buf *tx_buf;
2464 tx_buf = &tx_ring->tx_buf[i];
2465 tx_desc = ICE_TX_DESC(tx_ring, i);
2466 i -= tx_ring->count;
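/* Illustrative walk-through of the negative-index scheme (hypothetical
 * numbers): with count = 64 and next_to_clean = 62, i starts at -2. Each
 * processed descriptor increments i; when i hits 0 the loop has run off
 * the end of the ring, so tx_buf/tx_desc are reset to entry 0 and i is
 * rewound by another 64. Adding count back at the end of the function
 * turns i into the new next_to_clean without any modulo in the hot path.
 */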
2469 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
2471 /* if next_to_watch is not set then there is no pending work */
2475 /* prevent any other reads prior to eop_desc */
2478 /* if the descriptor isn't done, no work to do */
2479 if (!(eop_desc->cmd_type_offset_bsz &
2480 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
2483 /* clear next_to_watch to prevent false hangs */
2484 tx_buf->next_to_watch = NULL;
2485 tx_desc->buf_addr = 0;
2486 tx_desc->cmd_type_offset_bsz = 0;
2488 /* move past filter desc */
2493 i -= tx_ring->count;
2494 tx_buf = tx_ring->tx_buf;
2495 tx_desc = ICE_TX_DESC(tx_ring, 0);
2498 /* unmap the data header */
2499 if (dma_unmap_len(tx_buf, len))
2500 dma_unmap_single(tx_ring->dev,
2501 dma_unmap_addr(tx_buf, dma),
2502 dma_unmap_len(tx_buf, len),
2504 if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
2505 devm_kfree(tx_ring->dev, tx_buf->raw_buf);
2507 /* reset the buffer and descriptor so the entry can be reused */
2508 tx_buf->raw_buf = NULL;
2509 tx_buf->tx_flags = 0;
2510 tx_buf->next_to_watch = NULL;
2511 dma_unmap_len_set(tx_buf, len, 0);
2512 tx_desc->buf_addr = 0;
2513 tx_desc->cmd_type_offset_bsz = 0;
2515 /* move past eop_desc for start of next FD desc */
2520 i -= tx_ring->count;
2521 tx_buf = tx_ring->tx_buf;
2522 tx_desc = ICE_TX_DESC(tx_ring, 0);
2526 } while (likely(budget));
2528 i += tx_ring->count;
2529 tx_ring->next_to_clean = i;
2531 /* re-enable interrupt if needed */
2532 ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);