1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
4 /* The driver transmit and receive code */
6 #include <linux/prefetch.h>
7 #include <linux/mm.h>
8 #include "ice.h"
9 #include "ice_dcb_lib.h"
11 #define ICE_RX_HDR_SIZE 256
14 * ice_unmap_and_free_tx_buf - Release a Tx buffer
15 * @ring: the ring that owns the buffer
16 * @tx_buf: the buffer to free
19 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
22 dev_kfree_skb_any(tx_buf->skb);
23 if (dma_unmap_len(tx_buf, len))
24 dma_unmap_single(ring->dev,
25 dma_unmap_addr(tx_buf, dma),
26 dma_unmap_len(tx_buf, len),
28 } else if (dma_unmap_len(tx_buf, len)) {
29 dma_unmap_page(ring->dev,
30 dma_unmap_addr(tx_buf, dma),
31 dma_unmap_len(tx_buf, len),
35 tx_buf->next_to_watch = NULL;
37 dma_unmap_len_set(tx_buf, len, 0);
38 /* tx_buf must be completely set up in the transmit path */
41 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
43 return netdev_get_tx_queue(ring->netdev, ring->q_index);
47 * ice_clean_tx_ring - Free any empty Tx buffers
48 * @tx_ring: ring to be cleaned
50 void ice_clean_tx_ring(struct ice_ring *tx_ring)
54 /* ring already cleared, nothing to do */
58 /* Free all the Tx ring sk_buffs */
59 for (i = 0; i < tx_ring->count; i++)
60 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
62 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
64 /* Zero out the descriptor ring */
65 memset(tx_ring->desc, 0, tx_ring->size);
67 tx_ring->next_to_use = 0;
68 tx_ring->next_to_clean = 0;
73 /* cleanup Tx queue statistics */
74 netdev_tx_reset_queue(txring_txq(tx_ring));
78 * ice_free_tx_ring - Free Tx resources per queue
79 * @tx_ring: Tx descriptor ring for a specific queue
81 * Free all transmit software resources
83 void ice_free_tx_ring(struct ice_ring *tx_ring)
85 ice_clean_tx_ring(tx_ring);
86 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
87 tx_ring->tx_buf = NULL;
90 dmam_free_coherent(tx_ring->dev, tx_ring->size,
91 tx_ring->desc, tx_ring->dma);
97 * ice_clean_tx_irq - Reclaim resources after transmit completes
98 * @vsi: the VSI we care about
99 * @tx_ring: Tx ring to clean
100 * @napi_budget: Used to determine if we are in netpoll
102 * Returns true if there's any budget left (i.e. the clean is finished)
105 ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
107 unsigned int total_bytes = 0, total_pkts = 0;
108 unsigned int budget = vsi->work_lmt;
109 s16 i = tx_ring->next_to_clean;
110 struct ice_tx_desc *tx_desc;
111 struct ice_tx_buf *tx_buf;
113 tx_buf = &tx_ring->tx_buf[i];
114 tx_desc = ICE_TX_DESC(tx_ring, i);
118 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
120 /* if next_to_watch is not set then there is no work pending */
124 smp_rmb(); /* prevent any other reads prior to eop_desc */
126 /* if the descriptor isn't done, no work yet to do */
127 if (!(eop_desc->cmd_type_offset_bsz &
128 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
131 /* clear next_to_watch to prevent false hangs */
132 tx_buf->next_to_watch = NULL;
134 /* update the statistics for this packet */
135 total_bytes += tx_buf->bytecount;
136 total_pkts += tx_buf->gso_segs;
139 napi_consume_skb(tx_buf->skb, napi_budget);
141 /* unmap skb header data */
142 dma_unmap_single(tx_ring->dev,
143 dma_unmap_addr(tx_buf, dma),
144 dma_unmap_len(tx_buf, len),
147 /* clear tx_buf data */
149 dma_unmap_len_set(tx_buf, len, 0);
151 /* unmap remaining buffers */
152 while (tx_desc != eop_desc) {
158 tx_buf = tx_ring->tx_buf;
159 tx_desc = ICE_TX_DESC(tx_ring, 0);
162 /* unmap any remaining paged data */
163 if (dma_unmap_len(tx_buf, len)) {
164 dma_unmap_page(tx_ring->dev,
165 dma_unmap_addr(tx_buf, dma),
166 dma_unmap_len(tx_buf, len),
168 dma_unmap_len_set(tx_buf, len, 0);
172 /* move us one more past the eop_desc for start of next pkt */
178 tx_buf = tx_ring->tx_buf;
179 tx_desc = ICE_TX_DESC(tx_ring, 0);
184 /* update budget accounting */
186 } while (likely(budget));
189 tx_ring->next_to_clean = i;
190 u64_stats_update_begin(&tx_ring->syncp);
191 tx_ring->stats.bytes += total_bytes;
192 tx_ring->stats.pkts += total_pkts;
193 u64_stats_update_end(&tx_ring->syncp);
194 tx_ring->q_vector->tx.total_bytes += total_bytes;
195 tx_ring->q_vector->tx.total_pkts += total_pkts;
197 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
200 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
201 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
202 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
203 /* Make sure that anybody stopping the queue after this
204 * sees the new next_to_clean.
205 */
206 smp_mb();
207 if (__netif_subqueue_stopped(tx_ring->netdev,
209 !test_bit(__ICE_DOWN, vsi->state)) {
210 netif_wake_subqueue(tx_ring->netdev,
212 ++tx_ring->tx_stats.restart_q;
220 * ice_setup_tx_ring - Allocate the Tx descriptors
221 * @tx_ring: the Tx ring to set up
223 * Return 0 on success, negative on error
225 int ice_setup_tx_ring(struct ice_ring *tx_ring)
227 struct device *dev = tx_ring->dev;
232 /* warn if we are about to overwrite the pointer */
233 WARN_ON(tx_ring->tx_buf);
235 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
237 if (!tx_ring->tx_buf)
240 /* round up to nearest page */
241 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
243 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
245 if (!tx_ring->desc) {
246 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
251 tx_ring->next_to_use = 0;
252 tx_ring->next_to_clean = 0;
253 tx_ring->tx_stats.prev_pkt = -1;
257 devm_kfree(dev, tx_ring->tx_buf);
258 tx_ring->tx_buf = NULL;
263 * ice_clean_rx_ring - Free Rx buffers
264 * @rx_ring: ring to be cleaned
266 void ice_clean_rx_ring(struct ice_ring *rx_ring)
268 struct device *dev = rx_ring->dev;
271 /* ring already cleared, nothing to do */
272 if (!rx_ring->rx_buf)
275 /* Free all the Rx ring sk_buffs */
276 for (i = 0; i < rx_ring->count; i++) {
277 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
280 dev_kfree_skb(rx_buf->skb);
286 /* Invalidate cache lines that may have been written to by
287 * device so that we avoid corrupting memory.
289 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
291 ICE_RXBUF_2048, DMA_FROM_DEVICE);
293 /* free resources associated with mapping */
294 dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
295 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
296 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
299 rx_buf->page_offset = 0;
302 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
304 /* Zero out the descriptor ring */
305 memset(rx_ring->desc, 0, rx_ring->size);
307 rx_ring->next_to_alloc = 0;
308 rx_ring->next_to_clean = 0;
309 rx_ring->next_to_use = 0;
313 * ice_free_rx_ring - Free Rx resources
314 * @rx_ring: ring to clean the resources from
316 * Free all receive software resources
318 void ice_free_rx_ring(struct ice_ring *rx_ring)
320 ice_clean_rx_ring(rx_ring);
321 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
322 rx_ring->rx_buf = NULL;
325 dmam_free_coherent(rx_ring->dev, rx_ring->size,
326 rx_ring->desc, rx_ring->dma);
327 rx_ring->desc = NULL;
332 * ice_setup_rx_ring - Allocate the Rx descriptors
333 * @rx_ring: the Rx ring to set up
335 * Return 0 on success, negative on error
337 int ice_setup_rx_ring(struct ice_ring *rx_ring)
339 struct device *dev = rx_ring->dev;
344 /* warn if we are about to overwrite the pointer */
345 WARN_ON(rx_ring->rx_buf);
347 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
349 if (!rx_ring->rx_buf)
352 /* round up to nearest page */
353 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
355 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
357 if (!rx_ring->desc) {
358 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
363 rx_ring->next_to_use = 0;
364 rx_ring->next_to_clean = 0;
368 devm_kfree(dev, rx_ring->rx_buf);
369 rx_ring->rx_buf = NULL;
374 * ice_release_rx_desc - Store the new tail and head values
375 * @rx_ring: ring to bump
376 * @val: new head index
378 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
380 u16 prev_ntu = rx_ring->next_to_use;
382 rx_ring->next_to_use = val;
384 /* update next to alloc since we have filled the ring */
385 rx_ring->next_to_alloc = val;
387 /* QRX_TAIL will be updated with any tail value, but hardware ignores
388 * the lower 3 bits. This makes it so we only bump tail on meaningful
389 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
390 * the budget depending on the current traffic load.
393 if (prev_ntu != val) {
394 /* Force memory writes to complete before letting h/w
395 * know there are new descriptors to fetch. (Only
396 * applicable for weak-ordered memory model archs,
397 * such as IA-64).
398 */
399 wmb();
400 writel(val, rx_ring->tail);
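/* Illustrative note: because the hardware ignores the low 3 bits of QRX_TAIL,
 * writing a tail value of, say, 13 has the same effect as writing 8, so
 * descriptors 8-12 are not exposed to the device until a later write crosses
 * the next multiple-of-8 boundary.
 */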
405 * ice_alloc_mapped_page - recycle or make a new page
406 * @rx_ring: ring to use
407 * @bi: rx_buf struct to modify
409 * Returns true if the page was successfully allocated or
410 * reused.
411 */
412 static bool
413 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
415 struct page *page = bi->page;
418 /* since we are recycling buffers we should seldom need to alloc */
420 rx_ring->rx_stats.page_reuse_count++;
424 /* alloc new page for storage */
425 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
426 if (unlikely(!page)) {
427 rx_ring->rx_stats.alloc_page_failed++;
431 /* map page for use */
432 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
433 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
435 /* if mapping failed free memory back to system since
436 * there isn't much point in holding memory we can't use
438 if (dma_mapping_error(rx_ring->dev, dma)) {
439 __free_pages(page, 0);
440 rx_ring->rx_stats.alloc_page_failed++;
447 page_ref_add(page, USHRT_MAX - 1);
448 bi->pagecnt_bias = USHRT_MAX;
454 * ice_alloc_rx_bufs - Replace used receive buffers
455 * @rx_ring: ring to place buffers on
456 * @cleaned_count: number of buffers to replace
458 * Returns false if all allocations were successful, true if any fail. Returning
459 * true signals to the caller that we didn't replace cleaned_count buffers and
460 * there is more work to do.
462 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
463 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
464 * multiple tail writes per call.
466 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
468 union ice_32b_rx_flex_desc *rx_desc;
469 u16 ntu = rx_ring->next_to_use;
470 struct ice_rx_buf *bi;
472 /* do nothing if no valid netdev defined */
473 if (!rx_ring->netdev || !cleaned_count)
476 /* get the Rx descriptor and buffer based on next_to_use */
477 rx_desc = ICE_RX_DESC(rx_ring, ntu);
478 bi = &rx_ring->rx_buf[ntu];
481 /* if we fail here, we have work remaining */
482 if (!ice_alloc_mapped_page(rx_ring, bi))
485 /* sync the buffer for use by the device */
486 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
491 /* Refresh the desc even if buffer_addrs didn't change
492 * because each write-back erases this info.
494 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
499 if (unlikely(ntu == rx_ring->count)) {
500 rx_desc = ICE_RX_DESC(rx_ring, 0);
501 bi = rx_ring->rx_buf;
505 /* clear the status bits for the next_to_use descriptor */
506 rx_desc->wb.status_error0 = 0;
509 } while (cleaned_count);
511 if (rx_ring->next_to_use != ntu)
512 ice_release_rx_desc(rx_ring, ntu);
514 return !!cleaned_count;
518 * ice_page_is_reserved - check if reuse is possible
519 * @page: page struct to check
521 static bool ice_page_is_reserved(struct page *page)
523 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
527 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
528 * @rx_buf: Rx buffer to adjust
529 * @size: Size of adjustment
531 * Update the offset within page so that Rx buf will be ready to be reused.
532 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
533 * so the second half of page assigned to Rx buffer will be used, otherwise
534 * the offset is moved by the @size bytes
537 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
539 #if (PAGE_SIZE < 8192)
540 /* flip page offset to other buffer */
541 rx_buf->page_offset ^= size;
543 /* move offset up to the next cache line */
544 rx_buf->page_offset += size;
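/* Worked example (illustrative values): with 4K pages and 2K buffers the
 * size passed in is ICE_RXBUF_2048, so the XOR above simply toggles
 * page_offset between 0 and 2048, i.e. between the two halves of the page.
 * On PAGE_SIZE >= 8192 systems the offset instead advances through the page
 * in @size-sized steps.
 */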
549 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
550 * @rx_buf: buffer containing the page
552 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
553 * which will assign the current buffer to the buffer that next_to_alloc is
554 * pointing to; otherwise, the DMA mapping needs to be destroyed and
555 * page freed
556 */
557 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
559 #if (PAGE_SIZE >= 8192)
560 unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
562 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
563 struct page *page = rx_buf->page;
565 /* avoid re-using remote pages */
566 if (unlikely(ice_page_is_reserved(page)))
569 #if (PAGE_SIZE < 8192)
570 /* if we are only owner of page we can reuse it */
571 if (unlikely((page_count(page) - pagecnt_bias) > 1))
574 if (rx_buf->page_offset > last_offset)
576 #endif /* PAGE_SIZE < 8192 */
578 /* If we have drained the page fragment pool we need to update
579 * the pagecnt_bias and page count so that we fully restock the
580 * number of references the driver holds.
582 if (unlikely(pagecnt_bias == 1)) {
583 page_ref_add(page, USHRT_MAX - 1);
584 rx_buf->pagecnt_bias = USHRT_MAX;
591 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
592 * @rx_buf: buffer containing page to add
593 * @skb: sk_buff to place the data into
594 * @size: packet length from rx_desc
596 * This function will add the data contained in rx_buf->page to the skb.
597 * It will just attach the page as a frag to the skb.
598 * The function will then update the page offset.
601 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
604 #if (PAGE_SIZE >= 8192)
605 unsigned int truesize = SKB_DATA_ALIGN(size);
607 unsigned int truesize = ICE_RXBUF_2048;
610 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
611 rx_buf->page_offset, size, truesize);
613 /* page is being used so we must update the page offset */
614 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
618 * ice_reuse_rx_page - page flip buffer and store it back on the ring
619 * @rx_ring: Rx descriptor ring to store buffers on
620 * @old_buf: donor buffer to have page reused
622 * Synchronizes page for reuse by the adapter
625 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
627 u16 nta = rx_ring->next_to_alloc;
628 struct ice_rx_buf *new_buf;
630 new_buf = &rx_ring->rx_buf[nta];
632 /* update, and store next to alloc */
634 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
636 /* Transfer page from old buffer to new buffer.
637 * Move each member individually to avoid possible store
638 * forwarding stalls and unnecessary copy of skb.
640 new_buf->dma = old_buf->dma;
641 new_buf->page = old_buf->page;
642 new_buf->page_offset = old_buf->page_offset;
643 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
647 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
648 * @rx_ring: Rx descriptor ring to transact packets on
649 * @skb: skb to be used
650 * @size: size of buffer to add to skb
652 * This function will pull an Rx buffer from the ring and synchronize it
653 * for use by the CPU.
655 static struct ice_rx_buf *
656 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
657 const unsigned int size)
659 struct ice_rx_buf *rx_buf;
661 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
662 prefetchw(rx_buf->page);
665 /* we are reusing so sync this buffer for CPU use */
666 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
667 rx_buf->page_offset, size,
670 /* We have pulled a buffer for use, so decrement pagecnt_bias */
671 rx_buf->pagecnt_bias--;
677 * ice_construct_skb - Allocate skb and populate it
678 * @rx_ring: Rx descriptor ring to transact packets on
679 * @rx_buf: Rx buffer to pull data from
680 * @size: the length of the packet
682 * This function allocates an skb. It then populates it with the page
683 * data from the current receive descriptor, taking care to set up the
684 * skb correctly.
685 */
686 static struct sk_buff *
687 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
690 void *va = page_address(rx_buf->page) + rx_buf->page_offset;
691 unsigned int headlen;
694 /* prefetch first cache line of first page */
696 #if L1_CACHE_BYTES < 128
697 prefetch((u8 *)va + L1_CACHE_BYTES);
698 #endif /* L1_CACHE_BYTES */
700 /* allocate a skb to store the frags */
701 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
702 GFP_ATOMIC | __GFP_NOWARN);
706 skb_record_rx_queue(skb, rx_ring->q_index);
707 /* Determine available headroom for copy */
709 if (headlen > ICE_RX_HDR_SIZE)
710 headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
712 /* align pull length to size of long to optimize memcpy performance */
713 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
715 /* if we exhaust the linear part then add what is left as a frag */
718 #if (PAGE_SIZE >= 8192)
719 unsigned int truesize = SKB_DATA_ALIGN(size);
721 unsigned int truesize = ICE_RXBUF_2048;
723 skb_add_rx_frag(skb, 0, rx_buf->page,
724 rx_buf->page_offset + headlen, size, truesize);
725 /* buffer is used by skb, update page_offset */
726 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
728 /* buffer is unused, reset bias back to rx_buf; data was copied
729 * onto skb's linear part so there's no need for adjusting
730 * page offset and we can reuse this buffer as-is
732 rx_buf->pagecnt_bias++;
739 * ice_put_rx_buf - Clean up used buffer and either recycle or free
740 * @rx_ring: Rx descriptor ring to transact packets on
741 * @rx_buf: Rx buffer to pull data from
743 * This function will clean up the contents of the rx_buf. It will
744 * either recycle the buffer or unmap it and free the associated resources.
746 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
748 /* hand second half of page back to the ring */
749 if (ice_can_reuse_rx_page(rx_buf)) {
750 ice_reuse_rx_page(rx_ring, rx_buf);
751 rx_ring->rx_stats.page_reuse_count++;
753 /* we are not reusing the buffer so unmap it */
754 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
755 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
756 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
759 /* clear contents of buffer_info */
765 * ice_cleanup_headers - Correct empty headers
766 * @skb: pointer to current skb being fixed
768 * Also address the case where we are pulling data in on pages only
769 * and as such no data is present in the skb header.
771 * In addition if skb is not at least 60 bytes we need to pad it so that
772 * it is large enough to qualify as a valid Ethernet frame.
774 * Returns true if an error was encountered and skb was freed.
776 static bool ice_cleanup_headers(struct sk_buff *skb)
778 /* if eth_skb_pad returns an error the skb was freed */
779 if (eth_skb_pad(skb))
786 * ice_test_staterr - tests bits in Rx descriptor status and error fields
787 * @rx_desc: pointer to receive descriptor (in le64 format)
788 * @stat_err_bits: value to mask
790 * This function does some fast chicanery in order to return the
791 * value of the mask which is really only used for boolean tests.
792 * The status_error_len doesn't need to be shifted because it begins
793 * at offset zero.
794 */
795 static bool
796 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
798 return !!(rx_desc->wb.status_error0 &
799 cpu_to_le16(stat_err_bits));
803 * ice_is_non_eop - process handling of non-EOP buffers
804 * @rx_ring: Rx ring being processed
805 * @rx_desc: Rx descriptor for current buffer
806 * @skb: Current socket buffer containing buffer in progress
808 * This function updates next to clean. If the buffer is an EOP buffer
809 * this function exits returning false, otherwise it will place the
810 * sk_buff in the next buffer to be chained and return true indicating
811 * that this is in fact a non-EOP buffer.
814 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
817 u32 ntc = rx_ring->next_to_clean + 1;
819 /* fetch, update, and store next to clean */
820 ntc = (ntc < rx_ring->count) ? ntc : 0;
821 rx_ring->next_to_clean = ntc;
823 prefetch(ICE_RX_DESC(rx_ring, ntc));
825 /* if we are the last buffer then there is nothing else to do */
826 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
827 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
830 /* place skb in next buffer to be received */
831 rx_ring->rx_buf[ntc].skb = skb;
832 rx_ring->rx_stats.non_eop_descs++;
838 * ice_ptype_to_htype - get a hash type
839 * @ptype: the ptype value from the descriptor
841 * Returns a hash type to be used by skb_set_hash
843 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
845 return PKT_HASH_TYPE_NONE;
849 * ice_rx_hash - set the hash value in the skb
850 * @rx_ring: descriptor ring
851 * @rx_desc: specific descriptor
852 * @skb: pointer to current skb
853 * @rx_ptype: the ptype value from the descriptor
856 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
857 struct sk_buff *skb, u8 rx_ptype)
859 struct ice_32b_rx_flex_desc_nic *nic_mdid;
862 if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
865 if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
868 nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
869 hash = le32_to_cpu(nic_mdid->rss_hash);
870 skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
874 * ice_rx_csum - Indicate in skb if checksum is good
875 * @vsi: the VSI we care about
876 * @skb: skb currently being received and modified
877 * @rx_desc: the receive descriptor
878 * @ptype: the packet type decoded by hardware
880 * skb->protocol must be set before this function is called
883 ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
884 union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
886 struct ice_rx_ptype_decoded decoded;
887 u32 rx_error, rx_status;
890 rx_status = le16_to_cpu(rx_desc->wb.status_error0);
891 rx_error = rx_status;
893 decoded = ice_decode_rx_desc_ptype(ptype);
895 /* Start with CHECKSUM_NONE and by default csum_level = 0 */
896 skb->ip_summed = CHECKSUM_NONE;
897 skb_checksum_none_assert(skb);
899 /* check if Rx checksum is enabled */
900 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
903 /* check if HW has decoded the packet and checksum */
904 if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
907 if (!(decoded.known && decoded.outer_ip))
910 ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
911 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
912 ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
913 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
915 if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
916 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
918 else if (ipv6 && (rx_status &
919 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
922 /* check for L4 errors and handle packets that were not able to be
923 * checksummed due to arrival speed
925 if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
928 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
929 switch (decoded.inner_prot) {
930 case ICE_RX_PTYPE_INNER_PROT_TCP:
931 case ICE_RX_PTYPE_INNER_PROT_UDP:
932 case ICE_RX_PTYPE_INNER_PROT_SCTP:
933 skb->ip_summed = CHECKSUM_UNNECESSARY;
940 vsi->back->hw_csum_rx_error++;
944 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
945 * @rx_ring: Rx descriptor ring packet is being transacted on
946 * @rx_desc: pointer to the EOP Rx descriptor
947 * @skb: pointer to current skb being populated
948 * @ptype: the packet type decoded by hardware
950 * This function checks the ring, descriptor, and packet information in
951 * order to populate the hash, checksum, VLAN, protocol, and
952 * other fields within the skb.
955 ice_process_skb_fields(struct ice_ring *rx_ring,
956 union ice_32b_rx_flex_desc *rx_desc,
957 struct sk_buff *skb, u8 ptype)
959 ice_rx_hash(rx_ring, rx_desc, skb, ptype);
961 /* modifies the skb - consumes the enet header */
962 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
964 ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
968 * ice_receive_skb - Send a completed packet up the stack
969 * @rx_ring: Rx ring in play
970 * @skb: packet to send up
971 * @vlan_tag: VLAN tag for packet
973 * This function sends the completed packet (via skb) up the stack using
974 * gro receive functions (with/without VLAN tag)
977 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
979 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
980 (vlan_tag & VLAN_VID_MASK))
981 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
982 napi_gro_receive(&rx_ring->q_vector->napi, skb);
986 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
987 * @rx_ring: Rx descriptor ring to transact packets on
988 * @budget: Total limit on number of packets to process
990 * This function provides a "bounce buffer" approach to Rx interrupt
991 * processing. The advantage to this is that on systems that have
992 * expensive overhead for IOMMU access this provides a means of avoiding
993 * it by maintaining the mapping of the page to the system.
995 * Returns amount of work completed
997 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
999 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1000 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1003 /* start the loop to process Rx packets bounded by 'budget' */
1004 while (likely(total_rx_pkts < (unsigned int)budget)) {
1005 union ice_32b_rx_flex_desc *rx_desc;
1006 struct ice_rx_buf *rx_buf;
1007 struct sk_buff *skb;
1013 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1014 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1016 /* status_error_len will always be zero for unused descriptors
1017 * because it's cleared in cleanup, and overlaps with hdr_addr
1018 * which is always zero because packet split isn't used, if the
1019 * hardware wrote DD then it will be non-zero
1021 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1022 if (!ice_test_staterr(rx_desc, stat_err_bits))
1025 /* This memory barrier is needed to keep us from reading
1026 * any other fields out of the rx_desc until we know the
1027 * DD bit is set.
1028 */
1029 dma_rmb();
1031 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1032 ICE_RX_FLX_DESC_PKT_LEN_M;
1034 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1035 /* allocate (if needed) and populate skb */
1037 ice_add_rx_frag(rx_buf, skb, size);
1039 skb = ice_construct_skb(rx_ring, rx_buf, size);
1041 /* exit if we failed to retrieve a buffer */
1043 rx_ring->rx_stats.alloc_buf_failed++;
1044 rx_buf->pagecnt_bias++;
1048 ice_put_rx_buf(rx_ring, rx_buf);
1051 /* skip if it is NOP desc */
1052 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1055 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1056 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1057 dev_kfree_skb_any(skb);
1061 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1062 ICE_RX_FLEX_DESC_PTYPE_M;
1064 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1065 if (ice_test_staterr(rx_desc, stat_err_bits))
1066 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1068 /* correct empty headers and pad skb if needed (to make valid
1069 * ethernet frame)
1070 */
1071 if (ice_cleanup_headers(skb)) {
1076 /* probably a little skewed due to removing CRC */
1077 total_rx_bytes += skb->len;
1079 /* populate checksum, VLAN, and protocol */
1080 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1082 /* send completed skb up the stack */
1083 ice_receive_skb(rx_ring, skb, vlan_tag);
1085 /* update budget accounting */
1089 /* return up to cleaned_count buffers to hardware */
1090 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1092 /* update queue and vector specific stats */
1093 u64_stats_update_begin(&rx_ring->syncp);
1094 rx_ring->stats.pkts += total_rx_pkts;
1095 rx_ring->stats.bytes += total_rx_bytes;
1096 u64_stats_update_end(&rx_ring->syncp);
1097 rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1098 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1100 /* guarantee a trip back through this routine if there was a failure */
1101 return failure ? budget : (int)total_rx_pkts;
1105 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1106 * @port_info: port_info structure containing the current link speed
1107 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1108 * @itr: ITR value to update
1110 * Calculate how big of an increment should be applied to the ITR value passed
1111 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
1112 * link speed.
1113 *
1114 * The following is a calculation derived from:
1115 * wmem_default / (size + overhead) = desired_pkts_per_int
1116 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
1117 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1119 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1120 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1121 * formula down to
1122 *
1123 *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
1124 * ITR = -------------------------------------------- * --------------
1125 *                           rate                       pkt_size + 640
1128 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1129 unsigned int avg_pkt_size,
1132 switch (port_info->phy.link_info.link_speed) {
1133 case ICE_AQ_LINK_SPEED_100GB:
1134 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1135 avg_pkt_size + 640);
1137 case ICE_AQ_LINK_SPEED_50GB:
1138 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1139 avg_pkt_size + 640);
1141 case ICE_AQ_LINK_SPEED_40GB:
1142 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1143 avg_pkt_size + 640);
1145 case ICE_AQ_LINK_SPEED_25GB:
1146 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1147 avg_pkt_size + 640);
1149 case ICE_AQ_LINK_SPEED_20GB:
1150 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1151 avg_pkt_size + 640);
1153 case ICE_AQ_LINK_SPEED_10GB:
1156 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1157 avg_pkt_size + 640);
1161 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1162 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1163 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
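/* Worked example (illustrative numbers): each per-speed constant above is
 * wmem_default * bits_per_byte * usecs_per_sec / rate, e.g.
 * 212992 * 8 * 10^6 / 25e9 ~= 68 for 25 Gbps. For an average packet size of
 * 1500 bytes on a 25 Gbps link the increment is
 *
 *   DIV_ROUND_UP(68 * (1500 + 24), 1500 + 640) = DIV_ROUND_UP(103632, 2140)
 *                                              = 49 usecs
 *
 * subject to the ICE_ITR_ADAPTIVE_MAX_USECS clamp just above.
 */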
1170 * ice_update_itr - update the adaptive ITR value based on statistics
1171 * @q_vector: structure containing interrupt and ring information
1172 * @rc: structure containing ring performance data
1174 * Stores a new ITR value based on packets and byte
1175 * counts during the last interrupt. The advantage of per interrupt
1176 * computation is faster updates and more accurate ITR for the current
1177 * traffic pattern. Constants in this function were computed
1178 * based on theoretical maximum wire speed and thresholds were set based
1179 * on testing data as well as attempting to minimize response time
1180 * while increasing bulk throughput.
1183 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1185 unsigned long next_update = jiffies;
1186 unsigned int packets, bytes, itr;
1187 bool container_is_rx;
1189 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1192 /* If itr_countdown is set it means we programmed an ITR within
1193 * the last 4 interrupt cycles. This has a side effect of us
1194 * potentially firing an early interrupt. In order to work around
1195 * this we need to throw out any data received for a few
1196 * interrupts following the update.
1198 if (q_vector->itr_countdown) {
1199 itr = rc->target_itr;
1203 container_is_rx = (&q_vector->rx == rc);
1204 /* For Rx we want to push the delay up and default to low latency.
1205 * for Tx we want to pull the delay down and default to high latency.
1207 itr = container_is_rx ?
1208 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1209 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1211 /* If we didn't update within up to 1 - 2 jiffies we can assume
1212 * that either packets are coming in so slow there hasn't been
1213 * any work, or that there is so much work that NAPI is dealing
1214 * with interrupt moderation and we don't need to do anything.
1216 if (time_after(next_update, rc->next_update))
1219 packets = rc->total_pkts;
1220 bytes = rc->total_bytes;
1222 if (container_is_rx) {
1223 /* If Rx there are 1 to 4 packets and bytes are less than
1224 * 9000 assume insufficient data to use bulk rate limiting
1225 * approach unless Tx is already in bulk rate limiting. We
1226 * are likely latency driven.
1228 if (packets && packets < 4 && bytes < 9000 &&
1229 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1230 itr = ICE_ITR_ADAPTIVE_LATENCY;
1231 goto adjust_by_size_and_speed;
1233 } else if (packets < 4) {
1234 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1235 * bulk mode and we are receiving 4 or fewer packets just
1236 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1237 * that the Rx can relax.
1239 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1240 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1241 ICE_ITR_ADAPTIVE_MAX_USECS)
1243 } else if (packets > 32) {
1244 /* If we have processed over 32 packets in a single interrupt
1245 * for Tx assume we need to switch over to "bulk" mode.
1247 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1250 /* We have no packets to actually measure against. This means
1251 * either one of the other queues on this vector is active or
1252 * we are a Tx queue doing TSO with too high of an interrupt rate.
1254 * Between 4 and 56 we can assume that our current interrupt delay
1255 * is only slightly too low. As such we should increase it by a small
1256 * fixed amount.
1257 */
1259 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1260 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1261 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1262 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1267 if (packets <= 256) {
1268 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1269 itr &= ICE_ITR_MASK;
1271 /* Between 56 and 112 is our "goldilocks" zone where we are
1272 * working out "just right". Just report that our current
1273 * ITR is good for us.
1278 /* If packet count is 128 or greater we are likely looking
1279 * at a slight overrun of the delay we want. Try halving
1280 * our delay to see if that will cut the number of packets
1281 * in half per interrupt.
1284 itr &= ICE_ITR_MASK;
1285 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1286 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1291 /* The paths below assume we are dealing with a bulk ITR since
1292 * number of packets is greater than 256. We are just going to have
1293 * to compute a value and try to bring the count under control,
1294 * though for smaller packet sizes there isn't much we can do as
1295 * NAPI polling will likely be kicking in sooner rather than later.
1297 itr = ICE_ITR_ADAPTIVE_BULK;
1299 adjust_by_size_and_speed:
1301 /* based on checks above packets cannot be 0 so division is safe */
1302 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1303 bytes / packets, itr);
1306 /* write back value */
1307 rc->target_itr = itr;
1309 /* next update should occur within next jiffy */
1310 rc->next_update = next_update + 1;
1312 rc->total_bytes = 0;
1317 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1318 * @itr_idx: interrupt throttling index
1319 * @itr: interrupt throttling value in usecs
1321 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1323 /* The ITR value is reported in microseconds, and the register value is
1324 * recorded in 2 microsecond units. For this reason we only need to
1325 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1326 * granularity as a shift instead of division. The mask makes sure the
1327 * ITR value is never odd so we don't accidentally write into the field
1328 * prior to the ITR field.
1330 itr &= ICE_ITR_MASK;
1332 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1333 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1334 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
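/* Worked example (illustrative numbers): with the 2 usec register granularity
 * described above, an itr of 50 usecs must be written as an interval of 25.
 * Masking with ICE_ITR_MASK clears bit 0, and the single shift by
 * (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S) both divides by the granularity
 * and positions the result in the interval field, so no explicit division is
 * needed.
 */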
1337 /* The act of updating the ITR will cause it to immediately trigger. In order
1338 * to prevent this from throwing off adaptive update statistics we defer the
1339 * update so that it can only happen so often. So after either Tx or Rx are
1340 * updated we make the adaptive scheme wait until either the ITR completely
1341 * expires via the next_update expiration or we have been through at least
1342 * 3 interrupts.
1343 */
1344 #define ITR_COUNTDOWN_START 3
1347 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1348 * @vsi: the VSI associated with the q_vector
1349 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1352 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1354 struct ice_ring_container *tx = &q_vector->tx;
1355 struct ice_ring_container *rx = &q_vector->rx;
1358 /* when exiting WB_ON_ITR lets set a low ITR value and trigger
1359 * interrupts to expire right away in case we have more work ready to go
1362 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
1363 itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
1364 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
1365 /* set target back to last user set value */
1366 rx->target_itr = rx->itr_setting;
1367 /* set current to what we just wrote and dynamic if needed */
1368 rx->current_itr = ICE_WB_ON_ITR_USECS |
1369 (rx->itr_setting & ICE_ITR_DYNAMIC);
1370 /* allow normal interrupt flow to start */
1371 q_vector->itr_countdown = 0;
1375 /* This will do nothing if dynamic updates are not enabled */
1376 ice_update_itr(q_vector, tx);
1377 ice_update_itr(q_vector, rx);
1379 /* This block of logic allows us to get away with only updating
1380 * one ITR value with each interrupt. The idea is to perform a
1381 * pseudo-lazy update with the following criteria.
1383 * 1. Rx is given higher priority than Tx if both are in same state
1384 * 2. If we must reduce an ITR that is given highest priority.
1385 * 3. We then give priority to increasing ITR based on amount.
1387 if (rx->target_itr < rx->current_itr) {
1388 /* Rx ITR needs to be reduced, this is highest priority */
1389 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1390 rx->current_itr = rx->target_itr;
1391 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1392 } else if ((tx->target_itr < tx->current_itr) ||
1393 ((rx->target_itr - rx->current_itr) <
1394 (tx->target_itr - tx->current_itr))) {
1395 /* Tx ITR needs to be reduced, this is second priority
1396 * Tx ITR needs to be increased more than Rx, fourth priority
1398 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1399 tx->current_itr = tx->target_itr;
1400 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1401 } else if (rx->current_itr != rx->target_itr) {
1402 /* Rx ITR needs to be increased, third priority */
1403 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1404 rx->current_itr = rx->target_itr;
1405 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1407 /* Still have to re-enable the interrupts */
1408 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
1409 if (q_vector->itr_countdown)
1410 q_vector->itr_countdown--;
1413 if (!test_bit(__ICE_DOWN, vsi->state))
1414 wr32(&vsi->back->hw,
1415 GLINT_DYN_CTL(q_vector->reg_idx),
1420 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
1421 * @vsi: pointer to the VSI structure
1422 * @q_vector: q_vector to set WB_ON_ITR on
1424 * We need to tell hardware to write-back completed descriptors even when
1425 * interrupts are disabled. Descriptors will be written back on cache line
1426 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
1427 * descriptors may not be written back if they don't fill a cache line until the
1428 * next interrupt.
1429 *
1430 * This sets the write-back frequency to 2 microseconds as that is the minimum
1431 * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
1432 * make sure hardware knows we aren't meddling with the INTENA_M bit.
1435 ice_set_wb_on_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1437 /* already in WB_ON_ITR mode no need to change it */
1438 if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
1441 if (q_vector->num_ring_rx)
1442 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1443 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1446 if (q_vector->num_ring_tx)
1447 wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
1448 ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
1451 q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
1455 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1456 * @napi: napi struct with our devices info in it
1457 * @budget: amount of work driver is allowed to do this pass, in packets
1459 * This function will clean all queues associated with a q_vector.
1461 * Returns the amount of work done
1463 int ice_napi_poll(struct napi_struct *napi, int budget)
1465 struct ice_q_vector *q_vector =
1466 container_of(napi, struct ice_q_vector, napi);
1467 struct ice_vsi *vsi = q_vector->vsi;
1468 bool clean_complete = true;
1469 struct ice_ring *ring;
1470 int budget_per_ring;
1473 /* Since the actual Tx work is minimal, we can give the Tx a larger
1474 * budget and be more aggressive about cleaning up the Tx descriptors.
1476 ice_for_each_ring(ring, q_vector->tx)
1477 if (!ice_clean_tx_irq(vsi, ring, budget))
1478 clean_complete = false;
1480 /* Handle case where we are called by netpoll with a budget of 0 */
1484 /* normally we have 1 Rx ring per q_vector */
1485 if (unlikely(q_vector->num_ring_rx > 1))
1486 /* We attempt to distribute budget to each Rx queue fairly, but
1487 * don't allow the budget to go below 1 because that would exit
1488 * polling early.
1489 */
1490 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1492 /* Max of 1 Rx ring in this q_vector so give it the budget */
1493 budget_per_ring = budget;
1495 ice_for_each_ring(ring, q_vector->rx) {
1498 cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1499 work_done += cleaned;
1500 /* if we clean as many as budgeted, we must not be done */
1501 if (cleaned >= budget_per_ring)
1502 clean_complete = false;
1505 /* If work not completed, return budget and polling will return */
1506 if (!clean_complete)
1509 /* Exit the polling mode, but don't re-enable interrupts if stack might
1510 * poll us due to busy-polling
1512 if (likely(napi_complete_done(napi, work_done)))
1513 ice_update_ena_itr(vsi, q_vector);
1515 ice_set_wb_on_itr(vsi, q_vector);
1517 return min_t(int, work_done, budget - 1);
1520 /* helper function for building cmd/type/offset */
1522 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1524 return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1525 (td_cmd << ICE_TXD_QW1_CMD_S) |
1526 (td_offset << ICE_TXD_QW1_OFFSET_S) |
1527 ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1528 (td_tag << ICE_TXD_QW1_L2TAG1_S));
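/* For illustration, this is how ice_tx_map() below consumes the helper:
 *
 *   tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag);
 *
 * where td_cmd carries ICE_TX_DESC_CMD_* flags, td_offset the MACLEN/IPLEN/
 * L4LEN values built in ice_tx_csum(), size the buffer length for this
 * descriptor, and td_tag the L2TAG1 VLAN tag when IL2TAG1 is requested.
 */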
1532 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1533 * @tx_ring: the ring to be checked
1534 * @size: the number of Tx descriptors we want to assure are available
1536 * Returns -EBUSY if a stop is needed, else 0
1538 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1540 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1541 /* Memory barrier before checking head and tail */
1542 smp_mb();
1544 /* Check again in case another CPU has just made room available. */
1545 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1548 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1549 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1550 ++tx_ring->tx_stats.restart_q;
1555 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
1556 * @tx_ring: the ring to be checked
1557 * @size: the number of Tx descriptors we want to assure are available
1559 * Returns 0 if stop is not needed
1561 static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1563 if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
1566 return __ice_maybe_stop_tx(tx_ring, size);
1570 * ice_tx_map - Build the Tx descriptor
1571 * @tx_ring: ring to send buffer on
1572 * @first: first buffer info buffer to use
1573 * @off: pointer to struct that holds offload parameters
1575 * This function loops over the skb data pointed to by *first
1576 * and gets a physical address for each memory location and programs
1577 * it and the length into the transmit descriptor.
1580 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1581 struct ice_tx_offload_params *off)
1583 u64 td_offset, td_tag, td_cmd;
1584 u16 i = tx_ring->next_to_use;
1586 unsigned int data_len, size;
1587 struct ice_tx_desc *tx_desc;
1588 struct ice_tx_buf *tx_buf;
1589 struct sk_buff *skb;
1592 td_tag = off->td_l2tag1;
1593 td_cmd = off->td_cmd;
1594 td_offset = off->td_offset;
1597 data_len = skb->data_len;
1598 size = skb_headlen(skb);
1600 tx_desc = ICE_TX_DESC(tx_ring, i);
1602 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1603 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1604 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1605 ICE_TX_FLAGS_VLAN_S;
1608 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1612 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1613 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1615 if (dma_mapping_error(tx_ring->dev, dma))
1618 /* record length, and DMA address */
1619 dma_unmap_len_set(tx_buf, len, size);
1620 dma_unmap_addr_set(tx_buf, dma, dma);
1622 /* align size to end of page */
1623 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1624 tx_desc->buf_addr = cpu_to_le64(dma);
1626 /* account for data chunks larger than the hardware
1629 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1630 tx_desc->cmd_type_offset_bsz =
1631 build_ctob(td_cmd, td_offset, max_data, td_tag);
1636 if (i == tx_ring->count) {
1637 tx_desc = ICE_TX_DESC(tx_ring, 0);
1644 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1645 tx_desc->buf_addr = cpu_to_le64(dma);
1648 if (likely(!data_len))
1651 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1657 if (i == tx_ring->count) {
1658 tx_desc = ICE_TX_DESC(tx_ring, 0);
1662 size = skb_frag_size(frag);
1665 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1668 tx_buf = &tx_ring->tx_buf[i];
1671 /* record bytecount for BQL */
1672 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1674 /* record SW timestamp if HW timestamp is not available */
1675 skb_tx_timestamp(first->skb);
1678 if (i == tx_ring->count)
1681 /* write last descriptor with RS and EOP bits */
1682 td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1683 tx_desc->cmd_type_offset_bsz =
1684 build_ctob(td_cmd, td_offset, size, td_tag);
1686 /* Force memory writes to complete before letting h/w know there
1687 * are new descriptors to fetch.
1689 * We also use this memory barrier to make certain all of the
1690 * status bits have been updated before next_to_watch is written.
1694 /* set next_to_watch value indicating a packet is present */
1695 first->next_to_watch = tx_desc;
1697 tx_ring->next_to_use = i;
1699 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1701 /* notify HW of packet */
1702 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1703 writel(i, tx_ring->tail);
1709 /* clear DMA mappings for failed tx_buf map */
1711 tx_buf = &tx_ring->tx_buf[i];
1712 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1713 if (tx_buf == first)
1720 tx_ring->next_to_use = i;
1724 * ice_tx_csum - Enable Tx checksum offloads
1725 * @first: pointer to the first descriptor
1726 * @off: pointer to struct that holds offload parameters
1728 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1731 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1733 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1734 struct sk_buff *skb = first->skb;
1744 __be16 frag_off, protocol;
1745 unsigned char *exthdr;
1746 u32 offset, cmd = 0;
1749 if (skb->ip_summed != CHECKSUM_PARTIAL)
1752 ip.hdr = skb_network_header(skb);
1753 l4.hdr = skb_transport_header(skb);
1755 /* compute outer L2 header size */
1756 l2_len = ip.hdr - skb->data;
1757 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1759 if (skb->encapsulation)
1762 /* Enable IP checksum offloads */
1763 protocol = vlan_get_protocol(skb);
1764 if (protocol == htons(ETH_P_IP)) {
1765 l4_proto = ip.v4->protocol;
1766 /* the stack computes the IP header already, the only time we
1767 * need the hardware to recompute it is in the case of TSO.
1769 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1770 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1772 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1774 } else if (protocol == htons(ETH_P_IPV6)) {
1775 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1776 exthdr = ip.hdr + sizeof(*ip.v6);
1777 l4_proto = ip.v6->nexthdr;
1778 if (l4.hdr != exthdr)
1779 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1785 /* compute inner L3 header size */
1786 l3_len = l4.hdr - ip.hdr;
1787 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1789 /* Enable L4 checksum offloads */
1792 /* enable checksum offloads */
1793 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1794 l4_len = l4.tcp->doff;
1795 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1798 /* enable UDP checksum offload */
1799 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1800 l4_len = (sizeof(struct udphdr) >> 2);
1801 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1804 /* enable SCTP checksum offload */
1805 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1806 l4_len = sizeof(struct sctphdr) >> 2;
1807 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1811 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1813 skb_checksum_help(skb);
1818 off->td_offset |= offset;
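/* Worked example (illustrative numbers): the offset field encodes header
 * lengths in hardware units, i.e. MACLEN in 2-byte words and IPLEN/L4LEN in
 * 4-byte words. For an untagged IPv4/TCP frame with no options:
 * l2_len = 14 -> MACLEN = 7, l3_len = 20 -> IPLEN = 5, doff = 5 -> L4LEN = 5.
 */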
1823 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1824 * @tx_ring: ring to send buffer on
1825 * @first: pointer to struct ice_tx_buf
1827 * Checks the skb and sets up the generic transmit flags related to VLAN
1828 * tagging for the HW, such as VLAN, DCB, etc.
1830 * Returns an error code indicating the frame should be dropped upon error,
1831 * otherwise returns 0 to indicate the flags have been set properly.
1834 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1836 struct sk_buff *skb = first->skb;
1837 __be16 protocol = skb->protocol;
1839 if (protocol == htons(ETH_P_8021Q) &&
1840 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1841 /* when HW VLAN acceleration is turned off by the user the
1842 * stack sets the protocol to 8021q so that the driver
1843 * can take any steps required to support the SW only
1844 * VLAN handling. In our case the driver doesn't need
1845 * to take any further steps so just set the protocol
1846 * to the encapsulated ethertype.
1848 skb->protocol = vlan_get_protocol(skb);
1852 /* if we have a HW VLAN tag being added, default to the HW one */
1853 if (skb_vlan_tag_present(skb)) {
1854 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1855 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1856 } else if (protocol == htons(ETH_P_8021Q)) {
1857 struct vlan_hdr *vhdr, _vhdr;
1859 /* for SW VLAN, check the next protocol and store the tag */
1860 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1866 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1867 ICE_TX_FLAGS_VLAN_S;
1868 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
1871 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
1875 * ice_tso - computes mss and TSO length to prepare for TSO
1876 * @first: pointer to struct ice_tx_buf
1877 * @off: pointer to struct that holds offload parameters
1879 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1882 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1884 struct sk_buff *skb = first->skb;
1894 u64 cd_mss, cd_tso_len;
1895 u32 paylen, l4_start;
1898 if (skb->ip_summed != CHECKSUM_PARTIAL)
1901 if (!skb_is_gso(skb))
1904 err = skb_cow_head(skb, 0);
1908 /* cppcheck-suppress unreadVariable */
1909 ip.hdr = skb_network_header(skb);
1910 l4.hdr = skb_transport_header(skb);
1912 /* initialize outer IP header fields */
1913 if (ip.v4->version == 4) {
1917 ip.v6->payload_len = 0;
1920 /* determine offset of transport header */
1921 l4_start = l4.hdr - skb->data;
1923 /* remove payload length from checksum */
1924 paylen = skb->len - l4_start;
1925 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1927 /* compute length of segmentation header */
1928 off->header_len = (l4.tcp->doff * 4) + l4_start;
1930 /* update gso_segs and bytecount */
1931 first->gso_segs = skb_shinfo(skb)->gso_segs;
1932 first->bytecount += (first->gso_segs - 1) * off->header_len;
1934 cd_tso_len = skb->len - off->header_len;
1935 cd_mss = skb_shinfo(skb)->gso_size;
1937 /* record cdesc_qw1 with TSO parameters */
1938 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1939 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1940 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1941 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1942 first->tx_flags |= ICE_TX_FLAGS_TSO;
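/* Worked example (illustrative numbers): for a TSO skb with 54 bytes of
 * headers (14 Ethernet + 20 IPv4 + 20 TCP), 64000 bytes of payload and a
 * gso_size of 1448, header_len = 54, cd_tso_len = 64000 and gso_segs = 45.
 * The bytecount adjustment above accounts for the 44 extra header copies the
 * hardware will emit: 64054 + 44 * 54 = 66430 bytes on the wire.
 */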
1947 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1948 * @size: transmit request size in bytes
1950 * Due to hardware alignment restrictions (4K alignment), we need to
1951 * assume that we can have no more than 12K of data per descriptor, even
1952 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1953 * Thus, we need to divide by 12K. But division is slow! Instead,
1954 * we decompose the operation into shifts and one relatively cheap
1955 * multiply operation.
1957 * To divide by 12K, we first divide by 4K, then divide by 3:
1958 * To divide by 4K, shift right by 12 bits
1959 * To divide by 3, multiply by 85, then divide by 256
1960 * (Divide by 256 is done by shifting right by 8 bits)
1961 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1962 * 3, we'll underestimate near each multiple of 12K. This is actually more
1963 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1964 * segment. For our purposes this is accurate out to 1M which is orders of
1965 * magnitude greater than our largest possible GSO size.
1967 * This would then be implemented as:
1968 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1970 * Since multiplication and division are commutative, we can reorder
1971 * operations into:
1972 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1973 */
1974 static unsigned int ice_txd_use_count(unsigned int size)
1976 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
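/* Worked example (illustrative numbers): for a 60000 byte chunk,
 * (60000 * 85) >> 20 = 4, and adding the final one-descriptor round-up gives
 * 5, which matches the exact DIV_ROUND_UP(60000, 12288) = 5.
 */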
1980 * ice_xmit_desc_count - calculate number of Tx descriptors needed
1983 * Returns number of data descriptors needed for this skb.
1985 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1987 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1988 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1989 unsigned int count = 0, size = skb_headlen(skb);
1992 count += ice_txd_use_count(size);
1997 size = skb_frag_size(frag++);
2004 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
2007 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
2008 * and so we need to figure out the cases where we need to linearize the skb.
2010 * For TSO we need to count the TSO header and segment payload separately.
2011 * As such we need to check cases where we have 7 fragments or more as we
2012 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
2013 * the segment payload in the first descriptor, and another 7 for the
2016 static bool __ice_chk_linearize(struct sk_buff *skb)
2018 const skb_frag_t *frag, *stale;
2021 /* no need to check if number of frags is less than 7 */
2022 nr_frags = skb_shinfo(skb)->nr_frags;
2023 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
2026 /* We need to walk through the list and validate that each group
2027 * of 6 fragments totals at least gso_size.
2029 nr_frags -= ICE_MAX_BUF_TXD - 2;
2030 frag = &skb_shinfo(skb)->frags[0];
2032 /* Initialize sum to the negative value of gso_size minus 1. We
2033 * use this as the worst case scenario in which the frag ahead
2034 * of us only provides one byte which is why we are limited to 6
2035 * descriptors for a single transmit as the header and previous
2036 * fragment are already consuming 2 descriptors.
2038 sum = 1 - skb_shinfo(skb)->gso_size;
2040 /* Add size of frags 0 through 4 to create our initial sum */
2041 sum += skb_frag_size(frag++);
2042 sum += skb_frag_size(frag++);
2043 sum += skb_frag_size(frag++);
2044 sum += skb_frag_size(frag++);
2045 sum += skb_frag_size(frag++);
2047 /* Walk through fragments adding latest fragment, testing it, and
2048 * then removing stale fragments from the sum.
2050 stale = &skb_shinfo(skb)->frags[0];
2052 sum += skb_frag_size(frag++);
2054 /* if sum is negative we failed to make sufficient progress */
2061 sum -= skb_frag_size(stale++);
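/* Worked example (illustrative numbers): a TSO skb with gso_size 9000 built
 * from 7 frags of 1400 bytes each fails the check above: the first window of
 * 6 frags totals 8400, sum = 8400 - 8999 is negative, and the skb must be
 * linearized. With 7 frags of 1500 bytes the same window totals 9000,
 * sum = 1, and no linearization is needed.
 */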
2068 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2070 * @count: number of buffers used
2072 * Note: Our HW can't scatter-gather more than 8 fragments to build
2073 * a packet on the wire and so we need to figure out the cases where we
2074 * need to linearize the skb.
2076 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2078 /* Both TSO and single send will work if count is less than 8 */
2079 if (likely(count < ICE_MAX_BUF_TXD))
2082 if (skb_is_gso(skb))
2083 return __ice_chk_linearize(skb);
2085 /* we can support up to 8 data buffers for a single send */
2086 return count != ICE_MAX_BUF_TXD;
2090 * ice_xmit_frame_ring - Sends buffer on Tx ring
2092 * @tx_ring: ring to send buffer on
2094 * Returns NETDEV_TX_OK if sent, else an error code
2097 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2099 struct ice_tx_offload_params offload = { 0 };
2100 struct ice_tx_buf *first;
2104 count = ice_xmit_desc_count(skb);
2105 if (ice_chk_linearize(skb, count)) {
2106 if (__skb_linearize(skb))
2108 count = ice_txd_use_count(skb->len);
2109 tx_ring->tx_stats.tx_linearize++;
2112 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2113 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2114 * + 4 desc gap to avoid the cache line where head is,
2115 * + 1 desc for context descriptor,
2116 * otherwise try next time
2118 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2119 ICE_DESCS_FOR_CTX_DESC)) {
2120 tx_ring->tx_stats.tx_busy++;
2121 return NETDEV_TX_BUSY;
2124 offload.tx_ring = tx_ring;
2126 /* record the location of the first descriptor for this packet */
2127 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2129 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2130 first->gso_segs = 1;
2131 first->tx_flags = 0;
2133 /* prepare the VLAN tagging flags for Tx */
2134 if (ice_tx_prepare_vlan_flags(tx_ring, first))
2137 /* set up TSO offload */
2138 tso = ice_tso(first, &offload);
2142 /* always set up Tx checksum offload */
2143 csum = ice_tx_csum(first, &offload);
2147 if (tso || offload.cd_tunnel_params) {
2148 struct ice_tx_ctx_desc *cdesc;
2149 int i = tx_ring->next_to_use;
2151 /* grab the next descriptor */
2152 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2154 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2156 /* setup context descriptor */
2157 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2158 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2159 cdesc->rsvd = cpu_to_le16(0);
2160 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2163 ice_tx_map(tx_ring, first, &offload);
2164 return NETDEV_TX_OK;
2167 dev_kfree_skb_any(skb);
2168 return NETDEV_TX_OK;
2172 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
2174 * @netdev: network interface device structure
2176 * Returns NETDEV_TX_OK if sent, else an error code
2178 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2180 struct ice_netdev_priv *np = netdev_priv(netdev);
2181 struct ice_vsi *vsi = np->vsi;
2182 struct ice_ring *tx_ring;
2184 tx_ring = vsi->tx_rings[skb->queue_mapping];
2186 /* hardware can't handle really short frames, hardware padding works
2187 * beyond this point
2188 */
2189 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2190 return NETDEV_TX_OK;
2192 return ice_xmit_frame_ring(skb, tx_ring);