// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include "ice_dcb_lib.h"

#define ICE_RX_HDR_SIZE 256
14 * ice_unmap_and_free_tx_buf - Release a Tx buffer
15 * @ring: the ring that owns the buffer
16 * @tx_buf: the buffer to free
19 ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
22 dev_kfree_skb_any(tx_buf->skb);
23 if (dma_unmap_len(tx_buf, len))
24 dma_unmap_single(ring->dev,
25 dma_unmap_addr(tx_buf, dma),
26 dma_unmap_len(tx_buf, len),
28 } else if (dma_unmap_len(tx_buf, len)) {
29 dma_unmap_page(ring->dev,
30 dma_unmap_addr(tx_buf, dma),
31 dma_unmap_len(tx_buf, len),
35 tx_buf->next_to_watch = NULL;
37 dma_unmap_len_set(tx_buf, len, 0);
38 /* tx_buf must be completely set up in the transmit path */
41 static struct netdev_queue *txring_txq(const struct ice_ring *ring)
43 return netdev_get_tx_queue(ring->netdev, ring->q_index);
47 * ice_clean_tx_ring - Free any empty Tx buffers
48 * @tx_ring: ring to be cleaned
50 void ice_clean_tx_ring(struct ice_ring *tx_ring)
54 /* ring already cleared, nothing to do */
58 /* Free all the Tx ring sk_buffs */
59 for (i = 0; i < tx_ring->count; i++)
60 ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
62 memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
64 /* Zero out the descriptor ring */
65 memset(tx_ring->desc, 0, tx_ring->size);
67 tx_ring->next_to_use = 0;
68 tx_ring->next_to_clean = 0;
73 /* cleanup Tx queue statistics */
74 netdev_tx_reset_queue(txring_txq(tx_ring));
78 * ice_free_tx_ring - Free Tx resources per queue
79 * @tx_ring: Tx descriptor ring for a specific queue
81 * Free all transmit software resources
83 void ice_free_tx_ring(struct ice_ring *tx_ring)
85 ice_clean_tx_ring(tx_ring);
86 devm_kfree(tx_ring->dev, tx_ring->tx_buf);
87 tx_ring->tx_buf = NULL;
90 dmam_free_coherent(tx_ring->dev, tx_ring->size,
91 tx_ring->desc, tx_ring->dma);
97 * ice_clean_tx_irq - Reclaim resources after transmit completes
98 * @vsi: the VSI we care about
99 * @tx_ring: Tx ring to clean
100 * @napi_budget: Used to determine if we are in netpoll
102 * Returns true if there's any budget left (e.g. the clean is finished)
105 ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
107 unsigned int total_bytes = 0, total_pkts = 0;
108 unsigned int budget = vsi->work_lmt;
109 s16 i = tx_ring->next_to_clean;
110 struct ice_tx_desc *tx_desc;
111 struct ice_tx_buf *tx_buf;
113 tx_buf = &tx_ring->tx_buf[i];
114 tx_desc = ICE_TX_DESC(tx_ring, i);
118 struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;
120 /* if next_to_watch is not set then there is no work pending */
124 smp_rmb(); /* prevent any other reads prior to eop_desc */
126 /* if the descriptor isn't done, no work yet to do */
127 if (!(eop_desc->cmd_type_offset_bsz &
128 cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
131 /* clear next_to_watch to prevent false hangs */
132 tx_buf->next_to_watch = NULL;
134 /* update the statistics for this packet */
135 total_bytes += tx_buf->bytecount;
136 total_pkts += tx_buf->gso_segs;
139 napi_consume_skb(tx_buf->skb, napi_budget);
141 /* unmap skb header data */
142 dma_unmap_single(tx_ring->dev,
143 dma_unmap_addr(tx_buf, dma),
144 dma_unmap_len(tx_buf, len),
147 /* clear tx_buf data */
149 dma_unmap_len_set(tx_buf, len, 0);
151 /* unmap remaining buffers */
152 while (tx_desc != eop_desc) {
158 tx_buf = tx_ring->tx_buf;
159 tx_desc = ICE_TX_DESC(tx_ring, 0);
162 /* unmap any remaining paged data */
163 if (dma_unmap_len(tx_buf, len)) {
164 dma_unmap_page(tx_ring->dev,
165 dma_unmap_addr(tx_buf, dma),
166 dma_unmap_len(tx_buf, len),
168 dma_unmap_len_set(tx_buf, len, 0);
172 /* move us one more past the eop_desc for start of next pkt */
178 tx_buf = tx_ring->tx_buf;
179 tx_desc = ICE_TX_DESC(tx_ring, 0);
184 /* update budget accounting */
186 } while (likely(budget));
189 tx_ring->next_to_clean = i;
190 u64_stats_update_begin(&tx_ring->syncp);
191 tx_ring->stats.bytes += total_bytes;
192 tx_ring->stats.pkts += total_pkts;
193 u64_stats_update_end(&tx_ring->syncp);
194 tx_ring->q_vector->tx.total_bytes += total_bytes;
195 tx_ring->q_vector->tx.total_pkts += total_pkts;
197 netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
200 #define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
201 if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
202 (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
203 /* Make sure that anybody stopping the queue after this
204 * sees the new next_to_clean.
207 if (__netif_subqueue_stopped(tx_ring->netdev,
209 !test_bit(__ICE_DOWN, vsi->state)) {
210 netif_wake_subqueue(tx_ring->netdev,
212 ++tx_ring->tx_stats.restart_q;
220 * ice_setup_tx_ring - Allocate the Tx descriptors
221 * @tx_ring: the Tx ring to set up
223 * Return 0 on success, negative on error
225 int ice_setup_tx_ring(struct ice_ring *tx_ring)
227 struct device *dev = tx_ring->dev;
232 /* warn if we are about to overwrite the pointer */
233 WARN_ON(tx_ring->tx_buf);
235 devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
237 if (!tx_ring->tx_buf)
240 /* round up to nearest page */
241 tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
243 tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
245 if (!tx_ring->desc) {
246 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
251 tx_ring->next_to_use = 0;
252 tx_ring->next_to_clean = 0;
253 tx_ring->tx_stats.prev_pkt = -1;
257 devm_kfree(dev, tx_ring->tx_buf);
258 tx_ring->tx_buf = NULL;
263 * ice_clean_rx_ring - Free Rx buffers
264 * @rx_ring: ring to be cleaned
266 void ice_clean_rx_ring(struct ice_ring *rx_ring)
268 struct device *dev = rx_ring->dev;
271 /* ring already cleared, nothing to do */
272 if (!rx_ring->rx_buf)
275 /* Free all the Rx ring sk_buffs */
276 for (i = 0; i < rx_ring->count; i++) {
277 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
280 dev_kfree_skb(rx_buf->skb);
286 /* Invalidate cache lines that may have been written to by
287 * device so that we avoid corrupting memory.
289 dma_sync_single_range_for_cpu(dev, rx_buf->dma,
291 ICE_RXBUF_2048, DMA_FROM_DEVICE);
293 /* free resources associated with mapping */
294 dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
295 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
296 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
299 rx_buf->page_offset = 0;
302 memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
304 /* Zero out the descriptor ring */
305 memset(rx_ring->desc, 0, rx_ring->size);
307 rx_ring->next_to_alloc = 0;
308 rx_ring->next_to_clean = 0;
309 rx_ring->next_to_use = 0;
313 * ice_free_rx_ring - Free Rx resources
314 * @rx_ring: ring to clean the resources from
316 * Free all receive software resources
318 void ice_free_rx_ring(struct ice_ring *rx_ring)
320 ice_clean_rx_ring(rx_ring);
321 devm_kfree(rx_ring->dev, rx_ring->rx_buf);
322 rx_ring->rx_buf = NULL;
325 dmam_free_coherent(rx_ring->dev, rx_ring->size,
326 rx_ring->desc, rx_ring->dma);
327 rx_ring->desc = NULL;
332 * ice_setup_rx_ring - Allocate the Rx descriptors
333 * @rx_ring: the Rx ring to set up
335 * Return 0 on success, negative on error
337 int ice_setup_rx_ring(struct ice_ring *rx_ring)
339 struct device *dev = rx_ring->dev;
344 /* warn if we are about to overwrite the pointer */
345 WARN_ON(rx_ring->rx_buf);
347 devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
349 if (!rx_ring->rx_buf)
352 /* round up to nearest page */
353 rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
355 rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
357 if (!rx_ring->desc) {
358 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
363 rx_ring->next_to_use = 0;
364 rx_ring->next_to_clean = 0;
368 devm_kfree(dev, rx_ring->rx_buf);
369 rx_ring->rx_buf = NULL;
374 * ice_release_rx_desc - Store the new tail and head values
375 * @rx_ring: ring to bump
376 * @val: new head index
378 static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
380 u16 prev_ntu = rx_ring->next_to_use;
382 rx_ring->next_to_use = val;
384 /* update next to alloc since we have filled the ring */
385 rx_ring->next_to_alloc = val;
387 /* QRX_TAIL will be updated with any tail value, but hardware ignores
388 * the lower 3 bits. This makes it so we only bump tail on meaningful
389 * boundaries. Also, this allows us to bump tail on intervals of 8 up to
390 * the budget depending on the current traffic load.
393 if (prev_ntu != val) {
394 /* Force memory writes to complete before letting h/w
395 * know there are new descriptors to fetch. (Only
396 * applicable for weak-ordered memory model archs,
400 writel(val, rx_ring->tail);
405 * ice_alloc_mapped_page - recycle or make a new page
406 * @rx_ring: ring to use
407 * @bi: rx_buf struct to modify
409 * Returns true if the page was successfully allocated or
413 ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
415 struct page *page = bi->page;
418 /* since we are recycling buffers we should seldom need to alloc */
420 rx_ring->rx_stats.page_reuse_count++;
424 /* alloc new page for storage */
425 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
426 if (unlikely(!page)) {
427 rx_ring->rx_stats.alloc_page_failed++;
431 /* map page for use */
432 dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
433 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
435 /* if mapping failed free memory back to system since
436 * there isn't much point in holding memory we can't use
438 if (dma_mapping_error(rx_ring->dev, dma)) {
439 __free_pages(page, 0);
440 rx_ring->rx_stats.alloc_page_failed++;
447 page_ref_add(page, USHRT_MAX - 1);
448 bi->pagecnt_bias = USHRT_MAX;
454 * ice_alloc_rx_bufs - Replace used receive buffers
455 * @rx_ring: ring to place buffers on
456 * @cleaned_count: number of buffers to replace
458 * Returns false if all allocations were successful, true if any fail. Returning
459 * true signals to the caller that we didn't replace cleaned_count buffers and
460 * there is more work to do.
462 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
463 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
464 * multiple tail writes per call.
466 bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
468 union ice_32b_rx_flex_desc *rx_desc;
469 u16 ntu = rx_ring->next_to_use;
470 struct ice_rx_buf *bi;
472 /* do nothing if no valid netdev defined */
473 if (!rx_ring->netdev || !cleaned_count)
476 /* get the Rx descriptor and buffer based on next_to_use */
477 rx_desc = ICE_RX_DESC(rx_ring, ntu);
478 bi = &rx_ring->rx_buf[ntu];
481 if (!ice_alloc_mapped_page(rx_ring, bi))
484 /* sync the buffer for use by the device */
485 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
490 /* Refresh the desc even if buffer_addrs didn't change
491 * because each write-back erases this info.
493 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
498 if (unlikely(ntu == rx_ring->count)) {
499 rx_desc = ICE_RX_DESC(rx_ring, 0);
500 bi = rx_ring->rx_buf;
504 /* clear the status bits for the next_to_use descriptor */
505 rx_desc->wb.status_error0 = 0;
508 } while (cleaned_count);
510 if (rx_ring->next_to_use != ntu)
511 ice_release_rx_desc(rx_ring, ntu);
516 if (rx_ring->next_to_use != ntu)
517 ice_release_rx_desc(rx_ring, ntu);
519 /* make sure to come back via polling to try again after
526 * ice_page_is_reserved - check if reuse is possible
527 * @page: page struct to check
529 static bool ice_page_is_reserved(struct page *page)
531 return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
535 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
536 * @rx_buf: Rx buffer to adjust
537 * @size: Size of adjustment
539 * Update the offset within page so that Rx buf will be ready to be reused.
540 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
541 * so the second half of page assigned to Rx buffer will be used, otherwise
542 * the offset is moved by the @size bytes
545 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
547 #if (PAGE_SIZE < 8192)
548 /* flip page offset to other buffer */
549 rx_buf->page_offset ^= size;
551 /* move offset up to the next cache line */
552 rx_buf->page_offset += size;
557 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
558 * @rx_buf: buffer containing the page
560 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
561 * which will assign the current buffer to the buffer that next_to_alloc is
562 * pointing to; otherwise, the DMA mapping needs to be destroyed and
565 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
567 #if (PAGE_SIZE >= 8192)
568 unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
570 unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
571 struct page *page = rx_buf->page;
573 /* avoid re-using remote pages */
574 if (unlikely(ice_page_is_reserved(page)))
577 #if (PAGE_SIZE < 8192)
578 /* if we are only owner of page we can reuse it */
579 if (unlikely((page_count(page) - pagecnt_bias) > 1))
582 if (rx_buf->page_offset > last_offset)
584 #endif /* PAGE_SIZE < 8192) */
586 /* If we have drained the page fragment pool we need to update
587 * the pagecnt_bias and page count so that we fully restock the
588 * number of references the driver holds.
590 if (unlikely(pagecnt_bias == 1)) {
591 page_ref_add(page, USHRT_MAX - 1);
592 rx_buf->pagecnt_bias = USHRT_MAX;
599 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
600 * @rx_buf: buffer containing page to add
601 * @skb: sk_buff to place the data into
602 * @size: packet length from rx_desc
604 * This function will add the data contained in rx_buf->page to the skb.
605 * It will just attach the page as a frag to the skb.
606 * The function will then update the page offset.
609 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
612 #if (PAGE_SIZE >= 8192)
613 unsigned int truesize = SKB_DATA_ALIGN(size);
615 unsigned int truesize = ICE_RXBUF_2048;
618 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
619 rx_buf->page_offset, size, truesize);
621 /* page is being used so we must update the page offset */
622 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
626 * ice_reuse_rx_page - page flip buffer and store it back on the ring
627 * @rx_ring: Rx descriptor ring to store buffers on
628 * @old_buf: donor buffer to have page reused
630 * Synchronizes page for reuse by the adapter
633 ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
635 u16 nta = rx_ring->next_to_alloc;
636 struct ice_rx_buf *new_buf;
638 new_buf = &rx_ring->rx_buf[nta];
640 /* update, and store next to alloc */
642 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
644 /* Transfer page from old buffer to new buffer.
645 * Move each member individually to avoid possible store
646 * forwarding stalls and unnecessary copy of skb.
648 new_buf->dma = old_buf->dma;
649 new_buf->page = old_buf->page;
650 new_buf->page_offset = old_buf->page_offset;
651 new_buf->pagecnt_bias = old_buf->pagecnt_bias;
655 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
656 * @rx_ring: Rx descriptor ring to transact packets on
657 * @skb: skb to be used
658 * @size: size of buffer to add to skb
660 * This function will pull an Rx buffer from the ring and synchronize it
661 * for use by the CPU.
663 static struct ice_rx_buf *
664 ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
665 const unsigned int size)
667 struct ice_rx_buf *rx_buf;
669 rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
670 prefetchw(rx_buf->page);
673 /* we are reusing so sync this buffer for CPU use */
674 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
675 rx_buf->page_offset, size,
678 /* We have pulled a buffer for use, so decrement pagecnt_bias */
679 rx_buf->pagecnt_bias--;
685 * ice_construct_skb - Allocate skb and populate it
686 * @rx_ring: Rx descriptor ring to transact packets on
687 * @rx_buf: Rx buffer to pull data from
688 * @size: the length of the packet
690 * This function allocates an skb. It then populates it with the page
691 * data from the current receive descriptor, taking care to set up the
694 static struct sk_buff *
695 ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
698 void *va = page_address(rx_buf->page) + rx_buf->page_offset;
699 unsigned int headlen;
702 /* prefetch first cache line of first page */
704 #if L1_CACHE_BYTES < 128
705 prefetch((u8 *)va + L1_CACHE_BYTES);
706 #endif /* L1_CACHE_BYTES */
708 /* allocate a skb to store the frags */
709 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
710 GFP_ATOMIC | __GFP_NOWARN);
714 skb_record_rx_queue(skb, rx_ring->q_index);
715 /* Determine available headroom for copy */
717 if (headlen > ICE_RX_HDR_SIZE)
718 headlen = eth_get_headlen(skb->dev, va, ICE_RX_HDR_SIZE);
720 /* align pull length to size of long to optimize memcpy performance */
721 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
723 /* if we exhaust the linear part then add what is left as a frag */
726 #if (PAGE_SIZE >= 8192)
727 unsigned int truesize = SKB_DATA_ALIGN(size);
729 unsigned int truesize = ICE_RXBUF_2048;
731 skb_add_rx_frag(skb, 0, rx_buf->page,
732 rx_buf->page_offset + headlen, size, truesize);
733 /* buffer is used by skb, update page_offset */
734 ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
736 /* buffer is unused, reset bias back to rx_buf; data was copied
737 * onto skb's linear part so there's no need for adjusting
738 * page offset and we can reuse this buffer as-is
740 rx_buf->pagecnt_bias++;
747 * ice_put_rx_buf - Clean up used buffer and either recycle or free
748 * @rx_ring: Rx descriptor ring to transact packets on
749 * @rx_buf: Rx buffer to pull data from
751 * This function will clean up the contents of the rx_buf. It will
752 * either recycle the buffer or unmap it and free the associated resources.
754 static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
756 /* hand second half of page back to the ring */
757 if (ice_can_reuse_rx_page(rx_buf)) {
758 ice_reuse_rx_page(rx_ring, rx_buf);
759 rx_ring->rx_stats.page_reuse_count++;
761 /* we are not reusing the buffer so unmap it */
762 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
763 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
764 __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
767 /* clear contents of buffer_info */
773 * ice_cleanup_headers - Correct empty headers
774 * @skb: pointer to current skb being fixed
776 * Also address the case where we are pulling data in on pages only
777 * and as such no data is present in the skb header.
779 * In addition if skb is not at least 60 bytes we need to pad it so that
780 * it is large enough to qualify as a valid Ethernet frame.
782 * Returns true if an error was encountered and skb was freed.
784 static bool ice_cleanup_headers(struct sk_buff *skb)
786 /* if eth_skb_pad returns an error the skb was freed */
787 if (eth_skb_pad(skb))
794 * ice_test_staterr - tests bits in Rx descriptor status and error fields
795 * @rx_desc: pointer to receive descriptor (in le64 format)
796 * @stat_err_bits: value to mask
798 * This function does some fast chicanery in order to return the
799 * value of the mask which is really only used for boolean tests.
800 * The status_error_len doesn't need to be shifted because it begins
804 ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
806 return !!(rx_desc->wb.status_error0 &
807 cpu_to_le16(stat_err_bits));
811 * ice_is_non_eop - process handling of non-EOP buffers
812 * @rx_ring: Rx ring being processed
813 * @rx_desc: Rx descriptor for current buffer
814 * @skb: Current socket buffer containing buffer in progress
816 * This function updates next to clean. If the buffer is an EOP buffer
817 * this function exits returning false, otherwise it will place the
818 * sk_buff in the next buffer to be chained and return true indicating
819 * that this is in fact a non-EOP buffer.
822 ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
825 u32 ntc = rx_ring->next_to_clean + 1;
827 /* fetch, update, and store next to clean */
828 ntc = (ntc < rx_ring->count) ? ntc : 0;
829 rx_ring->next_to_clean = ntc;
831 prefetch(ICE_RX_DESC(rx_ring, ntc));
833 /* if we are the last buffer then there is nothing else to do */
834 #define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
835 if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
838 /* place skb in next buffer to be received */
839 rx_ring->rx_buf[ntc].skb = skb;
840 rx_ring->rx_stats.non_eop_descs++;
846 * ice_ptype_to_htype - get a hash type
847 * @ptype: the ptype value from the descriptor
849 * Returns a hash type to be used by skb_set_hash
851 static enum pkt_hash_types ice_ptype_to_htype(u8 __always_unused ptype)
853 return PKT_HASH_TYPE_NONE;
857 * ice_rx_hash - set the hash value in the skb
858 * @rx_ring: descriptor ring
859 * @rx_desc: specific descriptor
860 * @skb: pointer to current skb
861 * @rx_ptype: the ptype value from the descriptor
864 ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
865 struct sk_buff *skb, u8 rx_ptype)
867 struct ice_32b_rx_flex_desc_nic *nic_mdid;
870 if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
873 if (rx_desc->wb.rxdid != ICE_RXDID_FLEX_NIC)
876 nic_mdid = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
877 hash = le32_to_cpu(nic_mdid->rss_hash);
878 skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
882 * ice_rx_csum - Indicate in skb if checksum is good
883 * @vsi: the VSI we care about
884 * @skb: skb currently being received and modified
885 * @rx_desc: the receive descriptor
886 * @ptype: the packet type decoded by hardware
888 * skb->protocol must be set before this function is called
891 ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
892 union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
894 struct ice_rx_ptype_decoded decoded;
895 u32 rx_error, rx_status;
898 rx_status = le16_to_cpu(rx_desc->wb.status_error0);
899 rx_error = rx_status;
901 decoded = ice_decode_rx_desc_ptype(ptype);
903 /* Start with CHECKSUM_NONE and by default csum_level = 0 */
904 skb->ip_summed = CHECKSUM_NONE;
905 skb_checksum_none_assert(skb);
907 /* check if Rx checksum is enabled */
908 if (!(vsi->netdev->features & NETIF_F_RXCSUM))
911 /* check if HW has decoded the packet and checksum */
912 if (!(rx_status & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
915 if (!(decoded.known && decoded.outer_ip))
918 ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
919 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
920 ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
921 (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
923 if (ipv4 && (rx_error & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |
924 BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S))))
926 else if (ipv6 && (rx_status &
927 (BIT(ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S))))
930 /* check for L4 errors and handle packets that were not able to be
931 * checksummed due to arrival speed
933 if (rx_error & BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S))
936 /* Only report checksum unnecessary for TCP, UDP, or SCTP */
937 switch (decoded.inner_prot) {
938 case ICE_RX_PTYPE_INNER_PROT_TCP:
939 case ICE_RX_PTYPE_INNER_PROT_UDP:
940 case ICE_RX_PTYPE_INNER_PROT_SCTP:
941 skb->ip_summed = CHECKSUM_UNNECESSARY;
948 vsi->back->hw_csum_rx_error++;
952 * ice_process_skb_fields - Populate skb header fields from Rx descriptor
953 * @rx_ring: Rx descriptor ring packet is being transacted on
954 * @rx_desc: pointer to the EOP Rx descriptor
955 * @skb: pointer to current skb being populated
956 * @ptype: the packet type decoded by hardware
958 * This function checks the ring, descriptor, and packet information in
959 * order to populate the hash, checksum, VLAN, protocol, and
960 * other fields within the skb.
963 ice_process_skb_fields(struct ice_ring *rx_ring,
964 union ice_32b_rx_flex_desc *rx_desc,
965 struct sk_buff *skb, u8 ptype)
967 ice_rx_hash(rx_ring, rx_desc, skb, ptype);
969 /* modifies the skb - consumes the enet header */
970 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
972 ice_rx_csum(rx_ring->vsi, skb, rx_desc, ptype);
976 * ice_receive_skb - Send a completed packet up the stack
977 * @rx_ring: Rx ring in play
978 * @skb: packet to send up
979 * @vlan_tag: VLAN tag for packet
981 * This function sends the completed packet (via. skb) up the stack using
982 * gro receive functions (with/without VLAN tag)
985 ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
987 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
988 (vlan_tag & VLAN_VID_MASK))
989 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
990 napi_gro_receive(&rx_ring->q_vector->napi, skb);
994 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
995 * @rx_ring: Rx descriptor ring to transact packets on
996 * @budget: Total limit on number of packets to process
998 * This function provides a "bounce buffer" approach to Rx interrupt
999 * processing. The advantage to this is that on systems that have
1000 * expensive overhead for IOMMU access this provides a means of avoiding
1001 * it by maintaining the mapping of the page to the system.
1003 * Returns amount of work completed
1005 static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
1007 unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
1008 u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
1011 /* start the loop to process Rx packets bounded by 'budget' */
1012 while (likely(total_rx_pkts < (unsigned int)budget)) {
1013 union ice_32b_rx_flex_desc *rx_desc;
1014 struct ice_rx_buf *rx_buf;
1015 struct sk_buff *skb;
1021 /* get the Rx desc from Rx ring based on 'next_to_clean' */
1022 rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
1024 /* status_error_len will always be zero for unused descriptors
1025 * because it's cleared in cleanup, and overlaps with hdr_addr
1026 * which is always zero because packet split isn't used, if the
1027 * hardware wrote DD then it will be non-zero
1029 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
1030 if (!ice_test_staterr(rx_desc, stat_err_bits))
1033 /* This memory barrier is needed to keep us from reading
1034 * any other fields out of the rx_desc until we know the
1039 size = le16_to_cpu(rx_desc->wb.pkt_len) &
1040 ICE_RX_FLX_DESC_PKT_LEN_M;
1042 rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
1043 /* allocate (if needed) and populate skb */
1045 ice_add_rx_frag(rx_buf, skb, size);
1047 skb = ice_construct_skb(rx_ring, rx_buf, size);
1049 /* exit if we failed to retrieve a buffer */
1051 rx_ring->rx_stats.alloc_buf_failed++;
1052 rx_buf->pagecnt_bias++;
1056 ice_put_rx_buf(rx_ring, rx_buf);
1059 /* skip if it is NOP desc */
1060 if (ice_is_non_eop(rx_ring, rx_desc, skb))
1063 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
1064 if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
1065 dev_kfree_skb_any(skb);
1069 rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
1070 ICE_RX_FLEX_DESC_PTYPE_M;
1072 stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
1073 if (ice_test_staterr(rx_desc, stat_err_bits))
1074 vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);
1076 /* correct empty headers and pad skb if needed (to make valid
1079 if (ice_cleanup_headers(skb)) {
1084 /* probably a little skewed due to removing CRC */
1085 total_rx_bytes += skb->len;
1087 /* populate checksum, VLAN, and protocol */
1088 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
1090 /* send completed skb up the stack */
1091 ice_receive_skb(rx_ring, skb, vlan_tag);
1093 /* update budget accounting */
1097 /* return up to cleaned_count buffers to hardware */
1098 failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
1100 /* update queue and vector specific stats */
1101 u64_stats_update_begin(&rx_ring->syncp);
1102 rx_ring->stats.pkts += total_rx_pkts;
1103 rx_ring->stats.bytes += total_rx_bytes;
1104 u64_stats_update_end(&rx_ring->syncp);
1105 rx_ring->q_vector->rx.total_pkts += total_rx_pkts;
1106 rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
1108 /* guarantee a trip back through this routine if there was a failure */
1109 return failure ? budget : (int)total_rx_pkts;
1113 * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
1114 * @port_info: port_info structure containing the current link speed
1115 * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
1116 * @itr: ITR value to update
1118 * Calculate how big of an increment should be applied to the ITR value passed
1119 * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
1122 * The following is a calculation derived from:
1123 * wmem_default / (size + overhead) = desired_pkts_per_int
1124 * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
1125 * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
1127 * Assuming wmem_default is 212992 and overhead is 640 bytes per
1128 * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
1131 * wmem_default * bits_per_byte * usecs_per_sec pkt_size + 24
1132 * ITR = -------------------------------------------- * --------------
1133 * rate pkt_size + 640
1136 ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
1137 unsigned int avg_pkt_size,
1140 switch (port_info->phy.link_info.link_speed) {
1141 case ICE_AQ_LINK_SPEED_100GB:
1142 itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
1143 avg_pkt_size + 640);
1145 case ICE_AQ_LINK_SPEED_50GB:
1146 itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
1147 avg_pkt_size + 640);
1149 case ICE_AQ_LINK_SPEED_40GB:
1150 itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
1151 avg_pkt_size + 640);
1153 case ICE_AQ_LINK_SPEED_25GB:
1154 itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
1155 avg_pkt_size + 640);
1157 case ICE_AQ_LINK_SPEED_20GB:
1158 itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
1159 avg_pkt_size + 640);
1161 case ICE_AQ_LINK_SPEED_10GB:
1164 itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
1165 avg_pkt_size + 640);
1169 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1170 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1171 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
/* NOTE(review): this extraction embeds original line numbers and elides some
 * lines (blanks, early returns, braces); code is kept byte-identical below
 * and only comments are added. Verify against the original file.
 */
1178 * ice_update_itr - update the adaptive ITR value based on statistics
1179 * @q_vector: structure containing interrupt and ring information
1180 * @rc: structure containing ring performance data
1182 * Stores a new ITR value based on packets and byte
1183 * counts during the last interrupt. The advantage of per interrupt
1184 * computation is faster updates and more accurate ITR for the current
1185 * traffic pattern. Constants in this function were computed
1186 * based on theoretical maximum wire speed and thresholds were set based
1187 * on testing data as well as attempting to minimize response time
1188 * while increasing bulk throughput.
1191 ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
1193 unsigned long next_update = jiffies;
1194 unsigned int packets, bytes, itr;
1195 bool container_is_rx;
/* Skip adaptation entirely when the ring is absent or the user pinned a
 * static ITR value (non-dynamic mode).
 */
1197 if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
1200 /* If itr_countdown is set it means we programmed an ITR within
1201 * the last 4 interrupt cycles. This has a side effect of us
1202 * potentially firing an early interrupt. In order to work around
1203 * this we need to throw out any data received for a few
1204 * interrupts following the update.
/* Inside the post-write blackout window: keep the current target. */
1206 if (q_vector->itr_countdown) {
1207 itr = rc->target_itr;
1211 container_is_rx = (&q_vector->rx == rc);
1212 /* For Rx we want to push the delay up and default to low latency.
1213 * for Tx we want to pull the delay down and default to high latency.
1215 itr = container_is_rx ?
1216 ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
1217 ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
1219 /* If we didn't update within up to 1 - 2 jiffies we can assume
1220 * that either packets are coming in so slow there hasn't been
1221 * any work, or that there is so much work that NAPI is dealing
1222 * with interrupt moderation and we don't need to do anything.
1224 if (time_after(next_update, rc->next_update))
/* Snapshot the counters accumulated since the previous ITR update. */
1227 packets = rc->total_pkts;
1228 bytes = rc->total_bytes;
1230 if (container_is_rx) {
1231 /* If Rx there are 1 to 4 packets and bytes are less than
1232 * 9000 assume insufficient data to use bulk rate limiting
1233 * approach unless Tx is already in bulk rate limiting. We
1234 * are likely latency driven.
1236 if (packets && packets < 4 && bytes < 9000 &&
1237 (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
1238 itr = ICE_ITR_ADAPTIVE_LATENCY;
1239 goto adjust_by_size_and_speed;
1241 } else if (packets < 4) {
1242 /* If we have Tx and Rx ITR maxed and Tx ITR is running in
1243 * bulk mode and we are receiving 4 or fewer packets just
1244 * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
1245 * that the Rx can relax.
1247 if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
1248 (q_vector->rx.target_itr & ICE_ITR_MASK) ==
1249 ICE_ITR_ADAPTIVE_MAX_USECS)
1251 } else if (packets > 32) {
1252 /* If we have processed over 32 packets in a single interrupt
1253 * for Tx assume we need to switch over to "bulk" mode.
1255 rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
1258 /* We have no packets to actually measure against. This means
1259 * either one of the other queues on this vector is active or
1260 * we are a Tx queue doing TSO with too high of an interrupt rate.
1262 * Between 4 and 56 we can assume that our current interrupt delay
1263 * is only slightly too low. As such we should increase it by a small
/* Nudge the delay up by the minimum increment, clamping to the
 * adaptive maximum while preserving the latency/bulk flag bits.
 */
1267 itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
1268 if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
1269 itr &= ICE_ITR_ADAPTIVE_LATENCY;
1270 itr += ICE_ITR_ADAPTIVE_MAX_USECS;
1275 if (packets <= 256) {
1276 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
1277 itr &= ICE_ITR_MASK;
1279 /* Between 56 and 112 is our "goldilocks" zone where we are
1280 * working out "just right". Just report that our current
1281 * ITR is good for us.
1286 /* If packet count is 128 or greater we are likely looking
1287 * at a slight overrun of the delay we want. Try halving
1288 * our delay to see if that will cut the number of packets
1289 * in half per interrupt.
/* Clamp the halved delay to the adaptive minimum. */
1292 itr &= ICE_ITR_MASK;
1293 if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
1294 itr = ICE_ITR_ADAPTIVE_MIN_USECS;
1299 /* The paths below assume we are dealing with a bulk ITR since
1300 * number of packets is greater than 256. We are just going to have
1301 * to compute a value and try to bring the count under control,
1302 * though for smaller packet sizes there isn't much we can do as
1303 * NAPI polling will likely be kicking in sooner rather than later.
1305 itr = ICE_ITR_ADAPTIVE_BULK;
1307 adjust_by_size_and_speed:
1309 /* based on checks above packets cannot be 0 so division is safe */
1310 itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
1311 bytes / packets, itr);
1314 /* write back value */
1315 rc->target_itr = itr;
1317 /* next update should occur within next jiffy */
1318 rc->next_update = next_update + 1;
1320 rc->total_bytes = 0;
1325 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
1326 * @itr_idx: interrupt throttling index
1327 * @itr: interrupt throttling value in usecs
1329 static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
1331 /* The ITR value is reported in microseconds, and the register value is
1332 * recorded in 2 microsecond units. For this reason we only need to
1333 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
1334 * granularity as a shift instead of division. The mask makes sure the
1335 * ITR value is never odd so we don't accidentally write into the field
1336 * prior to the ITR field.
1338 itr &= ICE_ITR_MASK;
/* Also set INTENA (re-enable interrupt) and CLEARPBA (clear any pending
 * interrupt) alongside the ITR index and interval fields.
 */
1340 return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
1341 (itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
1342 (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
1345 /* The act of updating the ITR will cause it to immediately trigger. In order
1346 * to prevent this from throwing off adaptive update statistics we defer the
1347 * update so that it can only happen so often. So after either Tx or Rx are
1348 * updated we make the adaptive scheme wait until either the ITR completely
1349 * expires via the next_update expiration or we have been through at least
/* Countdown is armed each time an ITR write is issued in
 * ice_update_ena_itr() and decremented once per interrupt cycle there.
 */
1352 #define ITR_COUNTDOWN_START 3
/* NOTE(review): the final wr32() argument (itr_val) and closing brace are
 * not visible in this extraction — verify against the original file.
 */
1355 * ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
1356 * @vsi: the VSI associated with the q_vector
1357 * @q_vector: q_vector for which ITR is being updated and interrupt enabled
1360 ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
1362 struct ice_ring_container *tx = &q_vector->tx;
1363 struct ice_ring_container *rx = &q_vector->rx;
1366 /* This will do nothing if dynamic updates are not enabled */
1367 ice_update_itr(q_vector, tx);
1368 ice_update_itr(q_vector, rx);
1370 /* This block of logic allows us to get away with only updating
1371 * one ITR value with each interrupt. The idea is to perform a
1372 * pseudo-lazy update with the following criteria.
1374 * 1. Rx is given higher priority than Tx if both are in same state
1375 * 2. If we must reduce an ITR that is given highest priority.
1376 * 3. We then give priority to increasing ITR based on amount.
1378 if (rx->target_itr < rx->current_itr) {
1379 /* Rx ITR needs to be reduced, this is highest priority */
1380 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1381 rx->current_itr = rx->target_itr;
1382 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1383 } else if ((tx->target_itr < tx->current_itr) ||
1384 ((rx->target_itr - rx->current_itr) <
1385 (tx->target_itr - tx->current_itr))) {
1386 /* Tx ITR needs to be reduced, this is second priority
1387 * Tx ITR needs to be increased more than Rx, fourth priority
1389 itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
1390 tx->current_itr = tx->target_itr;
1391 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1392 } else if (rx->current_itr != rx->target_itr) {
1393 /* Rx ITR needs to be increased, third priority */
1394 itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
1395 rx->current_itr = rx->target_itr;
1396 q_vector->itr_countdown = ITR_COUNTDOWN_START;
1398 /* Still have to re-enable the interrupts */
1399 itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
/* No ITR write this cycle: tick down the blackout counter instead. */
1400 if (q_vector->itr_countdown)
1401 q_vector->itr_countdown--;
/* Only touch hardware while the VSI is up. */
1404 if (!test_bit(__ICE_DOWN, vsi->state))
1405 wr32(&vsi->back->hw,
1406 GLINT_DYN_CTL(q_vector->reg_idx),
/* NOTE(review): the body of the netpoll budget==0 early return and the
 * "return budget" path are not visible in this extraction.
 */
1411 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
1412 * @napi: napi struct with our devices info in it
1413 * @budget: amount of work driver is allowed to do this pass, in packets
1415 * This function will clean all queues associated with a q_vector.
1417 * Returns the amount of work done
1419 int ice_napi_poll(struct napi_struct *napi, int budget)
1421 struct ice_q_vector *q_vector =
1422 container_of(napi, struct ice_q_vector, napi);
1423 struct ice_vsi *vsi = q_vector->vsi;
1424 struct ice_pf *pf = vsi->back;
1425 bool clean_complete = true;
1426 int budget_per_ring = 0;
1427 struct ice_ring *ring;
1430 /* Since the actual Tx work is minimal, we can give the Tx a larger
1431 * budget and be more aggressive about cleaning up the Tx descriptors.
1433 ice_for_each_ring(ring, q_vector->tx)
1434 if (!ice_clean_tx_irq(vsi, ring, budget))
1435 clean_complete = false;
1437 /* Handle case where we are called by netpoll with a budget of 0 */
1441 /* We attempt to distribute budget to each Rx queue fairly, but don't
1442 * allow the budget to go below 1 because that would exit polling early.
1444 if (q_vector->num_ring_rx)
1445 budget_per_ring = max(budget / q_vector->num_ring_rx, 1);
1447 ice_for_each_ring(ring, q_vector->rx) {
1450 cleaned = ice_clean_rx_irq(ring, budget_per_ring);
1451 work_done += cleaned;
1452 /* if we clean as many as budgeted, we must not be done */
1453 if (cleaned >= budget_per_ring)
1454 clean_complete = false;
1457 /* If work not completed, return budget and polling will return */
1458 if (!clean_complete)
1461 /* Exit the polling mode, but don't re-enable interrupts if stack might
1462 * poll us due to busy-polling
1464 if (likely(napi_complete_done(napi, work_done)))
1465 if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
1466 ice_update_ena_itr(vsi, q_vector);
/* Report strictly less than budget so NAPI treats us as finished. */
1468 return min_t(int, work_done, budget - 1);
1471 /* helper function for building cmd/type/offset */
1473 build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
1475 return cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
1476 (td_cmd << ICE_TXD_QW1_CMD_S) |
1477 (td_offset << ICE_TXD_QW1_OFFSET_S) |
1478 ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S) |
1479 (td_tag << ICE_TXD_QW1_L2TAG1_S));
/* NOTE(review): the memory barrier call, the -EBUSY return and the final
 * return 0 are not visible in this extraction — verify against original.
 */
1483 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
1484 * @tx_ring: the ring to be checked
1485 * @size: the size buffer we want to assure is available
1487 * Returns -EBUSY if a stop is needed, else 0
/* Slow path: stop the queue first, then re-check occupancy so a racing
 * completion cannot leave the queue stopped forever.
 */
1489 static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
1491 netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
1492 /* Memory barrier before checking head and tail */
1495 /* Check again in a case another CPU has just made room available. */
1496 if (likely(ICE_DESC_UNUSED(tx_ring) < size))
1499 /* A reprieve! - use start_subqueue because it doesn't call schedule */
1500 netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
1501 ++tx_ring->tx_stats.restart_q;
/**
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
	/* Fast path: enough free descriptors, nothing to do; otherwise
	 * fall through to the second-level stop/re-check handling.
	 */
	return likely(ICE_DESC_UNUSED(tx_ring) >= size) ?
		0 : __ice_maybe_stop_tx(tx_ring, size);
}
/* NOTE(review): this extraction elides several lines inside the mapping
 * loop (index increments, dma/size bookkeeping, error label); code is kept
 * byte-identical and only comments are added.
 */
1521 * ice_tx_map - Build the Tx descriptor
1522 * @tx_ring: ring to send buffer on
1523 * @first: first buffer info buffer to use
1524 * @off: pointer to struct that holds offload parameters
1526 * This function loops over the skb data pointed to by *first
1527 * and gets a physical address for each memory location and programs
1528 * it and the length into the transmit descriptor.
1531 ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
1532 struct ice_tx_offload_params *off)
1534 u64 td_offset, td_tag, td_cmd;
1535 u16 i = tx_ring->next_to_use;
1537 unsigned int data_len, size;
1538 struct ice_tx_desc *tx_desc;
1539 struct ice_tx_buf *tx_buf;
1540 struct sk_buff *skb;
/* Seed per-descriptor fields from the offload parameters computed by
 * the checksum/TSO/VLAN setup routines.
 */
1543 td_tag = off->td_l2tag1;
1544 td_cmd = off->td_cmd;
1545 td_offset = off->td_offset;
1548 data_len = skb->data_len;
1549 size = skb_headlen(skb);
1551 tx_desc = ICE_TX_DESC(tx_ring, i);
1553 if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
1554 td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
1555 td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
1556 ICE_TX_FLAGS_VLAN_S;
/* Map the linear (head) portion of the skb for DMA first. */
1559 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1563 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1564 unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1566 if (dma_mapping_error(tx_ring->dev, dma))
1569 /* record length, and DMA address */
1570 dma_unmap_len_set(tx_buf, len, size);
1571 dma_unmap_addr_set(tx_buf, dma, dma);
1573 /* align size to end of page */
1574 max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
1575 tx_desc->buf_addr = cpu_to_le64(dma);
1577 /* account for data chunks larger than the hardware
/* Chunks above ICE_MAX_DATA_PER_TXD are split across multiple
 * descriptors, wrapping the ring index as needed.
 */
1580 while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
1581 tx_desc->cmd_type_offset_bsz =
1582 build_ctob(td_cmd, td_offset, max_data, td_tag);
1587 if (i == tx_ring->count) {
1588 tx_desc = ICE_TX_DESC(tx_ring, 0);
1595 max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
1596 tx_desc->buf_addr = cpu_to_le64(dma);
/* No paged data left: the loop exits and the last descriptor is
 * finalized below with EOP/RS.
 */
1599 if (likely(!data_len))
1602 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset,
1608 if (i == tx_ring->count) {
1609 tx_desc = ICE_TX_DESC(tx_ring, 0);
/* Advance to the next fragment and map it for DMA. */
1613 size = skb_frag_size(frag);
1616 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
1619 tx_buf = &tx_ring->tx_buf[i];
1622 /* record bytecount for BQL */
1623 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1625 /* record SW timestamp if HW timestamp is not available */
1626 skb_tx_timestamp(first->skb);
1629 if (i == tx_ring->count)
1632 /* write last descriptor with RS and EOP bits */
1633 td_cmd |= (u64)(ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS);
1634 tx_desc->cmd_type_offset_bsz =
1635 build_ctob(td_cmd, td_offset, size, td_tag);
1637 /* Force memory writes to complete before letting h/w know there
1638 * are new descriptors to fetch.
1640 * We also use this memory barrier to make certain all of the
1641 * status bits have been updated before next_to_watch is written.
1645 /* set next_to_watch value indicating a packet is present */
1646 first->next_to_watch = tx_desc;
1648 tx_ring->next_to_use = i;
1650 ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
1652 /* notify HW of packet */
1653 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1654 writel(i, tx_ring->tail);
/* Error path: walk back from the failed mapping, unmapping and
 * freeing each buffer until we reach the first for this packet.
 */
1660 /* clear DMA mappings for failed tx_buf map */
1662 tx_buf = &tx_ring->tx_buf[i];
1663 ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
1664 if (tx_buf == first)
1671 tx_ring->next_to_use = i;
/* NOTE(review): the switch statement over l4_proto (TCP/UDP/SCTP case
 * labels, breaks, default) is elided in this extraction; only the case
 * bodies are visible. Code kept byte-identical, comments only added.
 */
1675 * ice_tx_csum - Enable Tx checksum offloads
1676 * @first: pointer to the first descriptor
1677 * @off: pointer to struct that holds offload parameters
1679 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
1682 int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1684 u32 l4_len = 0, l3_len = 0, l2_len = 0;
1685 struct sk_buff *skb = first->skb;
1695 __be16 frag_off, protocol;
1696 unsigned char *exthdr;
1697 u32 offset, cmd = 0;
/* Only offload when the stack asked for partial checksumming. */
1700 if (skb->ip_summed != CHECKSUM_PARTIAL)
1703 ip.hdr = skb_network_header(skb);
1704 l4.hdr = skb_transport_header(skb);
1706 /* compute outer L2 header size */
1707 l2_len = ip.hdr - skb->data;
1708 offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;
1710 if (skb->encapsulation)
1713 /* Enable IP checksum offloads */
1714 protocol = vlan_get_protocol(skb);
1715 if (protocol == htons(ETH_P_IP)) {
1716 l4_proto = ip.v4->protocol;
1717 /* the stack computes the IP header already, the only time we
1718 * need the hardware to recompute it is in the case of TSO.
1720 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1721 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
1723 cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
1725 } else if (protocol == htons(ETH_P_IPV6)) {
1726 cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
1727 exthdr = ip.hdr + sizeof(*ip.v6);
1728 l4_proto = ip.v6->nexthdr;
/* Skip any IPv6 extension headers to find the real L4 protocol. */
1729 if (l4.hdr != exthdr)
1730 ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
1736 /* compute inner L3 header size */
1737 l3_len = l4.hdr - ip.hdr;
1738 offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
1740 /* Enable L4 checksum offloads */
1743 /* enable checksum offloads */
1744 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
1745 l4_len = l4.tcp->doff;
1746 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1749 /* enable UDP checksum offload */
1750 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
1751 l4_len = (sizeof(struct udphdr) >> 2);
1752 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
1755 /* enable SCTP checksum offload */
1756 cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
1757 l4_len = sizeof(struct sctphdr) >> 2;
1758 offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
/* Unsupported L4 protocol: TSO frames cannot fall back, non-TSO
 * frames get a software checksum via skb_checksum_help().
 */
1762 if (first->tx_flags & ICE_TX_FLAGS_TSO)
1764 skb_checksum_help(skb);
1769 off->td_offset |= offset;
/* NOTE(review): the early "return 0" after the protocol rewrite and the
 * skb_header_pointer() NULL check are elided in this extraction.
 */
1774 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
1775 * @tx_ring: ring to send buffer on
1776 * @first: pointer to struct ice_tx_buf
1778 * Checks the skb and set up correspondingly several generic transmit flags
1779 * related to VLAN tagging for the HW, such as VLAN, DCB, etc.
1781 * Returns error code indicate the frame should be dropped upon error and the
1782 * otherwise returns 0 to indicate the flags has been set properly.
1785 ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
1787 struct sk_buff *skb = first->skb;
1788 __be16 protocol = skb->protocol;
1790 if (protocol == htons(ETH_P_8021Q) &&
1791 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
1792 /* when HW VLAN acceleration is turned off by the user the
1793 * stack sets the protocol to 8021q so that the driver
1794 * can take any steps required to support the SW only
1795 * VLAN handling. In our case the driver doesn't need
1796 * to take any further steps so just set the protocol
1797 * to the encapsulated ethertype.
1799 skb->protocol = vlan_get_protocol(skb);
1803 /* if we have a HW VLAN tag being added, default to the HW one */
1804 if (skb_vlan_tag_present(skb)) {
1805 first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
1806 first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
1807 } else if (protocol == htons(ETH_P_8021Q)) {
1808 struct vlan_hdr *vhdr, _vhdr;
1810 /* for SW VLAN, check the next protocol and store the tag */
1811 vhdr = (struct vlan_hdr *)skb_header_pointer(skb, ETH_HLEN,
1817 first->tx_flags |= ntohs(vhdr->h_vlan_TCI) <<
1818 ICE_TX_FLAGS_VLAN_S;
1819 first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
/* Let DCB apply any priority-tagging adjustments last. */
1822 return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
/* NOTE(review): early "return 0" statements, the skb_cow_head() error
 * return, the IPv4 branch body and the final "return 1" are elided in
 * this extraction; code kept byte-identical, comments only added.
 */
1826 * ice_tso - computes mss and TSO length to prepare for TSO
1827 * @first: pointer to struct ice_tx_buf
1828 * @off: pointer to struct that holds offload parameters
1830 * Returns 0 or error (negative) if TSO can't happen, 1 otherwise.
1833 int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
1835 struct sk_buff *skb = first->skb;
1845 u64 cd_mss, cd_tso_len;
1846 u32 paylen, l4_start;
/* TSO only applies to GSO skbs that request partial checksumming. */
1849 if (skb->ip_summed != CHECKSUM_PARTIAL)
1852 if (!skb_is_gso(skb))
1855 err = skb_cow_head(skb, 0);
1859 /* cppcheck-suppress unreadVariable */
1860 ip.hdr = skb_network_header(skb);
1861 l4.hdr = skb_transport_header(skb);
1863 /* initialize outer IP header fields */
1864 if (ip.v4->version == 4) {
1868 ip.v6->payload_len = 0;
1871 /* determine offset of transport header */
1872 l4_start = l4.hdr - skb->data;
1874 /* remove payload length from checksum */
1875 paylen = skb->len - l4_start;
1876 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
1878 /* compute length of segmentation header */
1879 off->header_len = (l4.tcp->doff * 4) + l4_start;
1881 /* update gso_segs and bytecount */
1882 first->gso_segs = skb_shinfo(skb)->gso_segs;
/* Each extra segment repeats the header on the wire; account for it
 * in the BQL bytecount.
 */
1883 first->bytecount += (first->gso_segs - 1) * off->header_len;
1885 cd_tso_len = skb->len - off->header_len;
1886 cd_mss = skb_shinfo(skb)->gso_size;
1888 /* record cdesc_qw1 with TSO parameters */
1889 off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
1890 (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
1891 (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
1892 (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
1893 first->tx_flags |= ICE_TX_FLAGS_TSO;
1898 * ice_txd_use_count - estimate the number of descriptors needed for Tx
1899 * @size: transmit request size in bytes
1901 * Due to hardware alignment restrictions (4K alignment), we need to
1902 * assume that we can have no more than 12K of data per descriptor, even
1903 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
1904 * Thus, we need to divide by 12K. But division is slow! Instead,
1905 * we decompose the operation into shifts and one relatively cheap
1906 * multiply operation.
1908 * To divide by 12K, we first divide by 4K, then divide by 3:
1909 * To divide by 4K, shift right by 12 bits
1910 * To divide by 3, multiply by 85, then divide by 256
1911 * (Divide by 256 is done by shifting right by 8 bits)
1912 * Finally, we add one to round up. Because 256 isn't an exact multiple of
1913 * 3, we'll underestimate near each multiple of 12K. This is actually more
1914 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
1915 * segment. For our purposes this is accurate out to 1M which is orders of
1916 * magnitude greater than our largest possible GSO size.
1918 * This would then be implemented as:
1919 * return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
1921 * Since multiplication and division are commutative, we can reorder
1923 * return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
1925 static unsigned int ice_txd_use_count(unsigned int size)
1927 return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
/* NOTE(review): the surrounding loop over fragments (for/break/return) is
 * elided in this extraction; code kept byte-identical, comments only added.
 */
1931 * ice_xmit_desc_count - calculate number of Tx descriptors needed
1934 * Returns number of data descriptors needed for this skb.
1936 static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
1938 const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
1939 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1940 unsigned int count = 0, size = skb_headlen(skb);
/* Accumulate descriptor usage for the head, then each fragment. */
1943 count += ice_txd_use_count(size);
1948 size = skb_frag_size(frag++);
/* NOTE(review): loop structure (for, return true/false, closing braces)
 * is elided in this extraction; code kept byte-identical below.
 */
1955 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
1958 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
1959 * and so we need to figure out the cases where we need to linearize the skb.
1961 * For TSO we need to count the TSO header and segment payload separately.
1962 * As such we need to check cases where we have 7 fragments or more as we
1963 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
1964 * the segment payload in the first descriptor, and another 7 for the
1967 static bool __ice_chk_linearize(struct sk_buff *skb)
1969 const skb_frag_t *frag, *stale;
1972 /* no need to check if number of frags is less than 7 */
1973 nr_frags = skb_shinfo(skb)->nr_frags;
1974 if (nr_frags < (ICE_MAX_BUF_TXD - 1))
1977 /* We need to walk through the list and validate that each group
1978 * of 6 fragments totals at least gso_size.
1980 nr_frags -= ICE_MAX_BUF_TXD - 2;
1981 frag = &skb_shinfo(skb)->frags[0];
1983 /* Initialize size to the negative value of gso_size minus 1. We
1984 * use this as the worst case scenario in which the frag ahead
1985 * of us only provides one byte which is why we are limited to 6
1986 * descriptors for a single transmit as the header and previous
1987 * fragment are already consuming 2 descriptors.
1989 sum = 1 - skb_shinfo(skb)->gso_size;
1991 /* Add size of frags 0 through 4 to create our initial sum */
1992 sum += skb_frag_size(frag++);
1993 sum += skb_frag_size(frag++);
1994 sum += skb_frag_size(frag++);
1995 sum += skb_frag_size(frag++);
1996 sum += skb_frag_size(frag++);
1998 /* Walk through fragments adding latest fragment, testing it, and
1999 * then removing stale fragments from the sum.
2001 stale = &skb_shinfo(skb)->frags[0];
2003 sum += skb_frag_size(frag++);
2005 /* if sum is negative we failed to make sufficient progress */
/* Slide the window forward: drop the oldest fragment's size. */
2012 sum -= skb_frag_size(stale++);
/* NOTE(review): the "return false" for the fast path is elided in this
 * extraction; code kept byte-identical, comments only added.
 */
2019 * ice_chk_linearize - Check if there are more than 8 fragments per packet
2021 * @count: number of buffers used
2023 * Note: Our HW can't scatter-gather more than 8 fragments to build
2024 * a packet on the wire and so we need to figure out the cases where we
2025 * need to linearize the skb.
2027 static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
2029 /* Both TSO and single send will work if count is less than 8 */
2030 if (likely(count < ICE_MAX_BUF_TXD))
/* GSO skbs need the sliding-window fragment check. */
2033 if (skb_is_gso(skb))
2034 return __ice_chk_linearize(skb);
2036 /* we can support up to 8 data buffers for a single send */
2037 return count != ICE_MAX_BUF_TXD;
/* NOTE(review): error-path gotos (drop label), the tso/csum < 0 checks and
 * the "i++" before the next_to_use wrap are elided in this extraction;
 * code kept byte-identical, comments only added.
 */
2041 * ice_xmit_frame_ring - Sends buffer on Tx ring
2043 * @tx_ring: ring to send buffer on
2045 * Returns NETDEV_TX_OK if sent, else an error code
2048 ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
2050 struct ice_tx_offload_params offload = { 0 };
2051 struct ice_tx_buf *first;
/* Linearize skbs that would need more DMA buffers than HW supports. */
2055 count = ice_xmit_desc_count(skb);
2056 if (ice_chk_linearize(skb, count)) {
2057 if (__skb_linearize(skb))
2059 count = ice_txd_use_count(skb->len);
2060 tx_ring->tx_stats.tx_linearize++;
2063 /* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
2064 * + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
2065 * + 4 desc gap to avoid the cache line where head is,
2066 * + 1 desc for context descriptor,
2067 * otherwise try next time
2069 if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
2070 ICE_DESCS_FOR_CTX_DESC)) {
2071 tx_ring->tx_stats.tx_busy++;
2072 return NETDEV_TX_BUSY;
2075 offload.tx_ring = tx_ring;
2077 /* record the location of the first descriptor for this packet */
2078 first = &tx_ring->tx_buf[tx_ring->next_to_use];
2080 first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
2081 first->gso_segs = 1;
2082 first->tx_flags = 0;
2084 /* prepare the VLAN tagging flags for Tx */
2085 if (ice_tx_prepare_vlan_flags(tx_ring, first))
2088 /* set up TSO offload */
2089 tso = ice_tso(first, &offload);
2093 /* always set up Tx checksum offload */
2094 csum = ice_tx_csum(first, &offload);
/* TSO frames (and tunnels) need a context descriptor ahead of the
 * data descriptors.
 */
2098 if (tso || offload.cd_tunnel_params) {
2099 struct ice_tx_ctx_desc *cdesc;
2100 int i = tx_ring->next_to_use;
2102 /* grab the next descriptor */
2103 cdesc = ICE_TX_CTX_DESC(tx_ring, i);
2105 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
2107 /* setup context descriptor */
2108 cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
2109 cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
2110 cdesc->rsvd = cpu_to_le16(0);
2111 cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
2114 ice_tx_map(tx_ring, first, &offload);
2115 return NETDEV_TX_OK;
/* Drop path: free the skb but still report OK to the stack. */
2118 dev_kfree_skb_any(skb);
2119 return NETDEV_TX_OK;
2123 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
/* @skb: send buffer (param line elided in this extraction) */
2125 * @netdev: network interface device structure
2127 * Returns NETDEV_TX_OK if sent, else an error code
2129 netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2131 struct ice_netdev_priv *np = netdev_priv(netdev);
2132 struct ice_vsi *vsi = np->vsi;
2133 struct ice_ring *tx_ring;
/* queue_mapping was chosen by the stack; use it to pick the Tx ring */
2135 tx_ring = vsi->tx_rings[skb->queue_mapping];
2137 /* hardware can't handle really short frames, hardware padding works
/* skb_put_padto frees the skb on failure, so just report OK */
2140 if (skb_put_padto(skb, ICE_MIN_TX_LEN))
2141 return NETDEV_TX_OK;
2143 return ice_xmit_frame_ring(skb, tx_ring);