1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Intel Corporation */
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/if_vlan.h>
10 #include <linux/pm_runtime.h>
11 #include <net/pkt_sched.h>
12 #include <linux/bpf_trace.h>
13 #include <net/xdp_sock_drv.h>
14 #include <linux/pci.h>
23 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
25 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
27 #define IGC_XDP_PASS 0
28 #define IGC_XDP_CONSUMED BIT(0)
29 #define IGC_XDP_TX BIT(1)
30 #define IGC_XDP_REDIRECT BIT(2)
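/* Verdict mask built up while running the XDP program on a received frame:
 * PASS hands the frame to the network stack, CONSUMED means it was dropped
 * or absorbed (e.g. by AF_XDP), TX bounces it back out of this port, and
 * REDIRECT sends it to another device or socket. TX and REDIRECT are
 * distinct bits so a NAPI poll can accumulate outcomes and flush once at
 * the end.
 */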
32 static int debug = -1;
34 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
35 MODULE_DESCRIPTION(DRV_SUMMARY);
36 MODULE_LICENSE("GPL v2");
37 module_param(debug, int, 0);
38 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
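/* Assumption based on the -1 default: probe is expected to seed
 * adapter->msg_enable via netif_msg_init(debug, DEFAULT_MSG_ENABLE), which
 * substitutes DEFAULT_MSG_ENABLE whenever debug is negative.
 */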
40 char igc_driver_name[] = "igc";
41 static const char igc_driver_string[] = DRV_SUMMARY;
42 static const char igc_copyright[] =
43 "Copyright(c) 2018 Intel Corporation.";
45 static const struct igc_info *igc_info_tbl[] = {
46 [board_base] = &igc_base_info,
49 static const struct pci_device_id igc_pci_tbl[] = {
50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
64 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
65 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
66 /* required last entry */
70 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
79 void igc_reset(struct igc_adapter *adapter)
81 struct net_device *dev = adapter->netdev;
82 struct igc_hw *hw = &adapter->hw;
83 struct igc_fc_info *fc = &hw->fc;
86 /* Repartition PBA for greater than 9k MTU if required */
89 /* flow control settings
90 * The high water mark must be low enough to fit one full frame
91 * after transmitting the pause frame. As such we must have enough
92 * space to allow for us to complete our current transmit and then
93 * receive the frame that is in progress from the link partner.
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
100 fc->low_water = fc->high_water - 16;
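/* Worked example with hypothetical sizes: pba = 32 (KB) and
 * max_frame_size = 1522 give hwm = (32 << 10) - (1522 + MAX_JUMBO_FRAME_SIZE).
 * Masking with 0xFFFFFFF0 rounds high_water down to a 16-byte boundary, and
 * low_water then sits exactly one 16-byte granule below it.
 */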
101 fc->pause_time = 0xFFFF;
103 fc->current_mode = fc->requested_mode;
105 hw->mac.ops.reset_hw(hw);
107 if (hw->mac.ops.init_hw(hw))
108 netdev_err(dev, "Error on hardware initialization\n");
110 /* Re-establish EEE setting */
111 igc_set_eee_i225(hw, true, true, true);
113 if (!netif_running(adapter->netdev))
114 igc_power_down_phy_copper_base(&adapter->hw);
116 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
117 wr32(IGC_VET, ETH_P_8021Q);
119 /* Re-enable PTP, where applicable. */
120 igc_ptp_reset(adapter);
122 /* Re-enable TSN offloading, where applicable. */
123 igc_tsn_reset(adapter);
125 igc_get_phy_info(hw);
129 * igc_power_up_link - Power up the phy link
130 * @adapter: address of board private structure
132 static void igc_power_up_link(struct igc_adapter *adapter)
134 igc_reset_phy(&adapter->hw);
136 igc_power_up_phy_copper(&adapter->hw);
138 igc_setup_link(&adapter->hw);
142 * igc_release_hw_control - release control of the h/w to f/w
143 * @adapter: address of board private structure
145 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
146 * For ASF and Pass Through versions of f/w this means that the
147 * driver is no longer loaded.
149 static void igc_release_hw_control(struct igc_adapter *adapter)
151 struct igc_hw *hw = &adapter->hw;
154 if (!pci_device_is_present(adapter->pdev))
157 /* Let firmware take over control of h/w */
158 ctrl_ext = rd32(IGC_CTRL_EXT);
160 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
164 * igc_get_hw_control - get control of the h/w from f/w
165 * @adapter: address of board private structure
167 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
168 * For ASF and Pass Through versions of f/w this means that
169 * the driver is loaded.
171 static void igc_get_hw_control(struct igc_adapter *adapter)
173 struct igc_hw *hw = &adapter->hw;
176 /* Let firmware know the driver has taken over */
177 ctrl_ext = rd32(IGC_CTRL_EXT);
179 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
182 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
184 dma_unmap_single(dev, dma_unmap_addr(buf, dma),
185 dma_unmap_len(buf, len), DMA_TO_DEVICE);
187 dma_unmap_len_set(buf, len, 0);
191 * igc_clean_tx_ring - Free Tx Buffers
192 * @tx_ring: ring to be cleaned
194 static void igc_clean_tx_ring(struct igc_ring *tx_ring)
196 u16 i = tx_ring->next_to_clean;
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
200 while (i != tx_ring->next_to_use) {
201 union igc_adv_tx_desc *eop_desc, *tx_desc;
203 switch (tx_buffer->type) {
204 case IGC_TX_BUFFER_TYPE_XSK:
207 case IGC_TX_BUFFER_TYPE_XDP:
208 xdp_return_frame(tx_buffer->xdpf);
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
211 case IGC_TX_BUFFER_TYPE_SKB:
212 dev_kfree_skb_any(tx_buffer->skb);
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
220 /* check for eop_desc to determine the end of the packet */
221 eop_desc = tx_buffer->next_to_watch;
222 tx_desc = IGC_TX_DESC(tx_ring, i);
224 /* unmap remaining buffers */
225 while (tx_desc != eop_desc) {
229 if (unlikely(i == tx_ring->count)) {
231 tx_buffer = tx_ring->tx_buffer_info;
232 tx_desc = IGC_TX_DESC(tx_ring, 0);
235 /* unmap any remaining paged data */
236 if (dma_unmap_len(tx_buffer, len))
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
240 tx_buffer->next_to_watch = NULL;
242 /* move us one more past the eop_desc for start of next pkt */
245 if (unlikely(i == tx_ring->count)) {
247 tx_buffer = tx_ring->tx_buffer_info;
251 if (tx_ring->xsk_pool && xsk_frames)
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
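/* XSK-type buffers belong to the AF_XDP pool, so instead of being unmapped
 * and freed above they are handed back in one batch via xsk_tx_completed(),
 * letting user space reuse the frames.
 */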
254 /* reset BQL for queue */
255 netdev_tx_reset_queue(txring_txq(tx_ring));
257 /* Zero out the buffer ring */
258 memset(tx_ring->tx_buffer_info, 0,
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count);
261 /* Zero out the descriptor ring */
262 memset(tx_ring->desc, 0, tx_ring->size);
264 /* reset next_to_use and next_to_clean */
265 tx_ring->next_to_use = 0;
266 tx_ring->next_to_clean = 0;
270 * igc_free_tx_resources - Free Tx Resources per Queue
271 * @tx_ring: Tx descriptor ring for a specific queue
273 * Free all transmit software resources
275 void igc_free_tx_resources(struct igc_ring *tx_ring)
277 igc_disable_tx_ring(tx_ring);
279 vfree(tx_ring->tx_buffer_info);
280 tx_ring->tx_buffer_info = NULL;
282 /* if not set, then don't free */
286 dma_free_coherent(tx_ring->dev, tx_ring->size,
287 tx_ring->desc, tx_ring->dma);
289 tx_ring->desc = NULL;
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
294 * @adapter: board private structure
296 * Free all transmit software resources
298 static void igc_free_all_tx_resources(struct igc_adapter *adapter)
302 for (i = 0; i < adapter->num_tx_queues; i++)
303 igc_free_tx_resources(adapter->tx_ring[i]);
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
308 * @adapter: board private structure
310 static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
314 for (i = 0; i < adapter->num_tx_queues; i++)
315 if (adapter->tx_ring[i])
316 igc_clean_tx_ring(adapter->tx_ring[i]);
319 static void igc_disable_tx_ring_hw(struct igc_ring *ring)
321 struct igc_hw *hw = &ring->q_vector->adapter->hw;
322 u8 idx = ring->reg_idx;
325 txdctl = rd32(IGC_TXDCTL(idx));
326 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
327 txdctl |= IGC_TXDCTL_SWFLUSH;
328 wr32(IGC_TXDCTL(idx), txdctl);
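/* Presumably SWFLUSH asks the hardware to flush this queue's pending
 * descriptor state once QUEUE_ENABLE is cleared, rather than waiting for
 * in-flight descriptors to complete.
 */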
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
333 * @adapter: board private structure
335 static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter)
339 for (i = 0; i < adapter->num_tx_queues; i++) {
340 struct igc_ring *tx_ring = adapter->tx_ring[i];
342 igc_disable_tx_ring_hw(tx_ring);
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
348 * @tx_ring: tx descriptor ring (for a specific queue) to setup
350 * Return 0 on success, negative on failure
352 int igc_setup_tx_resources(struct igc_ring *tx_ring)
354 struct net_device *ndev = tx_ring->netdev;
355 struct device *dev = tx_ring->dev;
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count;
359 tx_ring->tx_buffer_info = vzalloc(size);
360 if (!tx_ring->tx_buffer_info)
363 /* round up to nearest 4K */
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
365 tx_ring->size = ALIGN(tx_ring->size, 4096);
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
368 &tx_ring->dma, GFP_KERNEL);
373 tx_ring->next_to_use = 0;
374 tx_ring->next_to_clean = 0;
379 vfree(tx_ring->tx_buffer_info);
380 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
386 * @adapter: board private structure
388 * Return 0 on success, negative on failure
390 static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
392 struct net_device *dev = adapter->netdev;
395 for (i = 0; i < adapter->num_tx_queues; i++) {
396 err = igc_setup_tx_resources(adapter->tx_ring[i]);
398 netdev_err(dev, "Error on Tx queue %u setup\n", i);
399 for (i--; i >= 0; i--)
400 igc_free_tx_resources(adapter->tx_ring[i]);
408 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
410 u16 i = rx_ring->next_to_clean;
412 dev_kfree_skb(rx_ring->skb);
415 /* Free all the Rx ring sk_buffs */
416 while (i != rx_ring->next_to_alloc) {
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
419 /* Invalidate cache lines that may have been written to by
420 * device so that we avoid corrupting memory.
422 dma_sync_single_range_for_cpu(rx_ring->dev,
424 buffer_info->page_offset,
425 igc_rx_bufsz(rx_ring),
428 /* free resources associated with mapping */
429 dma_unmap_page_attrs(rx_ring->dev,
431 igc_rx_pg_size(rx_ring),
434 __page_frag_cache_drain(buffer_info->page,
435 buffer_info->pagecnt_bias);
438 if (i == rx_ring->count)
443 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
445 struct igc_rx_buffer *bi;
448 for (i = 0; i < ring->count; i++) {
449 bi = &ring->rx_buffer_info[i];
453 xsk_buff_free(bi->xdp);
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
460 * @ring: ring to free buffers from
462 static void igc_clean_rx_ring(struct igc_ring *ring)
465 igc_clean_rx_ring_xsk_pool(ring);
467 igc_clean_rx_ring_page_shared(ring);
469 clear_ring_uses_large_buffer(ring);
471 ring->next_to_alloc = 0;
472 ring->next_to_clean = 0;
473 ring->next_to_use = 0;
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
478 * @adapter: board private structure
480 static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
484 for (i = 0; i < adapter->num_rx_queues; i++)
485 if (adapter->rx_ring[i])
486 igc_clean_rx_ring(adapter->rx_ring[i]);
490 * igc_free_rx_resources - Free Rx Resources
491 * @rx_ring: ring to clean the resources from
493 * Free all receive software resources
495 void igc_free_rx_resources(struct igc_ring *rx_ring)
497 igc_clean_rx_ring(rx_ring);
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
501 vfree(rx_ring->rx_buffer_info);
502 rx_ring->rx_buffer_info = NULL;
504 /* if not set, then don't free */
508 dma_free_coherent(rx_ring->dev, rx_ring->size,
509 rx_ring->desc, rx_ring->dma);
511 rx_ring->desc = NULL;
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
516 * @adapter: board private structure
518 * Free all receive software resources
520 static void igc_free_all_rx_resources(struct igc_adapter *adapter)
524 for (i = 0; i < adapter->num_rx_queues; i++)
525 igc_free_rx_resources(adapter->rx_ring[i]);
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
530 * @rx_ring: rx descriptor ring (for a specific queue) to setup
532 * Returns 0 on success, negative on failure
534 int igc_setup_rx_resources(struct igc_ring *rx_ring)
536 struct net_device *ndev = rx_ring->netdev;
537 struct device *dev = rx_ring->dev;
538 u8 index = rx_ring->queue_index;
539 int size, desc_len, res;
541 /* XDP RX-queue info */
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
545 rx_ring->q_vector->napi.napi_id);
547 netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count;
553 rx_ring->rx_buffer_info = vzalloc(size);
554 if (!rx_ring->rx_buffer_info)
557 desc_len = sizeof(union igc_adv_rx_desc);
559 /* Round up to nearest 4K */
560 rx_ring->size = rx_ring->count * desc_len;
561 rx_ring->size = ALIGN(rx_ring->size, 4096);
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
564 &rx_ring->dma, GFP_KERNEL);
569 rx_ring->next_to_alloc = 0;
570 rx_ring->next_to_clean = 0;
571 rx_ring->next_to_use = 0;
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
577 vfree(rx_ring->rx_buffer_info);
578 rx_ring->rx_buffer_info = NULL;
579 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
585 * (Descriptors) for all queues
586 * @adapter: board private structure
588 * Return 0 on success, negative on failure
590 static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
592 struct net_device *dev = adapter->netdev;
595 for (i = 0; i < adapter->num_rx_queues; i++) {
596 err = igc_setup_rx_resources(adapter->rx_ring[i]);
598 netdev_err(dev, "Error on Rx queue %u setup\n", i);
599 for (i--; i >= 0; i--)
600 igc_free_rx_resources(adapter->rx_ring[i]);
608 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
609 struct igc_ring *ring)
611 if (!igc_xdp_is_enabled(adapter) ||
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
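/* A zero-copy pool is returned only when both conditions hold: an XDP
 * program is installed and user space has bound an AF_XDP socket to this
 * queue index. Otherwise the ring stays on the page-shared buffer scheme.
 */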
619 * igc_configure_rx_ring - Configure a receive ring after Reset
620 * @adapter: board private structure
621 * @ring: receive ring to be configured
623 * Configure the Rx unit of the MAC after a reset.
625 static void igc_configure_rx_ring(struct igc_adapter *adapter,
626 struct igc_ring *ring)
628 struct igc_hw *hw = &adapter->hw;
629 union igc_adv_rx_desc *rx_desc;
630 int reg_idx = ring->reg_idx;
631 u32 srrctl = 0, rxdctl = 0;
632 u64 rdba = ring->dma;
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
637 if (ring->xsk_pool) {
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
639 MEM_TYPE_XSK_BUFF_POOL,
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
644 MEM_TYPE_PAGE_SHARED,
648 if (igc_xdp_is_enabled(adapter))
649 set_ring_uses_large_buffer(ring);
651 /* disable the queue */
652 wr32(IGC_RXDCTL(reg_idx), 0);
654 /* Set DMA base address registers */
655 wr32(IGC_RDBAL(reg_idx),
656 rdba & 0x00000000ffffffffULL);
657 wr32(IGC_RDBAH(reg_idx), rdba >> 32);
658 wr32(IGC_RDLEN(reg_idx),
659 ring->count * sizeof(union igc_adv_rx_desc));
661 /* initialize head and tail */
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
663 wr32(IGC_RDH(reg_idx), 0);
664 writel(0, ring->tail);
666 /* reset next_to_use/next_to_clean to place SW in sync with hardware */
667 ring->next_to_clean = 0;
668 ring->next_to_use = 0;
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
672 else if (ring_uses_large_buffer(ring))
673 buf_size = IGC_RXBUFFER_3072;
675 buf_size = IGC_RXBUFFER_2048;
677 srrctl = rd32(IGC_SRRCTL(reg_idx));
678 srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
679 IGC_SRRCTL_DESCTYPE_MASK);
680 srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
681 srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
682 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
684 wr32(IGC_SRRCTL(reg_idx), srrctl);
686 rxdctl |= IGC_RX_PTHRESH;
687 rxdctl |= IGC_RX_HTHRESH << 8;
688 rxdctl |= IGC_RX_WTHRESH << 16;
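/* The prefetch, host and write-back descriptor thresholds occupy
 * consecutive byte-wide fields of RXDCTL, hence the shifts by 8 and 16.
 */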
690 /* initialize rx_buffer_info */
691 memset(ring->rx_buffer_info, 0,
692 sizeof(struct igc_rx_buffer) * ring->count);
694 /* initialize Rx descriptor 0 */
695 rx_desc = IGC_RX_DESC(ring, 0);
696 rx_desc->wb.upper.length = 0;
698 /* enable receive descriptor fetching */
699 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
701 wr32(IGC_RXDCTL(reg_idx), rxdctl);
705 * igc_configure_rx - Configure receive Unit after Reset
706 * @adapter: board private structure
708 * Configure the Rx unit of the MAC after a reset.
710 static void igc_configure_rx(struct igc_adapter *adapter)
714 /* Setup the HW Rx Head and Tail Descriptor Pointers and
715 * the Base and Length of the Rx Descriptor Ring
717 for (i = 0; i < adapter->num_rx_queues; i++)
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
722 * igc_configure_tx_ring - Configure transmit ring after Reset
723 * @adapter: board private structure
724 * @ring: tx ring to configure
726 * Configure a transmit ring after a reset.
728 static void igc_configure_tx_ring(struct igc_adapter *adapter,
729 struct igc_ring *ring)
731 struct igc_hw *hw = &adapter->hw;
732 int reg_idx = ring->reg_idx;
733 u64 tdba = ring->dma;
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
738 /* disable the queue */
739 wr32(IGC_TXDCTL(reg_idx), 0);
742 wr32(IGC_TDLEN(reg_idx),
743 ring->count * sizeof(union igc_adv_tx_desc));
744 wr32(IGC_TDBAL(reg_idx),
745 tdba & 0x00000000ffffffffULL);
746 wr32(IGC_TDBAH(reg_idx), tdba >> 32);
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
749 wr32(IGC_TDH(reg_idx), 0);
750 writel(0, ring->tail);
752 txdctl |= IGC_TX_PTHRESH;
753 txdctl |= IGC_TX_HTHRESH << 8;
754 txdctl |= IGC_TX_WTHRESH << 16;
756 txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
757 wr32(IGC_TXDCTL(reg_idx), txdctl);
761 * igc_configure_tx - Configure transmit Unit after Reset
762 * @adapter: board private structure
764 * Configure the Tx unit of the MAC after a reset.
766 static void igc_configure_tx(struct igc_adapter *adapter)
770 for (i = 0; i < adapter->num_tx_queues; i++)
771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
775 * igc_setup_mrqc - configure the multiple receive queue control registers
776 * @adapter: Board private structure
778 static void igc_setup_mrqc(struct igc_adapter *adapter)
780 struct igc_hw *hw = &adapter->hw;
781 u32 j, num_rx_queues;
785 netdev_rss_key_fill(rss_key, sizeof(rss_key));
786 for (j = 0; j < 10; j++)
787 wr32(IGC_RSSRK(j), rss_key[j]);
789 num_rx_queues = adapter->rss_queues;
791 if (adapter->rss_indir_tbl_init != num_rx_queues) {
792 for (j = 0; j < IGC_RETA_SIZE; j++)
793 adapter->rss_indir_tbl[j] =
794 (j * num_rx_queues) / IGC_RETA_SIZE;
795 adapter->rss_indir_tbl_init = num_rx_queues;
797 igc_write_rss_indir_tbl(adapter);
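/* Example: if IGC_RETA_SIZE were 128 and rss_queues 4, (j * 4) / 128 maps
 * indirection entries 0-31 to queue 0, 32-63 to queue 1, and so on, giving
 * an even spread across the enabled queues.
 */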
799 /* Disable raw packet checksumming so that RSS hash is placed in
800 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
801 * offloads as they are enabled by default
803 rxcsum = rd32(IGC_RXCSUM);
804 rxcsum |= IGC_RXCSUM_PCSD;
806 /* Enable Receive Checksum Offload for SCTP */
807 rxcsum |= IGC_RXCSUM_CRCOFL;
809 /* Don't need to set TUOFL or IPOFL, they default to 1 */
810 wr32(IGC_RXCSUM, rxcsum);
812 /* Generate RSS hash based on packet types, TCP/UDP
813 * port numbers and/or IPv4/v6 src and dst addresses
815 mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
816 IGC_MRQC_RSS_FIELD_IPV4_TCP |
817 IGC_MRQC_RSS_FIELD_IPV6 |
818 IGC_MRQC_RSS_FIELD_IPV6_TCP |
819 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
822 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
824 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
826 mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
828 wr32(IGC_MRQC, mrqc);
832 * igc_setup_rctl - configure the receive control registers
833 * @adapter: Board private structure
835 static void igc_setup_rctl(struct igc_adapter *adapter)
837 struct igc_hw *hw = &adapter->hw;
840 rctl = rd32(IGC_RCTL);
842 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
843 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
845 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
848 /* enable stripping of CRC. Newer features require
849 * that the HW strips the CRC.
851 rctl |= IGC_RCTL_SECRC;
853 /* disable store bad packets and clear size bits. */
854 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
856 /* enable LPE to allow for reception of jumbo frames */
857 rctl |= IGC_RCTL_LPE;
859 /* disable queue 0 to prevent tail write w/o re-config */
860 wr32(IGC_RXDCTL(0), 0);
862 /* This is useful for sniffing bad packets. */
863 if (adapter->netdev->features & NETIF_F_RXALL) {
864 /* UPE and MPE will be handled by normal PROMISC logic
867 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
868 IGC_RCTL_BAM | /* RX All Bcast Pkts */
869 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
871 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
872 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
875 wr32(IGC_RCTL, rctl);
879 * igc_setup_tctl - configure the transmit control registers
880 * @adapter: Board private structure
882 static void igc_setup_tctl(struct igc_adapter *adapter)
884 struct igc_hw *hw = &adapter->hw;
887 /* disable queue 0, which could be enabled by default */
888 wr32(IGC_TXDCTL(0), 0);
890 /* Program the Transmit Control Register */
891 tctl = rd32(IGC_TCTL);
892 tctl &= ~IGC_TCTL_CT;
893 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
894 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
896 /* Enable transmits */
899 wr32(IGC_TCTL, tctl);
903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
904 * @adapter: Pointer to adapter where the filter should be set
905 * @index: Filter index
906 * @type: MAC address filter type (source or destination)
908 * @queue: If non-negative, queue assignment feature is enabled and frames
909 * matching the filter are enqueued onto 'queue'. Otherwise, queue
910 * assignment is disabled.
912 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
913 enum igc_mac_filter_type type,
914 const u8 *addr, int queue)
916 struct net_device *dev = adapter->netdev;
917 struct igc_hw *hw = &adapter->hw;
920 if (WARN_ON(index >= hw->mac.rar_entry_count))
923 ral = le32_to_cpup((__le32 *)(addr));
924 rah = le16_to_cpup((__le16 *)(addr + 4));
926 if (type == IGC_MAC_FILTER_TYPE_SRC) {
927 rah &= ~IGC_RAH_ASEL_MASK;
928 rah |= IGC_RAH_ASEL_SRC_ADDR;
932 rah &= ~IGC_RAH_QSEL_MASK;
933 rah |= (queue << IGC_RAH_QSEL_SHIFT);
934 rah |= IGC_RAH_QSEL_ENABLE;
939 wr32(IGC_RAL(index), ral);
940 wr32(IGC_RAH(index), rah);
942 netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
947 * @adapter: Pointer to adapter where the filter should be cleared
948 * @index: Filter index
950 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
952 struct net_device *dev = adapter->netdev;
953 struct igc_hw *hw = &adapter->hw;
955 if (WARN_ON(index >= hw->mac.rar_entry_count))
958 wr32(IGC_RAL(index), 0);
959 wr32(IGC_RAH(index), 0);
961 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
964 /* Set default MAC address for the PF in the first RAR entry */
965 static void igc_set_default_mac_filter(struct igc_adapter *adapter)
967 struct net_device *dev = adapter->netdev;
968 u8 *addr = adapter->hw.mac.addr;
970 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
976 * igc_set_mac - Change the Ethernet Address of the NIC
977 * @netdev: network interface device structure
978 * @p: pointer to an address structure
980 * Returns 0 on success, negative on failure
982 static int igc_set_mac(struct net_device *netdev, void *p)
984 struct igc_adapter *adapter = netdev_priv(netdev);
985 struct igc_hw *hw = &adapter->hw;
986 struct sockaddr *addr = p;
988 if (!is_valid_ether_addr(addr->sa_data))
989 return -EADDRNOTAVAIL;
991 eth_hw_addr_set(netdev, addr->sa_data);
992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
994 /* set the correct pool for the new PF MAC address in entry 0 */
995 igc_set_default_mac_filter(adapter);
1001 * igc_write_mc_addr_list - write multicast addresses to MTA
1002 * @netdev: network interface device structure
1004 * Writes multicast address list to the MTA hash table.
1005 * Returns: -ENOMEM on failure
1006 * 0 on no addresses written
1007 * X on writing X addresses to MTA
1009 static int igc_write_mc_addr_list(struct net_device *netdev)
1011 struct igc_adapter *adapter = netdev_priv(netdev);
1012 struct igc_hw *hw = &adapter->hw;
1013 struct netdev_hw_addr *ha;
1017 if (netdev_mc_empty(netdev)) {
1018 /* nothing to program, so clear mc list */
1019 igc_update_mc_addr_list(hw, NULL, 0);
1023 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
1027 /* The shared function expects a packed array of only addresses. */
1029 netdev_for_each_mc_addr(ha, netdev)
1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1032 igc_update_mc_addr_list(hw, mta_list, i);
1035 return netdev_mc_count(netdev);
1038 static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
1039 bool *first_flag, bool *insert_empty)
1041 struct igc_adapter *adapter = netdev_priv(ring->netdev);
1042 ktime_t cycle_time = adapter->cycle_time;
1043 ktime_t base_time = adapter->base_time;
1044 ktime_t now = ktime_get_clocktai();
1045 ktime_t baset_est, end_of_cycle;
1049 n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
1051 baset_est = ktime_add_ns(base_time, cycle_time * (n));
1052 end_of_cycle = ktime_add_ns(baset_est, cycle_time);
1054 if (ktime_compare(txtime, end_of_cycle) >= 0) {
1055 if (baset_est != ring->last_ff_cycle) {
1057 ring->last_ff_cycle = baset_est;
1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
1060 *insert_empty = true;
1064 /* Introduce a window at the end of the cycle in which packets may
1065 * not honor their launchtime. A 5 usec window is chosen to allow for
1066 * the software updating the tail pointer and the packets being
1067 * DMA'ed into the packet buffer.
1069 if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
1073 ring->last_tx_cycle = end_of_cycle;
1075 launchtime = ktime_sub_ns(txtime, baset_est);
1077 div_s64_rem(launchtime, cycle_time, &launchtime);
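/* launchtime is expressed relative to the estimated base of the current
 * cycle (baset_est) and wrapped to within one cycle by div_s64_rem().
 */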
1081 return cpu_to_le32(launchtime);
1084 static int igc_init_empty_frame(struct igc_ring *ring,
1085 struct igc_tx_buffer *buffer,
1086 struct sk_buff *skb)
1091 size = skb_headlen(skb);
1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
1094 if (dma_mapping_error(ring->dev, dma)) {
1095 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
1100 buffer->protocol = 0;
1101 buffer->bytecount = skb->len;
1102 buffer->gso_segs = 1;
1103 buffer->time_stamp = jiffies;
1104 dma_unmap_len_set(buffer, len, skb->len);
1105 dma_unmap_addr_set(buffer, dma, dma);
1110 static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
1111 struct sk_buff *skb,
1112 struct igc_tx_buffer *first)
1114 union igc_adv_tx_desc *desc;
1115 u32 cmd_type, olinfo_status;
1118 if (!igc_desc_unused(ring))
1121 err = igc_init_empty_frame(ring, first, skb);
1125 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
1126 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
1128 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
1130 desc = IGC_TX_DESC(ring, ring->next_to_use);
1131 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1132 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1133 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
1135 netdev_tx_sent_queue(txring_txq(ring), skb->len);
1137 first->next_to_watch = desc;
1139 ring->next_to_use++;
1140 if (ring->next_to_use == ring->count)
1141 ring->next_to_use = 0;
1146 #define IGC_EMPTY_FRAME_SIZE 60
1148 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1149 __le32 launch_time, bool first_flag,
1150 u32 vlan_macip_lens, u32 type_tucmd,
1153 struct igc_adv_tx_context_desc *context_desc;
1154 u16 i = tx_ring->next_to_use;
1156 context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1159 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1161 /* set bits to identify this as an advanced context descriptor */
1162 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1164 /* For i225, context index must be unique per ring. */
1165 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1166 mss_l4len_idx |= tx_ring->reg_idx << 4;
1169 mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
1171 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1172 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1173 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1174 context_desc->launch_time = launch_time;
1177 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
1178 __le32 launch_time, bool first_flag)
1180 struct sk_buff *skb = first->skb;
1181 u32 vlan_macip_lens = 0;
1184 if (skb->ip_summed != CHECKSUM_PARTIAL) {
1186 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1187 !tx_ring->launchtime_enable)
1192 switch (skb->csum_offset) {
1193 case offsetof(struct tcphdr, check):
1194 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1196 case offsetof(struct udphdr, check):
1198 case offsetof(struct sctphdr, checksum):
1199 /* validate that this is actually an SCTP request */
1200 if (skb_csum_is_sctp(skb)) {
1201 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1206 skb_checksum_help(skb);
1210 /* update TX checksum flag */
1211 first->tx_flags |= IGC_TX_FLAGS_CSUM;
1212 vlan_macip_lens = skb_checksum_start_offset(skb) -
1213 skb_network_offset(skb);
1215 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1216 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1218 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1219 vlan_macip_lens, type_tucmd, 0);
1222 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1224 struct net_device *netdev = tx_ring->netdev;
1226 netif_stop_subqueue(netdev, tx_ring->queue_index);
1228 /* memory barrier: make the queue stop visible before re-checking */
1231 /* We need to check again in case another CPU has just
1232 * made room available.
1234 if (igc_desc_unused(tx_ring) < size)
1238 netif_wake_subqueue(netdev, tx_ring->queue_index);
1240 u64_stats_update_begin(&tx_ring->tx_syncp2);
1241 tx_ring->tx_stats.restart_queue2++;
1242 u64_stats_update_end(&tx_ring->tx_syncp2);
1247 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1249 if (igc_desc_unused(tx_ring) >= size)
1251 return __igc_maybe_stop_tx(tx_ring, size);
1254 #define IGC_SET_FLAG(_input, _flag, _result) \
1255 (((_flag) <= (_result)) ? \
1256 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
1257 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
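/* Moves the masked _flag bit to the _result bit position by scaling:
 * (_result / _flag) and (_flag / _result) are compile-time constants, so
 * the translation is branchless.
 */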
1259 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1261 /* set type for advanced descriptor with frame checksum insertion */
1262 u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1263 IGC_ADVTXD_DCMD_DEXT |
1264 IGC_ADVTXD_DCMD_IFCS;
1266 /* set HW vlan bit if vlan is present */
1267 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1268 IGC_ADVTXD_DCMD_VLE);
1270 /* set segmentation bits for TSO */
1271 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1272 (IGC_ADVTXD_DCMD_TSE));
1274 /* set timestamp bit if present, will select the register set
1275 * based on the _TSTAMP(_X) bit.
1277 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1278 (IGC_ADVTXD_MAC_TSTAMP));
1280 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_1,
1281 (IGC_ADVTXD_TSTAMP_REG_1));
1283 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_2,
1284 (IGC_ADVTXD_TSTAMP_REG_2));
1286 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_3,
1287 (IGC_ADVTXD_TSTAMP_REG_3));
1289 /* clear IFCS (frame checksum insertion) when the skb sets no_fcs */
1290 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1295 static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1296 union igc_adv_tx_desc *tx_desc,
1297 u32 tx_flags, unsigned int paylen)
1299 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1301 /* insert L4 checksum */
1302 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_CSUM,
1303 (IGC_TXD_POPTS_TXSM << 8));
1305 /* insert IPv4 checksum */
1306 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_IPV4,
1307 (IGC_TXD_POPTS_IXSM << 8));
1309 /* Use the second timer (free running, in general) for the timestamp */
1310 olinfo_status |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP_TIMER_1,
1311 IGC_TXD_PTP2_TIMER_1);
1313 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1316 static int igc_tx_map(struct igc_ring *tx_ring,
1317 struct igc_tx_buffer *first,
1320 struct sk_buff *skb = first->skb;
1321 struct igc_tx_buffer *tx_buffer;
1322 union igc_adv_tx_desc *tx_desc;
1323 u32 tx_flags = first->tx_flags;
1325 u16 i = tx_ring->next_to_use;
1326 unsigned int data_len, size;
1330 cmd_type = igc_tx_cmd_type(skb, tx_flags);
1331 tx_desc = IGC_TX_DESC(tx_ring, i);
1333 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1335 size = skb_headlen(skb);
1336 data_len = skb->data_len;
1338 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1342 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1343 if (dma_mapping_error(tx_ring->dev, dma))
1346 /* record length, and DMA address */
1347 dma_unmap_len_set(tx_buffer, len, size);
1348 dma_unmap_addr_set(tx_buffer, dma, dma);
1350 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1352 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1353 tx_desc->read.cmd_type_len =
1354 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1358 if (i == tx_ring->count) {
1359 tx_desc = IGC_TX_DESC(tx_ring, 0);
1362 tx_desc->read.olinfo_status = 0;
1364 dma += IGC_MAX_DATA_PER_TXD;
1365 size -= IGC_MAX_DATA_PER_TXD;
1367 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1370 if (likely(!data_len))
1373 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1377 if (i == tx_ring->count) {
1378 tx_desc = IGC_TX_DESC(tx_ring, 0);
1381 tx_desc->read.olinfo_status = 0;
1383 size = skb_frag_size(frag);
1386 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1387 size, DMA_TO_DEVICE);
1389 tx_buffer = &tx_ring->tx_buffer_info[i];
1392 /* write last descriptor with RS and EOP bits */
1393 cmd_type |= size | IGC_TXD_DCMD;
1394 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1396 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1398 /* set the timestamp */
1399 first->time_stamp = jiffies;
1401 skb_tx_timestamp(skb);
1403 /* Force memory writes to complete before letting h/w know there
1404 * are new descriptors to fetch. (Only applicable for weak-ordered
1405 * memory model archs, such as IA-64).
1407 * We also need this memory barrier to make certain all of the
1408 * status bits have been updated before next_to_watch is written.
1412 /* set next_to_watch value indicating a packet is present */
1413 first->next_to_watch = tx_desc;
1416 if (i == tx_ring->count)
1419 tx_ring->next_to_use = i;
1421 /* Make sure there is space in the ring for the next send. */
1422 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1424 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1425 writel(i, tx_ring->tail);
1430 netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1431 tx_buffer = &tx_ring->tx_buffer_info[i];
1433 /* clear dma mappings for failed tx_buffer_info map */
1434 while (tx_buffer != first) {
1435 if (dma_unmap_len(tx_buffer, len))
1436 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1439 i += tx_ring->count;
1440 tx_buffer = &tx_ring->tx_buffer_info[i];
1443 if (dma_unmap_len(tx_buffer, len))
1444 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1446 dev_kfree_skb_any(tx_buffer->skb);
1447 tx_buffer->skb = NULL;
1449 tx_ring->next_to_use = i;
1454 static int igc_tso(struct igc_ring *tx_ring,
1455 struct igc_tx_buffer *first,
1456 __le32 launch_time, bool first_flag,
1459 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1460 struct sk_buff *skb = first->skb;
1471 u32 paylen, l4_offset;
1474 if (skb->ip_summed != CHECKSUM_PARTIAL)
1477 if (!skb_is_gso(skb))
1480 err = skb_cow_head(skb, 0);
1484 ip.hdr = skb_network_header(skb);
1485 l4.hdr = skb_checksum_start(skb);
1487 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1488 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1490 /* initialize outer IP header fields */
1491 if (ip.v4->version == 4) {
1492 unsigned char *csum_start = skb_checksum_start(skb);
1493 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1495 /* IP header will have to cancel out any data that
1496 * is not a part of the outer IP header
1498 ip.v4->check = csum_fold(csum_partial(trans_start,
1499 csum_start - trans_start,
1501 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1504 first->tx_flags |= IGC_TX_FLAGS_TSO |
1508 ip.v6->payload_len = 0;
1509 first->tx_flags |= IGC_TX_FLAGS_TSO |
1513 /* determine offset of inner transport header */
1514 l4_offset = l4.hdr - skb->data;
1516 /* remove payload length from inner checksum */
1517 paylen = skb->len - l4_offset;
1518 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1519 /* compute length of segmentation header */
1520 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1521 csum_replace_by_diff(&l4.tcp->check,
1522 (__force __wsum)htonl(paylen));
1524 /* compute length of segmentation header */
1525 *hdr_len = sizeof(*l4.udp) + l4_offset;
1526 csum_replace_by_diff(&l4.udp->check,
1527 (__force __wsum)htonl(paylen));
1530 /* update gso size and bytecount with header size */
1531 first->gso_segs = skb_shinfo(skb)->gso_segs;
1532 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1535 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1536 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1538 /* VLAN MACLEN IPLEN */
1539 vlan_macip_lens = l4.hdr - ip.hdr;
1540 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1541 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1543 igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
1544 vlan_macip_lens, type_tucmd, mss_l4len_idx);
1549 static bool igc_request_tx_tstamp(struct igc_adapter *adapter, struct sk_buff *skb, u32 *flags)
1553 for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
1554 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
1559 tstamp->skb = skb_get(skb);
1560 tstamp->start = jiffies;
1561 *flags = tstamp->flags;
1569 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1570 struct igc_ring *tx_ring)
1572 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1573 bool first_flag = false, insert_empty = false;
1574 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1575 __be16 protocol = vlan_get_protocol(skb);
1576 struct igc_tx_buffer *first;
1577 __le32 launch_time = 0;
1584 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1585 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1586 * + 2 desc gap to keep tail from touching head,
1587 * + 1 desc for context descriptor,
1588 * otherwise try next time
1590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1591 count += TXD_USE_COUNT(skb_frag_size(
1592 &skb_shinfo(skb)->frags[f]));
1594 if (igc_maybe_stop_tx(tx_ring, count + 5)) {
1595 /* this is a hard error */
1596 return NETDEV_TX_BUSY;
1599 if (!tx_ring->launchtime_enable)
1602 txtime = skb->tstamp;
1603 skb->tstamp = ktime_set(0, 0);
1604 launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
1607 struct igc_tx_buffer *empty_info;
1608 struct sk_buff *empty;
1611 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1612 empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
1616 data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
1617 memset(data, 0, IGC_EMPTY_FRAME_SIZE);
1619 igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
1621 if (igc_init_tx_empty_descriptor(tx_ring,
1624 dev_kfree_skb_any(empty);
1628 /* record the location of the first descriptor for this packet */
1629 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1630 first->type = IGC_TX_BUFFER_TYPE_SKB;
1632 first->bytecount = skb->len;
1633 first->gso_segs = 1;
1635 if (adapter->qbv_transition || tx_ring->oper_gate_closed)
1638 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) {
1639 adapter->stats.txdrop++;
1643 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) &&
1644 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1645 /* FIXME: add support for retrieving timestamps from
1646 * the other timer registers before skipping the
1647 * timestamping request.
1649 unsigned long flags;
1652 spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
1653 if (igc_request_tx_tstamp(adapter, skb, &tstamp_flags)) {
1654 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1655 tx_flags |= IGC_TX_FLAGS_TSTAMP | tstamp_flags;
1656 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES)
1657 tx_flags |= IGC_TX_FLAGS_TSTAMP_TIMER_1;
1659 adapter->tx_hwtstamp_skipped++;
1662 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
1665 if (skb_vlan_tag_present(skb)) {
1666 tx_flags |= IGC_TX_FLAGS_VLAN;
1667 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1670 /* record initial flags and protocol */
1671 first->tx_flags = tx_flags;
1672 first->protocol = protocol;
1674 tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
1678 igc_tx_csum(tx_ring, first, launch_time, first_flag);
1680 igc_tx_map(tx_ring, first, hdr_len);
1682 return NETDEV_TX_OK;
1685 dev_kfree_skb_any(first->skb);
1688 return NETDEV_TX_OK;
1691 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1692 struct sk_buff *skb)
1694 unsigned int r_idx = skb->queue_mapping;
1696 if (r_idx >= adapter->num_tx_queues)
1697 r_idx = r_idx % adapter->num_tx_queues;
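/* The queue_mapping chosen by the stack can exceed the number of Tx queues
 * currently allocated (e.g. after the queue count shrinks), so fold it
 * back into range instead of dereferencing a stale ring.
 */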
1699 return adapter->tx_ring[r_idx];
1702 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1703 struct net_device *netdev)
1705 struct igc_adapter *adapter = netdev_priv(netdev);
1707 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1708 * in order to meet this minimum size requirement.
1710 if (skb->len < 17) {
1711 if (skb_padto(skb, 17))
1712 return NETDEV_TX_OK;
1716 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1719 static void igc_rx_checksum(struct igc_ring *ring,
1720 union igc_adv_rx_desc *rx_desc,
1721 struct sk_buff *skb)
1723 skb_checksum_none_assert(skb);
1725 /* Ignore Checksum bit is set */
1726 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1729 /* Rx checksum disabled via ethtool */
1730 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1733 /* TCP/UDP checksum error bit is set */
1734 if (igc_test_staterr(rx_desc,
1735 IGC_RXDEXT_STATERR_L4E |
1736 IGC_RXDEXT_STATERR_IPE)) {
1737 /* work around errata with sctp packets where the TCPE aka
1738 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1739 * packets (aka let the stack check the crc32c)
1741 if (!(skb->len == 60 &&
1742 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1743 u64_stats_update_begin(&ring->rx_syncp);
1744 ring->rx_stats.csum_err++;
1745 u64_stats_update_end(&ring->rx_syncp);
1747 /* let the stack verify checksum errors */
1750 /* It must be a TCP or UDP packet with a valid checksum */
1751 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1752 IGC_RXD_STAT_UDPCS))
1753 skb->ip_summed = CHECKSUM_UNNECESSARY;
1755 netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1756 le32_to_cpu(rx_desc->wb.upper.status_error));
1759 /* Mapping HW RSS Type to enum pkt_hash_types */
1760 static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
1761 [IGC_RSS_TYPE_NO_HASH] = PKT_HASH_TYPE_L2,
1762 [IGC_RSS_TYPE_HASH_TCP_IPV4] = PKT_HASH_TYPE_L4,
1763 [IGC_RSS_TYPE_HASH_IPV4] = PKT_HASH_TYPE_L3,
1764 [IGC_RSS_TYPE_HASH_TCP_IPV6] = PKT_HASH_TYPE_L4,
1765 [IGC_RSS_TYPE_HASH_IPV6_EX] = PKT_HASH_TYPE_L3,
1766 [IGC_RSS_TYPE_HASH_IPV6] = PKT_HASH_TYPE_L3,
1767 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
1768 [IGC_RSS_TYPE_HASH_UDP_IPV4] = PKT_HASH_TYPE_L4,
1769 [IGC_RSS_TYPE_HASH_UDP_IPV6] = PKT_HASH_TYPE_L4,
1770 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
1771 [10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
1772 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1773 [12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions */
1774 [13] = PKT_HASH_TYPE_NONE,
1775 [14] = PKT_HASH_TYPE_NONE,
1776 [15] = PKT_HASH_TYPE_NONE,
1779 static inline void igc_rx_hash(struct igc_ring *ring,
1780 union igc_adv_rx_desc *rx_desc,
1781 struct sk_buff *skb)
1783 if (ring->netdev->features & NETIF_F_RXHASH) {
1784 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1785 u32 rss_type = igc_rss_type(rx_desc);
1787 skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
1791 static void igc_rx_vlan(struct igc_ring *rx_ring,
1792 union igc_adv_rx_desc *rx_desc,
1793 struct sk_buff *skb)
1795 struct net_device *dev = rx_ring->netdev;
1798 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1799 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1800 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1801 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1802 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1804 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1811 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1812 * @rx_ring: rx descriptor ring packet is being transacted on
1813 * @rx_desc: pointer to the EOP Rx descriptor
1814 * @skb: pointer to current skb being populated
1816 * This function checks the ring, descriptor, and packet information in order
1817 * to populate the hash, checksum, VLAN, protocol, and other fields within the
1820 static void igc_process_skb_fields(struct igc_ring *rx_ring,
1821 union igc_adv_rx_desc *rx_desc,
1822 struct sk_buff *skb)
1824 igc_rx_hash(rx_ring, rx_desc, skb);
1826 igc_rx_checksum(rx_ring, rx_desc, skb);
1828 igc_rx_vlan(rx_ring, rx_desc, skb);
1830 skb_record_rx_queue(skb, rx_ring->queue_index);
1832 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1835 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1837 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1838 struct igc_adapter *adapter = netdev_priv(netdev);
1839 struct igc_hw *hw = &adapter->hw;
1842 ctrl = rd32(IGC_CTRL);
1845 /* enable VLAN tag insert/strip */
1846 ctrl |= IGC_CTRL_VME;
1848 /* disable VLAN tag insert/strip */
1849 ctrl &= ~IGC_CTRL_VME;
1851 wr32(IGC_CTRL, ctrl);
1854 static void igc_restore_vlan(struct igc_adapter *adapter)
1856 igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1859 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1860 const unsigned int size,
1861 int *rx_buffer_pgcnt)
1863 struct igc_rx_buffer *rx_buffer;
1865 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1867 #if (PAGE_SIZE < 8192)
1868 page_count(rx_buffer->page);
1872 prefetchw(rx_buffer->page);
1874 /* we are reusing so sync this buffer for CPU use */
1875 dma_sync_single_range_for_cpu(rx_ring->dev,
1877 rx_buffer->page_offset,
1881 rx_buffer->pagecnt_bias--;
1886 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1887 unsigned int truesize)
1889 #if (PAGE_SIZE < 8192)
1890 buffer->page_offset ^= truesize;
1892 buffer->page_offset += truesize;
1896 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1899 unsigned int truesize;
1901 #if (PAGE_SIZE < 8192)
1902 truesize = igc_rx_pg_size(ring) / 2;
1904 truesize = ring_uses_build_skb(ring) ?
1905 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1906 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1907 SKB_DATA_ALIGN(size);
1913 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1914 * @rx_ring: rx descriptor ring to transact packets on
1915 * @rx_buffer: buffer containing page to add
1916 * @skb: sk_buff to place the data into
1917 * @size: size of buffer to be added
1919 * This function will add the data contained in rx_buffer->page to the skb.
1921 static void igc_add_rx_frag(struct igc_ring *rx_ring,
1922 struct igc_rx_buffer *rx_buffer,
1923 struct sk_buff *skb,
1926 unsigned int truesize;
1928 #if (PAGE_SIZE < 8192)
1929 truesize = igc_rx_pg_size(rx_ring) / 2;
1931 truesize = ring_uses_build_skb(rx_ring) ?
1932 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1933 SKB_DATA_ALIGN(size);
1935 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1936 rx_buffer->page_offset, size, truesize);
1938 igc_rx_buffer_flip(rx_buffer, truesize);
1941 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1942 struct igc_rx_buffer *rx_buffer,
1943 struct xdp_buff *xdp)
1945 unsigned int size = xdp->data_end - xdp->data;
1946 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1947 unsigned int metasize = xdp->data - xdp->data_meta;
1948 struct sk_buff *skb;
1950 /* prefetch first cache line of first page */
1951 net_prefetch(xdp->data_meta);
1953 /* build an skb around the page buffer */
1954 skb = napi_build_skb(xdp->data_hard_start, truesize);
1958 /* update pointers within the skb to store the data */
1959 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1960 __skb_put(skb, size);
1962 skb_metadata_set(skb, metasize);
1964 igc_rx_buffer_flip(rx_buffer, truesize);
1968 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1969 struct igc_rx_buffer *rx_buffer,
1970 struct igc_xdp_buff *ctx)
1972 struct xdp_buff *xdp = &ctx->xdp;
1973 unsigned int metasize = xdp->data - xdp->data_meta;
1974 unsigned int size = xdp->data_end - xdp->data;
1975 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1976 void *va = xdp->data;
1977 unsigned int headlen;
1978 struct sk_buff *skb;
1980 /* prefetch first cache line of first page */
1981 net_prefetch(xdp->data_meta);
1983 /* allocate a skb to store the frags */
1984 skb = napi_alloc_skb(&rx_ring->q_vector->napi,
1985 IGC_RX_HDR_LEN + metasize);
1990 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
1991 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
1994 /* Determine available headroom for copy */
1996 if (headlen > IGC_RX_HDR_LEN)
1997 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1999 /* align pull length to size of long to optimize memcpy performance */
2000 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
2001 ALIGN(headlen + metasize, sizeof(long)));
2004 skb_metadata_set(skb, metasize);
2005 __skb_pull(skb, metasize);
2008 /* update all of the pointers */
2011 skb_add_rx_frag(skb, 0, rx_buffer->page,
2012 (va + headlen) - page_address(rx_buffer->page),
2014 igc_rx_buffer_flip(rx_buffer, truesize);
2016 rx_buffer->pagecnt_bias++;
2023 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2024 * @rx_ring: rx descriptor ring to store buffers on
2025 * @old_buff: donor buffer to have page reused
2027 * Synchronizes page for reuse by the adapter
2029 static void igc_reuse_rx_page(struct igc_ring *rx_ring,
2030 struct igc_rx_buffer *old_buff)
2032 u16 nta = rx_ring->next_to_alloc;
2033 struct igc_rx_buffer *new_buff;
2035 new_buff = &rx_ring->rx_buffer_info[nta];
2037 /* update, and store next to alloc */
2039 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
2041 /* Transfer page from old buffer to new buffer.
2042 * Move each member individually to avoid possible store
2043 * forwarding stalls.
2045 new_buff->dma = old_buff->dma;
2046 new_buff->page = old_buff->page;
2047 new_buff->page_offset = old_buff->page_offset;
2048 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
2051 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
2052 int rx_buffer_pgcnt)
2054 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
2055 struct page *page = rx_buffer->page;
2057 /* avoid re-using remote and pfmemalloc pages */
2058 if (!dev_page_is_reusable(page))
2061 #if (PAGE_SIZE < 8192)
2062 /* if we are only owner of page we can reuse it */
2063 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
2066 #define IGC_LAST_OFFSET \
2067 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
2069 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
2073 /* If we have drained the page fragment pool we need to update
2074 * the pagecnt_bias and page count so that we fully restock the
2075 * number of references the driver holds.
2077 if (unlikely(pagecnt_bias == 1)) {
2078 page_ref_add(page, USHRT_MAX - 1);
2079 rx_buffer->pagecnt_bias = USHRT_MAX;
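/* Restock references in bulk: take USHRT_MAX - 1 page references at once
 * and pay them out by decrementing pagecnt_bias, avoiding an atomic
 * page_ref operation per received frame.
 */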
2086 * igc_is_non_eop - process handling of non-EOP buffers
2087 * @rx_ring: Rx ring being processed
2088 * @rx_desc: Rx descriptor for current buffer
2090 * This function updates next to clean. If the buffer is an EOP buffer
2091 * this function exits returning false, otherwise it will place the
2092 * sk_buff in the next buffer to be chained and return true indicating
2093 * that this is in fact a non-EOP buffer.
2095 static bool igc_is_non_eop(struct igc_ring *rx_ring,
2096 union igc_adv_rx_desc *rx_desc)
2098 u32 ntc = rx_ring->next_to_clean + 1;
2100 /* fetch, update, and store next to clean */
2101 ntc = (ntc < rx_ring->count) ? ntc : 0;
2102 rx_ring->next_to_clean = ntc;
2104 prefetch(IGC_RX_DESC(rx_ring, ntc));
2106 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
2113 * igc_cleanup_headers - Correct corrupted or empty headers
2114 * @rx_ring: rx descriptor ring packet is being transacted on
2115 * @rx_desc: pointer to the EOP Rx descriptor
2116 * @skb: pointer to current skb being fixed
2118 * Address the case where we are pulling data in on pages only
2119 * and as such no data is present in the skb header.
2121 * In addition if skb is not at least 60 bytes we need to pad it so that
2122 * it is large enough to qualify as a valid Ethernet frame.
2124 * Returns true if an error was encountered and skb was freed.
2126 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
2127 union igc_adv_rx_desc *rx_desc,
2128 struct sk_buff *skb)
2130 /* XDP packets use error pointer so abort at this point */
2134 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
2135 struct net_device *netdev = rx_ring->netdev;
2137 if (!(netdev->features & NETIF_F_RXALL)) {
2138 dev_kfree_skb_any(skb);
2143 /* if eth_skb_pad returns an error the skb was freed */
2144 if (eth_skb_pad(skb))
2150 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
2151 struct igc_rx_buffer *rx_buffer,
2152 int rx_buffer_pgcnt)
2154 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
2155 /* hand second half of page back to the ring */
2156 igc_reuse_rx_page(rx_ring, rx_buffer);
2158 /* We are not reusing the buffer so unmap it and free
2159 * any references we are holding to it
2161 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
2162 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
2164 __page_frag_cache_drain(rx_buffer->page,
2165 rx_buffer->pagecnt_bias);
2168 /* clear contents of rx_buffer */
2169 rx_buffer->page = NULL;
2172 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
2174 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
2176 if (ring_uses_build_skb(rx_ring))
2178 if (igc_xdp_is_enabled(adapter))
2179 return XDP_PACKET_HEADROOM;
2184 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
2185 struct igc_rx_buffer *bi)
2187 struct page *page = bi->page;
2190 /* since we are recycling buffers we should seldom need to alloc */
2194 /* alloc new page for storage */
2195 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
2196 if (unlikely(!page)) {
2197 rx_ring->rx_stats.alloc_failed++;
2201 /* map page for use */
2202 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
2203 igc_rx_pg_size(rx_ring),
2207 /* if mapping failed free memory back to system since
2208 * there isn't much point in holding memory we can't use
2210 if (dma_mapping_error(rx_ring->dev, dma)) {
2213 rx_ring->rx_stats.alloc_failed++;
2219 bi->page_offset = igc_rx_offset(rx_ring);
2220 page_ref_add(page, USHRT_MAX - 1);
2221 bi->pagecnt_bias = USHRT_MAX;
2227 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2228 * @rx_ring: rx descriptor ring
2229 * @cleaned_count: number of buffers to clean
2231 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
2233 union igc_adv_rx_desc *rx_desc;
2234 u16 i = rx_ring->next_to_use;
2235 struct igc_rx_buffer *bi;
2242 rx_desc = IGC_RX_DESC(rx_ring, i);
2243 bi = &rx_ring->rx_buffer_info[i];
2244 i -= rx_ring->count;
	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;
2252 /* sync the buffer for use by the device */
2253 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);
2257 /* Refresh the desc even if buffer_addrs didn't change
2258 * because each write-back erases this info.
2260 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
2267 bi = rx_ring->rx_buffer_info;
2268 i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);
2277 i += rx_ring->count;
2279 if (rx_ring->next_to_use != i) {
2280 /* record the next descriptor to use */
2281 rx_ring->next_to_use = i;
2283 /* update next to alloc since we have filled the ring */
2284 rx_ring->next_to_alloc = i;
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
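/* Note on the index arithmetic above: "i" is biased by subtracting
 * ring->count up front, so it runs from -count towards 0; "unlikely(!i)"
 * then doubles as the wrap-around test, and adding ring->count back at the
 * end recovers the real next_to_use position.
 */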
2296 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2298 union igc_adv_rx_desc *desc;
2299 u16 i = ring->next_to_use;
	struct igc_rx_buffer *bi;
	dma_addr_t dma;
	bool ok = true;

	if (!count)
		return ok;
2307 XSK_CHECK_PRIV_TYPE(struct igc_xdp_buff);
2309 desc = IGC_RX_DESC(ring, i);
	bi = &ring->rx_buffer_info[i];
	i -= ring->count;

	do {
		bi->xdp = xsk_buff_alloc(ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}
2320 dma = xsk_buff_xdp_get_dma(bi->xdp);
2321 desc->read.pkt_addr = cpu_to_le64(dma);
		desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			desc = IGC_RX_DESC(ring, 0);
2328 bi = ring->rx_buffer_info;
			i -= ring->count;
		}

		/* Clear the length for the next_to_use descriptor. */
		desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += ring->count;
2340 if (ring->next_to_use != i) {
2341 ring->next_to_use = i;
		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, ring->tail);
	}

	return ok;
}
2355 /* This function requires __netif_tx_lock is held by the caller. */
2356 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2357 struct xdp_frame *xdpf)
2359 struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
2360 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
2361 u16 count, index = ring->next_to_use;
2362 struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
2363 struct igc_tx_buffer *buffer = head;
2364 union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
2365 u32 olinfo_status, len = xdpf->len, cmd_type;
	void *data = xdpf->data;
	u16 i;
2369 count = TXD_USE_COUNT(len);
2370 for (i = 0; i < nr_frags; i++)
2371 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
	if (igc_maybe_stop_tx(ring, count + 3)) {
		/* this is a hard error */
		return -EBUSY;
	}

	i = 0;
2379 head->bytecount = xdp_get_frame_len(xdpf);
	head->type = IGC_TX_BUFFER_TYPE_XDP;
	head->gso_segs = 1;
	head->xdpf = xdpf;
2384 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2385 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
	for (;;) {
		dma_addr_t dma;

		dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
2391 if (dma_mapping_error(ring->dev, dma)) {
2392 netdev_err_once(ring->netdev,
					"Failed to map DMA for TX\n");
			goto unmap;
		}
2397 dma_unmap_len_set(buffer, len, len);
2398 dma_unmap_addr_set(buffer, dma, dma);
2400 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2401 IGC_ADVTXD_DCMD_IFCS | len;
2403 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2404 desc->read.buffer_addr = cpu_to_le64(dma);
2406 buffer->protocol = 0;
		if (++index == ring->count)
			index = 0;

		if (i == nr_frags)
			break;

		buffer = &ring->tx_buffer_info[index];
		desc = IGC_TX_DESC(ring, index);
		desc->read.olinfo_status = 0;

		data = skb_frag_address(&sinfo->frags[i]);
		len = skb_frag_size(&sinfo->frags[i]);
		i++;
	}
2422 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
2424 netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
2425 /* set the timestamp */
2426 head->time_stamp = jiffies;
2427 /* set next_to_watch value indicating a packet is present */
2428 head->next_to_watch = desc;
	ring->next_to_use = index;

	return 0;

unmap:
	for (;;) {
2435 buffer = &ring->tx_buffer_info[index];
2436 if (dma_unmap_len(buffer, len))
2437 dma_unmap_page(ring->dev,
2438 dma_unmap_addr(buffer, dma),
				       dma_unmap_len(buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(buffer, len, 0);
		if (buffer == head)
			break;

		if (!index)
			index += ring->count;
		index--;
	}

	return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
					    int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= adapter->num_tx_queues)
		index -= adapter->num_tx_queues;
2464 return adapter->tx_ring[index];
2467 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2469 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2470 int cpu = smp_processor_id();
2471 struct netdev_queue *nq;
	struct igc_ring *ring;
	int res;
	if (unlikely(!xdpf))
		return -EFAULT;
2478 ring = igc_xdp_get_tx_ring(adapter, cpu);
2479 nq = txring_txq(ring);
2481 __netif_tx_lock(nq, cpu);
2482 /* Avoid transmit queue timeout since we share it with the slow path */
2483 txq_trans_cond_update(nq);
2484 res = igc_xdp_init_tx_descriptor(ring, xdpf);
	__netif_tx_unlock(nq);

	return res;
}
2489 /* This function assumes rcu_read_lock() is held by the caller. */
2490 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2491 struct bpf_prog *prog,
2492 struct xdp_buff *xdp)
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		return IGC_XDP_PASS;
	case XDP_TX:
		if (igc_xdp_xmit_back(adapter, xdp) < 0)
			goto out_failure;
		return IGC_XDP_TX;
	case XDP_REDIRECT:
		if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		return IGC_XDP_REDIRECT;
	default:
		bpf_warn_invalid_xdp_action(adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		return IGC_XDP_CONSUMED;
	}
}
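/* The IGC_XDP_* values returned here are driver-internal status bits, not
 * the BPF verdict codes themselves; callers OR them together so that
 * igc_finalize_xdp() can batch the tail bump and the redirect flush once
 * per NAPI poll instead of once per frame.
 */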
2520 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2521 struct xdp_buff *xdp)
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(adapter->xdp_prog);
	if (!prog) {
		res = IGC_XDP_PASS;
		goto out;
	}

	res = __igc_xdp_run_prog(adapter, prog, xdp);

out:
	return ERR_PTR(-res);
}
2538 /* This function assumes __netif_tx_lock is held by the caller. */
2539 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2541 /* Once tail pointer is updated, hardware can fetch the descriptors
2542 * any time so we issue a write membar here to ensure all memory
	 * writes are complete before the tail pointer is updated.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}
2549 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2551 int cpu = smp_processor_id();
2552 struct netdev_queue *nq;
2553 struct igc_ring *ring;
2555 if (status & IGC_XDP_TX) {
2556 ring = igc_xdp_get_tx_ring(adapter, cpu);
2557 nq = txring_txq(ring);
2559 __netif_tx_lock(nq, cpu);
2560 igc_flush_tx_descriptors(ring);
2561 __netif_tx_unlock(nq);
	if (status & IGC_XDP_REDIRECT)
		xdp_do_flush();
}
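/* xdp_do_flush() must run before the NAPI poll returns and on the CPU that
 * performed the redirects, which is why the flush is deferred to this
 * single per-poll point rather than issued per packet.
 */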
2568 static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2569 unsigned int packets, unsigned int bytes)
2571 struct igc_ring *ring = q_vector->rx.ring;
2573 u64_stats_update_begin(&ring->rx_syncp);
2574 ring->rx_stats.packets += packets;
2575 ring->rx_stats.bytes += bytes;
2576 u64_stats_update_end(&ring->rx_syncp);
2578 q_vector->rx.total_packets += packets;
2579 q_vector->rx.total_bytes += bytes;
2582 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2584 unsigned int total_bytes = 0, total_packets = 0;
2585 struct igc_adapter *adapter = q_vector->adapter;
2586 struct igc_ring *rx_ring = q_vector->rx.ring;
2587 struct sk_buff *skb = rx_ring->skb;
2588 u16 cleaned_count = igc_desc_unused(rx_ring);
2589 int xdp_status = 0, rx_buffer_pgcnt;
2591 while (likely(total_packets < budget)) {
2592 struct igc_xdp_buff ctx = { .rx_ts = NULL };
2593 struct igc_rx_buffer *rx_buffer;
2594 union igc_adv_rx_desc *rx_desc;
		unsigned int size, truesize;
		int pkt_offset = 0;
		void *pktbuf;
2599 /* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}
2605 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;
2610 /* This memory barrier is needed to keep us from reading
2611 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();
2616 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2617 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2619 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
		if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
			ctx.rx_ts = pktbuf;
			pkt_offset = IGC_TS_HDR_LEN;
			size -= IGC_TS_HDR_LEN;
		}
		if (!skb) {
			xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq);
			xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring),
					 igc_rx_offset(rx_ring) + pkt_offset,
					 size, true);
			xdp_buff_clear_frags_flag(&ctx.xdp);
			ctx.rx_desc = rx_desc;

			skb = igc_xdp_run_prog(adapter, &ctx.xdp);
		}
		if (IS_ERR(skb)) {
			unsigned int xdp_res = -PTR_ERR(skb);

			switch (xdp_res) {
			case IGC_XDP_CONSUMED:
				rx_buffer->pagecnt_bias++;
				break;
			case IGC_XDP_TX:
			case IGC_XDP_REDIRECT:
				igc_rx_buffer_flip(rx_buffer, truesize);
				xdp_status |= xdp_res;
				break;
			}

			total_packets++;
			total_bytes += size;
		} else if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, &ctx.xdp);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer, &ctx);
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
		cleaned_count++;
2671 /* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;
2675 /* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}
2681 /* probably a little skewed due to removing CRC */
2682 total_bytes += skb->len;
2684 /* populate checksum, VLAN, and protocol */
2685 igc_process_skb_fields(rx_ring, rx_desc, skb);
2687 napi_gro_receive(&q_vector->napi, skb);
		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	if (xdp_status)
		igc_finalize_xdp(adapter, xdp_status);
	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;
2702 igc_update_rx_stats(q_vector, total_packets, total_bytes);
	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);
2707 return total_packets;
2710 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2711 struct xdp_buff *xdp)
2713 unsigned int totalsize = xdp->data_end - xdp->data_meta;
2714 unsigned int metasize = xdp->data - xdp->data_meta;
2715 struct sk_buff *skb;
2717 net_prefetch(xdp->data_meta);
2719 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;
2724 memcpy(__skb_put(skb, totalsize), xdp->data_meta,
2725 ALIGN(totalsize, sizeof(long)));
	if (metasize) {
		skb_metadata_set(skb, metasize);
		__skb_pull(skb, metasize);
	}

	return skb;
}
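/* In the zero-copy path the frame still lives in the AF_XDP UMEM, so it is
 * copied into a freshly allocated skb here; the UMEM buffer can then be
 * handed back to the fill queue independently of the skb's lifetime.
 */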
2735 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2736 union igc_adv_rx_desc *desc,
				struct xdp_buff *xdp,
				ktime_t timestamp)
{
2740 struct igc_ring *ring = q_vector->rx.ring;
2741 struct sk_buff *skb;
	skb = igc_construct_skb_zc(ring, xdp);
	if (!skb) {
		ring->rx_stats.alloc_failed++;
		return;
	}

	if (timestamp)
		skb_hwtstamps(skb)->hwtstamp = timestamp;

	if (igc_cleanup_headers(ring, desc, skb))
		return;
2755 igc_process_skb_fields(ring, desc, skb);
2756 napi_gro_receive(&q_vector->napi, skb);
2759 static struct igc_xdp_buff *xsk_buff_to_igc_ctx(struct xdp_buff *xdp)
	/* The xdp_buff used by the ZC code path is allocated as an
	 * xdp_buff_xsk. struct igc_xdp_buff shares its layout with
	 * xdp_buff_xsk, and the private igc_xdp_buff fields fall into
	 * xdp_buff_xsk->cb.
	 */
2765 return (struct igc_xdp_buff *)xdp;
2768 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2770 struct igc_adapter *adapter = q_vector->adapter;
2771 struct igc_ring *ring = q_vector->rx.ring;
2772 u16 cleaned_count = igc_desc_unused(ring);
2773 int total_bytes = 0, total_packets = 0;
2774 u16 ntc = ring->next_to_clean;
2775 struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
2781 prog = READ_ONCE(adapter->xdp_prog);
2783 while (likely(total_packets < budget)) {
2784 union igc_adv_rx_desc *desc;
2785 struct igc_rx_buffer *bi;
2786 struct igc_xdp_buff *ctx;
		ktime_t timestamp = 0;
		unsigned int size;
		int res;
2791 desc = IGC_RX_DESC(ring, ntc);
		size = le16_to_cpu(desc->wb.upper.length);
		if (!size)
			break;
2796 /* This memory barrier is needed to keep us from reading
2797 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();
2802 bi = &ring->rx_buffer_info[ntc];
2804 ctx = xsk_buff_to_igc_ctx(bi->xdp);
2805 ctx->rx_desc = desc;
2807 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2808 ctx->rx_ts = bi->xdp->data;
2810 bi->xdp->data += IGC_TS_HDR_LEN;
2812 /* HW timestamp has been copied into local variable. Metadata
2813 * length when XDP program is called should be 0.
2815 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2816 size -= IGC_TS_HDR_LEN;
		}

		bi->xdp->data_end = bi->xdp->data + size;
2820 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
		res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
		switch (res) {
		case IGC_XDP_PASS:
			igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
			fallthrough;
		case IGC_XDP_CONSUMED:
			xsk_buff_free(bi->xdp);
			break;
		case IGC_XDP_TX:
		case IGC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		bi->xdp = NULL;
		total_bytes += size;
		total_packets++;
		cleaned_count++;
		ntc++;
		if (ntc == ring->count)
			ntc = 0;
	}
2845 ring->next_to_clean = ntc;
2848 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2849 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
	if (xdp_status)
		igc_finalize_xdp(adapter, xdp_status);
2854 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2856 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2857 if (failure || ring->next_to_clean == ring->next_to_use)
2858 xsk_set_rx_need_wakeup(ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(ring->xsk_pool);
2861 return total_packets;
2864 return failure ? budget : total_packets;
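/* With the need_wakeup protocol the driver only asks user space to kick
 * the socket when it actually ran dry (an allocation failure or a fully
 * consumed ring); otherwise the wakeup flag stays clear and user space can
 * poll the rings without issuing syscalls.
 */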
2867 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2868 unsigned int packets, unsigned int bytes)
2870 struct igc_ring *ring = q_vector->tx.ring;
2872 u64_stats_update_begin(&ring->tx_syncp);
2873 ring->tx_stats.bytes += bytes;
2874 ring->tx_stats.packets += packets;
2875 u64_stats_update_end(&ring->tx_syncp);
2877 q_vector->tx.total_bytes += bytes;
2878 q_vector->tx.total_packets += packets;
2881 static void igc_xdp_xmit_zc(struct igc_ring *ring)
2883 struct xsk_buff_pool *pool = ring->xsk_pool;
2884 struct netdev_queue *nq = txring_txq(ring);
2885 union igc_adv_tx_desc *tx_desc = NULL;
2886 int cpu = smp_processor_id();
	struct xdp_desc xdp_desc;
	u16 budget, ntu;
	if (!netif_carrier_ok(ring->netdev))
		return;
2893 __netif_tx_lock(nq, cpu);
2895 /* Avoid transmit queue timeout since we share it with the slow path */
2896 txq_trans_cond_update(nq);
2898 ntu = ring->next_to_use;
2899 budget = igc_desc_unused(ring);
2901 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2902 u32 cmd_type, olinfo_status;
		struct igc_tx_buffer *bi;
		dma_addr_t dma;
2906 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
			   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
			   xdp_desc.len;
2909 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2911 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2912 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2914 tx_desc = IGC_TX_DESC(ring, ntu);
2915 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2916 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2917 tx_desc->read.buffer_addr = cpu_to_le64(dma);
2919 bi = &ring->tx_buffer_info[ntu];
2920 bi->type = IGC_TX_BUFFER_TYPE_XSK;
		bi->protocol = 0;
		bi->bytecount = xdp_desc.len;
		bi->gso_segs = 1;
2924 bi->time_stamp = jiffies;
2925 bi->next_to_watch = tx_desc;
2927 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
		ntu++;
		if (ntu == ring->count)
			ntu = 0;
	}
2934 ring->next_to_use = ntu;
	if (tx_desc) {
		igc_flush_tx_descriptors(ring);
		xsk_tx_release(pool);
	}
2940 __netif_tx_unlock(nq);
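/* The loop above transmits at most igc_desc_unused() frames per call, so
 * the Tx ring shared with the slow path can never be overrun by the XSK
 * path even when user space has queued more descriptors than fit.
 */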
2944 * igc_clean_tx_irq - Reclaim resources after transmit completes
2945 * @q_vector: pointer to q_vector containing needed info
2946 * @napi_budget: Used to determine if we are in netpoll
2948 * returns true if ring is completely cleaned
2950 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2952 struct igc_adapter *adapter = q_vector->adapter;
2953 unsigned int total_bytes = 0, total_packets = 0;
2954 unsigned int budget = q_vector->tx.work_limit;
2955 struct igc_ring *tx_ring = q_vector->tx.ring;
2956 unsigned int i = tx_ring->next_to_clean;
2957 struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 xsk_frames = 0;

	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;
2964 tx_buffer = &tx_ring->tx_buffer_info[i];
2965 tx_desc = IGC_TX_DESC(tx_ring, i);
2966 i -= tx_ring->count;
	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2971 /* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();
2978 /* if DD is not set pending work has not been completed */
2979 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2982 /* clear next_to_watch to prevent false hangs */
2983 tx_buffer->next_to_watch = NULL;
2985 /* update the statistics for this packet */
2986 total_bytes += tx_buffer->bytecount;
2987 total_packets += tx_buffer->gso_segs;
2989 switch (tx_buffer->type) {
		case IGC_TX_BUFFER_TYPE_XSK:
			xsk_frames++;
			break;
2993 case IGC_TX_BUFFER_TYPE_XDP:
2994 xdp_return_frame(tx_buffer->xdpf);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
2997 case IGC_TX_BUFFER_TYPE_SKB:
2998 napi_consume_skb(tx_buffer->skb, napi_budget);
			igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
			break;
		default:
			netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
			break;
		}
3006 /* clear last DMA location and unmap remaining buffers */
3007 while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
3013 tx_buffer = tx_ring->tx_buffer_info;
3014 tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
3018 if (dma_unmap_len(tx_buffer, len))
				igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
		}
		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
3028 tx_buffer = tx_ring->tx_buffer_info;
3029 tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));
3039 netdev_tx_completed_queue(txring_txq(tx_ring),
3040 total_packets, total_bytes);
3042 i += tx_ring->count;
3043 tx_ring->next_to_clean = i;
3045 igc_update_tx_stats(q_vector, total_packets, total_bytes);
3047 if (tx_ring->xsk_pool) {
		if (xsk_frames)
			xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
3050 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
3051 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
3052 igc_xdp_xmit_zc(tx_ring);
3055 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
3056 struct igc_hw *hw = &adapter->hw;
3058 /* Detect a transmit hang in hardware, this serializes the
3059 * check with the clearing of time_stamp and movement of i
3061 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
3062 if (tx_buffer->next_to_watch &&
3063 time_after(jiffies, tx_buffer->time_stamp +
3064 (adapter->tx_timeout_factor * HZ)) &&
3065 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
3066 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) &&
3067 !tx_ring->oper_gate_closed) {
3068 /* detected Tx unit hang */
3069 netdev_err(tx_ring->netdev,
				   "Detected Tx Unit Hang\n"
				   "  Tx Queue             <%d>\n"
				   "  TDH                  <%x>\n"
				   "  TDT                  <%x>\n"
				   "  next_to_use          <%x>\n"
				   "  next_to_clean        <%x>\n"
				   "buffer_info[next_to_clean]\n"
				   "  time_stamp           <%lx>\n"
				   "  next_to_watch        <%p>\n"
				   "  jiffies              <%lx>\n"
				   "  desc.status          <%x>\n",
3081 tx_ring->queue_index,
3082 rd32(IGC_TDH(tx_ring->reg_idx)),
3083 readl(tx_ring->tail),
3084 tx_ring->next_to_use,
3085 tx_ring->next_to_clean,
3086 tx_buffer->time_stamp,
				   tx_buffer->next_to_watch,
				   jiffies,
				   tx_buffer->next_to_watch->wb.status);
3090 netif_stop_subqueue(tx_ring->netdev,
3091 tx_ring->queue_index);
			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}
3098 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
3099 if (unlikely(total_packets &&
3100 netif_carrier_ok(tx_ring->netdev) &&
3101 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
3102 /* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
3106 if (__netif_subqueue_stopped(tx_ring->netdev,
3107 tx_ring->queue_index) &&
3108 !(test_bit(__IGC_DOWN, &adapter->state))) {
3109 netif_wake_subqueue(tx_ring->netdev,
3110 tx_ring->queue_index);
3112 u64_stats_update_begin(&tx_ring->tx_syncp);
3113 tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
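/* The smp_mb() above pairs with the corresponding barrier in the transmit
 * path's stop logic: the queue is stopped only after re-checking ring
 * space, and woken only after the new next_to_clean is visible, so a
 * wakeup cannot be lost between the two sides.
 */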
3121 static int igc_find_mac_filter(struct igc_adapter *adapter,
3122 enum igc_mac_filter_type type, const u8 *addr)
3124 struct igc_hw *hw = &adapter->hw;
3125 int max_entries = hw->mac.rar_entry_count;
3129 for (i = 0; i < max_entries; i++) {
3130 ral = rd32(IGC_RAL(i));
3131 rah = rd32(IGC_RAH(i));
		if (!(rah & IGC_RAH_AV))
			continue;
		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
			continue;
		if ((rah & IGC_RAH_RAH_MASK) !=
		    le16_to_cpup((__le16 *)(addr + 4)))
			continue;
		if (ral != le32_to_cpup((__le32 *)(addr)))
			continue;

		return i;
	}

	return -1;
}
3149 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
3151 struct igc_hw *hw = &adapter->hw;
3152 int max_entries = hw->mac.rar_entry_count;
3156 for (i = 0; i < max_entries; i++) {
3157 rah = rd32(IGC_RAH(i));
		if (!(rah & IGC_RAH_AV))
			return i;
	}

	return -1;
}
3167 * igc_add_mac_filter() - Add MAC address filter
3168 * @adapter: Pointer to adapter where the filter should be added
3169 * @type: MAC address filter type (source or destination)
3170 * @addr: MAC address
3171 * @queue: If non-negative, queue assignment feature is enabled and frames
3172 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3173 * assignment is disabled.
3175 * Return: 0 in case of success, negative errno code otherwise.
3177 static int igc_add_mac_filter(struct igc_adapter *adapter,
3178 enum igc_mac_filter_type type, const u8 *addr,
3181 struct net_device *dev = adapter->netdev;
	index = igc_find_mac_filter(adapter, type, addr);
	if (index >= 0)
		goto update_filter;

	index = igc_get_avail_mac_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;
3192 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
		   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
		   addr, queue);

update_filter:
	igc_set_mac_filter_hw(adapter, index, type, addr, queue);

	return 0;
}
3202 * igc_del_mac_filter() - Delete MAC address filter
3203 * @adapter: Pointer to adapter where the filter should be deleted from
3204 * @type: MAC address filter type (source or destination)
3205 * @addr: MAC address
3207 static void igc_del_mac_filter(struct igc_adapter *adapter,
3208 enum igc_mac_filter_type type, const u8 *addr)
3210 struct net_device *dev = adapter->netdev;
	index = igc_find_mac_filter(adapter, type, addr);
	if (index < 0)
		return;

	if (index == 0) {
		/* If this is the default filter, we don't actually delete it.
3219 * We just reset to its default value i.e. disable queue
		 * assignment.
		 */
		netdev_dbg(dev, "Disable default MAC filter queue assignment");

		igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
	} else {
3226 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
			   index,
			   type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
			   addr);

		igc_clear_mac_filter_hw(adapter, index);
	}
}
3236 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3237 * @adapter: Pointer to adapter where the filter should be added
3238 * @prio: VLAN priority value
3239 * @queue: Queue number which matching frames are assigned to
3241 * Return: 0 in case of success, negative errno code otherwise.
3243 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
3246 struct net_device *dev = adapter->netdev;
3247 struct igc_hw *hw = &adapter->hw;
3250 vlanpqf = rd32(IGC_VLANPQF);
3252 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
		netdev_dbg(dev, "VLAN priority filter already in use\n");
		return -EEXIST;
	}
3257 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
3258 vlanpqf |= IGC_VLANPQF_VALID(prio);
3260 wr32(IGC_VLANPQF, vlanpqf);
	netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
		   prio, queue);

	return 0;
}
3268 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3269 * @adapter: Pointer to adapter where the filter should be deleted from
3270 * @prio: VLAN priority value
3272 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
3274 struct igc_hw *hw = &adapter->hw;
3277 vlanpqf = rd32(IGC_VLANPQF);
3279 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
3280 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
3282 wr32(IGC_VLANPQF, vlanpqf);
	netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
		   prio);
}
3288 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
3290 struct igc_hw *hw = &adapter->hw;
3293 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3294 u32 etqf = rd32(IGC_ETQF(i));
		if (!(etqf & IGC_ETQF_FILTER_ENABLE))
			return i;
	}

	return -1;
}
3304 * igc_add_etype_filter() - Add ethertype filter
3305 * @adapter: Pointer to adapter where the filter should be added
3306 * @etype: Ethertype value
3307 * @queue: If non-negative, queue assignment feature is enabled and frames
3308 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3309 * assignment is disabled.
3311 * Return: 0 in case of success, negative errno code otherwise.
3313 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3316 struct igc_hw *hw = &adapter->hw;
	index = igc_get_avail_etype_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;
3324 etqf = rd32(IGC_ETQF(index));
	etqf &= ~IGC_ETQF_ETYPE_MASK;
	etqf |= etype;

	if (queue >= 0) {
		etqf &= ~IGC_ETQF_QUEUE_MASK;
		etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
		etqf |= IGC_ETQF_QUEUE_ENABLE;
	}

	etqf |= IGC_ETQF_FILTER_ENABLE;
3337 wr32(IGC_ETQF(index), etqf);
	netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
		   etype, queue);

	return 0;
}
3344 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3346 struct igc_hw *hw = &adapter->hw;
3349 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3350 u32 etqf = rd32(IGC_ETQF(i));
		if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
			return i;
	}

	return -1;
}
3360 * igc_del_etype_filter() - Delete ethertype filter
3361 * @adapter: Pointer to adapter where the filter should be deleted from
3362 * @etype: Ethertype value
3364 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3366 struct igc_hw *hw = &adapter->hw;
	index = igc_find_etype_filter(adapter, etype);
	if (index < 0)
		return;
3373 wr32(IGC_ETQF(index), 0);
	netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
		   etype);
}
3379 static int igc_flex_filter_select(struct igc_adapter *adapter,
3380 struct igc_flex_filter *input,
3383 struct igc_hw *hw = &adapter->hw;
3387 if (input->index >= MAX_FLEX_FILTER) {
		netdev_err(adapter->netdev, "Wrong Flex Filter index selected!\n");
		return -EINVAL;
	}
3392 /* Indirect table select register */
3393 fhftsl = rd32(IGC_FHFTSL);
3394 fhftsl &= ~IGC_FHFTSL_FTSL_MASK;
	switch (input->index) {
	case 0 ... 7:
		fhftsl |= 0x00;
		break;
	case 8 ... 15:
		fhftsl |= 0x01;
		break;
	case 16 ... 23:
		fhftsl |= 0x02;
		break;
	case 24 ... 31:
		fhftsl |= 0x03;
		break;
	}
	wr32(IGC_FHFTSL, fhftsl);
3411 /* Normalize index down to host table register */
3412 fhft_index = input->index % 8;
3414 *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) :
		IGC_FHFT_EXT(fhft_index - 4);

	return 0;
}
3420 static int igc_write_flex_filter_ll(struct igc_adapter *adapter,
3421 struct igc_flex_filter *input)
3423 struct igc_hw *hw = &adapter->hw;
3424 u8 *data = input->data;
3425 u8 *mask = input->mask;
3432 /* Length has to be aligned to 8. Otherwise the filter will fail. Bail
3433 * out early to avoid surprises later.
3435 if (input->length % 8 != 0) {
		netdev_err(adapter->netdev, "The length of a flex filter has to be 8 byte aligned!\n");
		return -EINVAL;
	}
3440 /* Select corresponding flex filter register and get base for host table. */
	ret = igc_flex_filter_select(adapter, input, &fhft);
	if (ret)
		return ret;
3445 /* When adding a filter globally disable flex filter feature. That is
3446 * recommended within the datasheet.
3448 wufc = rd32(IGC_WUFC);
3449 wufc &= ~IGC_WUFC_FLEX_HQ;
3450 wr32(IGC_WUFC, wufc);
3452 /* Configure filter */
3453 queuing = input->length & IGC_FHFT_LENGTH_MASK;
3454 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue);
3455 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio);
3457 if (input->immediate_irq)
3458 queuing |= IGC_FHFT_IMM_INT;
	if (input->drop)
		queuing |= IGC_FHFT_DROP;
3463 wr32(fhft + 0xFC, queuing);
3465 /* Write data (128 byte) and mask (128 bit) */
3466 for (i = 0; i < 16; ++i) {
3467 const size_t data_idx = i * 8;
3468 const size_t row_idx = i * 16;
		u32 dw0 =
			(data[data_idx + 0] << 0) |
3471 (data[data_idx + 1] << 8) |
3472 (data[data_idx + 2] << 16) |
3473 (data[data_idx + 3] << 24);
		u32 dw1 =
			(data[data_idx + 4] << 0) |
3476 (data[data_idx + 5] << 8) |
3477 (data[data_idx + 6] << 16) |
			(data[data_idx + 7] << 24);
		u32 tmp;
3481 /* Write row: dw0, dw1 and mask */
3482 wr32(fhft + row_idx, dw0);
3483 wr32(fhft + row_idx + 4, dw1);
3485 /* mask is only valid for MASK(7, 0) */
3486 tmp = rd32(fhft + row_idx + 8);
		tmp &= ~GENMASK(7, 0);
		tmp |= mask[i];
3489 wr32(fhft + row_idx + 8, tmp);
3492 /* Enable filter. */
3493 wufc |= IGC_WUFC_FLEX_HQ;
3494 if (input->index > 8) {
3495 /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. */
3496 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3498 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8));
		wr32(IGC_WUFC_EXT, wufc_ext);
	} else {
		wufc |= (IGC_WUFC_FLX0 << input->index);
	}
3504 wr32(IGC_WUFC, wufc);
	netdev_dbg(adapter->netdev, "Added flex filter %u to HW.\n",
		   input->index);

	return 0;
}
3512 static void igc_flex_filter_add_field(struct igc_flex_filter *flex,
3513 const void *src, unsigned int offset,
3514 size_t len, const void *mask)
3519 memcpy(&flex->data[offset], src, len);
3522 for (i = 0; i < len; ++i) {
3523 const unsigned int idx = i + offset;
		const u8 *ptr = mask;

		if (mask) {
			if (ptr[i] & 0xff)
				flex->mask[idx / 8] |= BIT(idx % 8);

			continue;
		}

		flex->mask[idx / 8] |= BIT(idx % 8);
	}
}
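/* Example: matching a 2-byte ethertype at offset 12 copies the value into
 * data[12..13] and sets mask bits 12 and 13, i.e. mask[1] gets
 * BIT(4) | BIT(5); every unset mask bit is a "don't care" byte for the
 * hardware comparator.
 */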
3537 static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter)
3539 struct igc_hw *hw = &adapter->hw;
3543 wufc = rd32(IGC_WUFC);
3544 wufc_ext = rd32(IGC_WUFC_EXT);
3546 for (i = 0; i < MAX_FLEX_FILTER; i++) {
		if (i < 8) {
			if (!(wufc & (IGC_WUFC_FLX0 << i)))
				return i;
		} else {
			if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8))))
				return i;
		}
	}

	return -ENOSPC;
}
3559 static bool igc_flex_filter_in_use(struct igc_adapter *adapter)
3561 struct igc_hw *hw = &adapter->hw;
3564 wufc = rd32(IGC_WUFC);
3565 wufc_ext = rd32(IGC_WUFC_EXT);
	if (wufc & IGC_WUFC_FILTER_MASK)
		return true;

	if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK)
		return true;

	return false;
}
3576 static int igc_add_flex_filter(struct igc_adapter *adapter,
3577 struct igc_nfc_rule *rule)
3579 struct igc_nfc_filter *filter = &rule->filter;
3580 unsigned int eth_offset, user_offset;
3581 struct igc_flex_filter flex = { };
	index = igc_find_avail_flex_filter_slot(adapter);
	if (index < 0)
		return -ENOSPC;
	/* Construct the flex filter:
	 *  -> dest_mac [6]
	 *  -> src_mac [6]
	 *  -> tpid [2]
	 *  -> vlan tci [2]
	 *  -> ether type [2]
	 *  -> user data [8]
	 *  -> = 26 bytes => 32 length
	 */
	flex.index    = index;
	flex.length   = 32;
	flex.rx_queue = rule->action;
3602 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype;
3603 eth_offset = vlan ? 16 : 12;
3604 user_offset = vlan ? 18 : 14;
3606 /* Add destination MAC */
3607 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
		igc_flex_filter_add_field(&flex, &filter->dst_addr, 0,
					  ETH_ALEN, NULL);
3611 /* Add source MAC */
3612 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
		igc_flex_filter_add_field(&flex, &filter->src_addr, 6,
					  ETH_ALEN, NULL);
3616 /* Add VLAN etype */
3617 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) {
3618 __be16 vlan_etype = cpu_to_be16(filter->vlan_etype);
3620 igc_flex_filter_add_field(&flex, &vlan_etype, 12,
3621 sizeof(vlan_etype), NULL);
3625 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
3626 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14,
3627 sizeof(filter->vlan_tci), NULL);
3629 /* Add Ether type */
3630 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3631 __be16 etype = cpu_to_be16(filter->etype);
3633 igc_flex_filter_add_field(&flex, &etype, eth_offset,
3634 sizeof(etype), NULL);
3638 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA)
3639 igc_flex_filter_add_field(&flex, &filter->user_data,
					  user_offset,
					  sizeof(filter->user_data),
					  filter->user_mask);
3644 /* Add it down to the hardware and enable it. */
	ret = igc_write_flex_filter_ll(adapter, &flex);
	if (ret)
		return ret;

	filter->flex_index = index;

	return 0;
}
static void igc_del_flex_filter(struct igc_adapter *adapter,
				u16 reg_index)
{
	struct igc_hw *hw = &adapter->hw;
	u32 wufc;
3660 /* Just disable the filter. The filter table itself is kept
3661 * intact. Another flex_filter_add() should override the "old" data
3664 if (reg_index > 8) {
3665 u32 wufc_ext = rd32(IGC_WUFC_EXT);
3667 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8));
		wr32(IGC_WUFC_EXT, wufc_ext);
	} else {
		wufc = rd32(IGC_WUFC);
3672 wufc &= ~(IGC_WUFC_FLX0 << reg_index);
		wr32(IGC_WUFC, wufc);
	}

	if (igc_flex_filter_in_use(adapter))
		return;
3679 /* No filters are in use, we may disable flex filters */
3680 wufc = rd32(IGC_WUFC);
3681 wufc &= ~IGC_WUFC_FLEX_HQ;
3682 wr32(IGC_WUFC, wufc);
3685 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3686 struct igc_nfc_rule *rule)
{
	int err;

	if (rule->flex)
		return igc_add_flex_filter(adapter, rule);
3694 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
		err = igc_add_etype_filter(adapter, rule->filter.etype,
					   rule->action);
		if (err)
			return err;
	}
3701 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3702 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
					 rule->filter.src_addr, rule->action);
		if (err)
			return err;
	}
3708 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3709 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
					 rule->filter.dst_addr, rule->action);
		if (err)
			return err;
	}
3715 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3716 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
		err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
		if (err)
			return err;
	}

	return 0;
}
3726 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3727 const struct igc_nfc_rule *rule)
{
	if (rule->flex) {
		igc_del_flex_filter(adapter, rule->filter.flex_index);
		return;
	}
3734 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3735 igc_del_etype_filter(adapter, rule->filter.etype);
3737 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3738 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci);
3740 igc_del_vlan_prio_filter(adapter, prio);
3743 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3744 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3745 rule->filter.src_addr);
3747 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3748 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3749 rule->filter.dst_addr);
3753 * igc_get_nfc_rule() - Get NFC rule
3754 * @adapter: Pointer to adapter
3755 * @location: Rule location
3757 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3759 * Return: Pointer to NFC rule at @location. If not found, NULL.
3761 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3764 struct igc_nfc_rule *rule;
3766 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
		if (rule->location == location)
			return rule;
		if (rule->location > location)
			break;
	}

	return NULL;
}
3777 * igc_del_nfc_rule() - Delete NFC rule
3778 * @adapter: Pointer to adapter
3779 * @rule: Pointer to rule to be deleted
3781 * Disable NFC rule in hardware and delete it from adapter.
3783 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3785 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3787 igc_disable_nfc_rule(adapter, rule);
3789 list_del(&rule->list);
	adapter->nfc_rule_count--;

	kfree(rule);
}
3795 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3797 struct igc_nfc_rule *rule, *tmp;
3799 mutex_lock(&adapter->nfc_rule_lock);
3801 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3802 igc_del_nfc_rule(adapter, rule);
3804 mutex_unlock(&adapter->nfc_rule_lock);
3808 * igc_add_nfc_rule() - Add NFC rule
3809 * @adapter: Pointer to adapter
3810 * @rule: Pointer to rule to be added
3812 * Enable NFC rule in hardware and add it to adapter.
3814 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3816 * Return: 0 on success, negative errno on failure.
3818 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
	struct igc_nfc_rule *pred, *cur;
	int err;
	err = igc_enable_nfc_rule(adapter, rule);
	if (err)
		return err;

	pred = NULL;
	list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
		if (cur->location >= rule->location)
			break;
		pred = cur;
	}
3834 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
	adapter->nfc_rule_count++;

	return 0;
}
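/* Rules are kept sorted by location so that igc_get_nfc_rule() can stop
 * scanning early and igc_restore_nfc_rules() can replay the list in
 * reverse to preserve the original precedence after a reset.
 */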
3839 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3841 struct igc_nfc_rule *rule;
3843 mutex_lock(&adapter->nfc_rule_lock);
3845 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3846 igc_enable_nfc_rule(adapter, rule);
3848 mutex_unlock(&adapter->nfc_rule_lock);
3851 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3853 struct igc_adapter *adapter = netdev_priv(netdev);
3855 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3858 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3860 struct igc_adapter *adapter = netdev_priv(netdev);
	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);

	return 0;
}
3867 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3868 * @netdev: network interface device structure
3870 * The set_rx_mode entry point is called whenever the unicast or multicast
3871 * address lists or the network interface flags are updated. This routine is
3872 * responsible for configuring the hardware for proper unicast, multicast,
3873 * promiscuous mode, and all-multi behavior.
3875 static void igc_set_rx_mode(struct net_device *netdev)
3877 struct igc_adapter *adapter = netdev_priv(netdev);
3878 struct igc_hw *hw = &adapter->hw;
3879 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3882 /* Check for Promiscuous and All Multicast modes */
3883 if (netdev->flags & IFF_PROMISC) {
		rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= IGC_RCTL_MPE;
		} else {
			/* Write addresses to the MTA, if the attempt fails
3890 * then we should just turn on promiscuous mode so
3891 * that we can at least receive multicast traffic
			count = igc_write_mc_addr_list(netdev);
			if (count < 0)
				rctl |= IGC_RCTL_MPE;
		}
	}
3899 /* Write addresses to available RAR registers, if there is not
3900 * sufficient space to store all the addresses then enable
3901 * unicast promiscuous mode
3903 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3904 rctl |= IGC_RCTL_UPE;
3906 /* update state of unicast and multicast */
3907 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3908 wr32(IGC_RCTL, rctl);
3910 #if (PAGE_SIZE < 8192)
3911 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
		rlpml = IGC_MAX_FRAME_BUILD_SKB;
#endif
	wr32(IGC_RLPML, rlpml);
}
3918 * igc_configure - configure the hardware for RX and TX
3919 * @adapter: private board structure
3921 static void igc_configure(struct igc_adapter *adapter)
3923 struct net_device *netdev = adapter->netdev;
3926 igc_get_hw_control(adapter);
3927 igc_set_rx_mode(netdev);
3929 igc_restore_vlan(adapter);
3931 igc_setup_tctl(adapter);
3932 igc_setup_mrqc(adapter);
3933 igc_setup_rctl(adapter);
3935 igc_set_default_mac_filter(adapter);
3936 igc_restore_nfc_rules(adapter);
3938 igc_configure_tx(adapter);
3939 igc_configure_rx(adapter);
3941 igc_rx_fifo_flush_base(&adapter->hw);
3943 /* call igc_desc_unused which always leaves
3944 * at least 1 descriptor unused to make sure
3945 * next_to_use != next_to_clean
3947 for (i = 0; i < adapter->num_rx_queues; i++) {
3948 struct igc_ring *ring = adapter->rx_ring[i];
		if (ring->xsk_pool)
			igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
		else
			igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
	}
}
3958 * igc_write_ivar - configure ivar for given MSI-X vector
3959 * @hw: pointer to the HW structure
3960 * @msix_vector: vector number we are allocating to a given ring
3961 * @index: row index of IVAR register to write within IVAR table
3962 * @offset: column offset of in IVAR, should be multiple of 8
3964 * The IVAR table consists of 2 columns,
3965 * each containing an cause allocation for an Rx and Tx ring, and a
3966 * variable number of rows depending on the number of queues supported.
3968 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3969 int index, int offset)
3971 u32 ivar = array_rd32(IGC_IVAR0, index);
3973 /* clear any bits that are currently set */
3974 ivar &= ~((u32)0xFF << offset);
3976 /* write vector and valid bit */
3977 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3979 array_wr32(IGC_IVAR0, index, ivar);
3982 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3984 struct igc_adapter *adapter = q_vector->adapter;
3985 struct igc_hw *hw = &adapter->hw;
3986 int rx_queue = IGC_N0_QUEUE;
3987 int tx_queue = IGC_N0_QUEUE;
3989 if (q_vector->rx.ring)
3990 rx_queue = q_vector->rx.ring->reg_idx;
3991 if (q_vector->tx.ring)
3992 tx_queue = q_vector->tx.ring->reg_idx;
	switch (hw->mac.type) {
	case igc_i225:
		if (rx_queue > IGC_N0_QUEUE)
3997 igc_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
4000 if (tx_queue > IGC_N0_QUEUE)
4001 igc_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
4004 q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
		break;
	}
4011 /* add q_vector eims value to global eims_enable_mask */
4012 adapter->eims_enable_mask |= q_vector->eims_value;
4014 /* configure q_vector to set itr on first interrupt */
4015 q_vector->set_itr = 1;
4019 * igc_configure_msix - Configure MSI-X hardware
4020 * @adapter: Pointer to adapter structure
4022 * igc_configure_msix sets up the hardware to properly
4023 * generate MSI-X interrupts.
4025 static void igc_configure_msix(struct igc_adapter *adapter)
4027 struct igc_hw *hw = &adapter->hw;
4031 adapter->eims_enable_mask = 0;
4033 /* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case igc_i225:
		/* Turn on MSI-X capability first, or our settings
4037 * won't stick. And it will take days to debug.
4039 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
		     IGC_GPIE_PBA | IGC_GPIE_EIAME |
		     IGC_GPIE_NSICR);
4043 /* enable msix_other interrupt */
4044 adapter->eims_other = BIT(vector);
4045 tmp = (vector++ | IGC_IVAR_VALID) << 8;
4047 wr32(IGC_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
4052 } /* switch (hw->mac.type) */
4054 adapter->eims_enable_mask |= adapter->eims_other;
4056 for (i = 0; i < adapter->num_q_vectors; i++)
		igc_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
4063 * igc_irq_enable - Enable default interrupt generation settings
4064 * @adapter: board private structure
4066 static void igc_irq_enable(struct igc_adapter *adapter)
4068 struct igc_hw *hw = &adapter->hw;
4070 if (adapter->msix_entries) {
4071 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
4072 u32 regval = rd32(IGC_EIAC);
4074 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
4075 regval = rd32(IGC_EIAM);
4076 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
		wr32(IGC_EIMS, adapter->eims_enable_mask);
		wr32(IGC_IMS, ims);
	} else {
		wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4081 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
4086 * igc_irq_disable - Mask off interrupt generation on the NIC
4087 * @adapter: board private structure
4089 static void igc_irq_disable(struct igc_adapter *adapter)
4091 struct igc_hw *hw = &adapter->hw;
4093 if (adapter->msix_entries) {
4094 u32 regval = rd32(IGC_EIAM);
4096 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
4097 wr32(IGC_EIMC, adapter->eims_enable_mask);
4098 regval = rd32(IGC_EIAC);
		wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(IGC_IAM, 0);
	wr32(IGC_IMC, ~0);
	wrfl();

	if (adapter->msix_entries) {
		int vector = 0, i;
4109 synchronize_irq(adapter->msix_entries[vector++].vector);
4111 for (i = 0; i < adapter->num_q_vectors; i++)
4112 synchronize_irq(adapter->msix_entries[vector++].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
4118 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
4119 const u32 max_rss_queues)
4121 /* Determine if we need to pair queues. */
4122 /* If rss_queues > half of max_rss_queues, pair the queues in
4123 * order to conserve interrupts due to limited supply.
4125 if (adapter->rss_queues > (max_rss_queues / 2))
4126 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
	else
		adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
}
4131 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
4133 return IGC_MAX_RX_QUEUES;
4136 static void igc_init_queue_configuration(struct igc_adapter *adapter)
4140 max_rss_queues = igc_get_max_rss_queues(adapter);
4141 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
4143 igc_set_flag_queue_pairs(adapter, max_rss_queues);
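/* Example (assuming IGC_MAX_RX_QUEUES == 4, the i225/i226 queue count): on
 * a 16-CPU system rss_queues = min(4, 16) = 4; since 4 > 4 / 2,
 * IGC_FLAG_QUEUE_PAIRS is set and each MSI-X vector services one Tx/Rx
 * pair to stay within the limited vector budget.
 */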
4147 * igc_reset_q_vector - Reset config for interrupt vector
4148 * @adapter: board private structure to initialize
4149 * @v_idx: Index of vector to be reset
4151 * If NAPI is enabled it will delete any references to the
4152 * NAPI struct. This is preparation for igc_free_q_vector.
4154 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
4156 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
	/* if we're coming from igc_set_interrupt_capability, the vectors are
	 * not yet allocated
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
4165 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
4167 if (q_vector->rx.ring)
4168 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
4170 netif_napi_del(&q_vector->napi);
4174 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4175 * @adapter: board private structure to initialize
4176 * @v_idx: Index of vector to be freed
4178 * This function frees the memory allocated to the q_vector.
4180 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
4182 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
4184 adapter->q_vector[v_idx] = NULL;
4186 /* igc_get_stats64() might access the rings on this vector,
4187 * we must wait a grace period before freeing it.
4190 kfree_rcu(q_vector, rcu);
4194 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4195 * @adapter: board private structure to initialize
4197 * This function frees the memory allocated to the q_vectors. In addition if
4198 * NAPI is enabled it will delete any references to the NAPI struct prior
4199 * to freeing the q_vector.
4201 static void igc_free_q_vectors(struct igc_adapter *adapter)
4203 int v_idx = adapter->num_q_vectors;
4205 adapter->num_tx_queues = 0;
4206 adapter->num_rx_queues = 0;
4207 adapter->num_q_vectors = 0;
	while (v_idx--) {
		igc_reset_q_vector(adapter, v_idx);
		igc_free_q_vector(adapter, v_idx);
	}
}
4216 * igc_update_itr - update the dynamic ITR value based on statistics
4217 * @q_vector: pointer to q_vector
4218 * @ring_container: ring info to update the itr for
4220 * Stores a new ITR value based on packets and byte
4221 * counts during the last interrupt. The advantage of per interrupt
4222 * computation is faster updates and more accurate ITR for the current
4223 * traffic pattern. Constants in this function were computed
4224 * based on theoretical maximum wire speed and thresholds were set based
4225 * on testing data as well as attempting to minimize response time
4226 * while increasing bulk throughput.
4227 * NOTE: These calculations are only valid when operating in a single-
4228 * queue environment.
4230 static void igc_update_itr(struct igc_q_vector *q_vector,
4231 struct igc_ring_container *ring_container)
4233 unsigned int packets = ring_container->total_packets;
4234 unsigned int bytes = ring_container->total_bytes;
4235 u8 itrval = ring_container->itr;
	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
4243 /* handle TSO and jumbo frames */
4244 if (bytes / packets > 8000)
4245 itrval = bulk_latency;
4246 else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
4249 case low_latency: /* 50 usec aka 20000 ints/s */
4250 if (bytes > 10000) {
4251 /* this if handles the TSO accounting */
4252 if (bytes / packets > 8000)
4253 itrval = bulk_latency;
4254 else if ((packets < 10) || ((bytes / packets) > 1200))
4255 itrval = bulk_latency;
4256 else if ((packets > 35))
4257 itrval = lowest_latency;
4258 } else if (bytes / packets > 2000) {
4259 itrval = bulk_latency;
4260 } else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
4264 case bulk_latency: /* 250 usec aka 4000 ints/s */
4265 if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
4268 } else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}
4274 /* clear work counters since we have the values we need */
4275 ring_container->total_bytes = 0;
4276 ring_container->total_packets = 0;
4278 /* write updated itr to ring container */
4279 ring_container->itr = itrval;
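/* Worked example: 100 packets totalling 150000 bytes in one interrupt give
 * bytes/packets = 1500; starting from low_latency, the
 * "(bytes / packets) > 1200" branch then selects bulk_latency, dropping
 * the vector to roughly 4000 ints/s until the traffic profile changes.
 */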
4282 static void igc_set_itr(struct igc_q_vector *q_vector)
4284 struct igc_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;
4288 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		current_itr = 0;
		new_itr = IGC_4K_ITR;
		goto set_itr_now;
	default:
		break;
	}
4299 igc_update_itr(q_vector, &q_vector->tx);
4300 igc_update_itr(q_vector, &q_vector->rx);
4302 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
4304 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4305 if (current_itr == lowest_latency &&
4306 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4307 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4308 current_itr = low_latency;
4310 switch (current_itr) {
4311 /* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
4326 if (new_itr != q_vector->itr_val) {
4327 /* this attempts to bias the interrupt rate towards Bulk
4328 * by adding intermediate steps when interrupt rate is
4331 new_itr = new_itr > q_vector->itr_val ?
4332 max((new_itr * q_vector->itr_val) /
			  (new_itr + (q_vector->itr_val >> 2)),
			  new_itr) : new_itr;
4335 /* Don't write the value here; it resets the adapter's
4336 * internal timer, and causes us to delay far longer than
4337 * we should between interrupts. Instead, we write the ITR
4338 * value at the beginning of the next interrupt so the timing
4339 * ends up being correct.
4341 q_vector->itr_val = new_itr;
4342 q_vector->set_itr = 1;
4346 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
4348 int v_idx = adapter->num_q_vectors;
4350 if (adapter->msix_entries) {
4351 pci_disable_msix(adapter->pdev);
4352 kfree(adapter->msix_entries);
4353 adapter->msix_entries = NULL;
4354 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
4355 pci_disable_msi(adapter->pdev);
	}

	while (v_idx--)
		igc_reset_q_vector(adapter, v_idx);
}
4363 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4364 * @adapter: Pointer to adapter structure
4365 * @msix: boolean value for MSI-X capability
4367 * Attempt to configure interrupts using the best available
4368 * capabilities of the hardware and kernel.
static void igc_set_interrupt_capability(struct igc_adapter *adapter,
					 bool msix)
{
	int numvecs, i;
	int err;

	if (!msix)
		goto msi_only;

	adapter->flags |= IGC_FLAG_HAS_MSIX;
4380 /* Number of supported queues. */
4381 adapter->num_rx_queues = adapter->rss_queues;
4383 adapter->num_tx_queues = adapter->rss_queues;
4385 /* start with one vector for every Rx queue */
4386 numvecs = adapter->num_rx_queues;
4388 /* if Tx handler is separate add 1 for every Tx queue */
4389 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
4390 numvecs += adapter->num_tx_queues;
4392 /* store the number of vectors reserved for queues */
4393 adapter->num_q_vectors = numvecs;
	/* add 1 vector for link status interrupts */
	numvecs++;

	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return;
4404 /* populate entry values */
4405 for (i = 0; i < numvecs; i++)
4406 adapter->msix_entries[i].entry = i;
	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;

	igc_reset_interrupt_capability(adapter);

msi_only:
	adapter->flags &= ~IGC_FLAG_HAS_MSIX;
4423 adapter->rss_queues = 1;
4424 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
4425 adapter->num_rx_queues = 1;
4426 adapter->num_tx_queues = 1;
4427 adapter->num_q_vectors = 1;
4428 if (!pci_enable_msi(adapter->pdev))
4429 adapter->flags |= IGC_FLAG_HAS_MSI;
4433 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4434 * @q_vector: pointer to q_vector
4436 * Stores a new ITR value based on strictly on packet size. This
4437 * algorithm is less sophisticated than that used in igc_update_itr,
4438 * due to the difficulty of synchronizing statistics across multiple
4439 * receive rings. The divisors and thresholds used by this function
4440 * were determined based on theoretical maximum wire speed and testing
4441 * data, in order to minimize response time while increasing bulk
4443 * NOTE: This function is called only when operating in a multiqueue
4444 * receive environment.
4446 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
4448 struct igc_adapter *adapter = q_vector->adapter;
4449 int new_val = q_vector->itr_val;
4450 int avg_wire_size = 0;
4451 unsigned int packets;
4453 /* For non-gigabit speeds, just fix the interrupt rate at 4000
4454 * ints/sec - ITR timer value of 120 ticks.
4456 switch (adapter->link_speed) {
	case SPEED_10:
	case SPEED_100:
		new_val = IGC_4K_ITR;
		goto set_itr_val;
	default:
		break;
	}
	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;
	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);
	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;
4478 /* Add 24 bytes to size to account for CRC, preamble, and gap */
4479 avg_wire_size += 24;
4481 /* Don't starve jumbo frames */
4482 avg_wire_size = min(avg_wire_size, 3000);
4484 /* Give a little boost to mid-size frames */
4485 if (avg_wire_size > 300 && avg_wire_size < 1200)
4486 new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;
4490 /* conservative mode (itr 3) eliminates the lowest_latency setting */
4491 if (new_val < IGC_20K_ITR &&
4492 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
4493 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
4494 new_val = IGC_20K_ITR;
set_itr_val:
	if (new_val != q_vector->itr_val) {
4498 q_vector->itr_val = new_val;
4499 q_vector->set_itr = 1;
	}

clear_counts:
	q_vector->rx.total_bytes = 0;
4503 q_vector->rx.total_packets = 0;
4504 q_vector->tx.total_bytes = 0;
4505 q_vector->tx.total_packets = 0;
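/* Worked example: an average frame of 600 bytes becomes 624 after the 24
 * byte CRC/preamble/gap adjustment; that lands in the 300..1200 "mid-size"
 * band, so new_val = 624 / 3 = 208, a smaller ITR value (and thus a higher
 * interrupt rate) than the avg_wire_size / 2 used for other frame sizes.
 */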
4508 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
4510 struct igc_adapter *adapter = q_vector->adapter;
4511 struct igc_hw *hw = &adapter->hw;
4513 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
4514 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
4515 if (adapter->num_q_vectors == 1)
4516 igc_set_itr(q_vector);
4518 igc_update_ring_itr(q_vector);
4521 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4522 if (adapter->msix_entries)
4523 wr32(IGC_EIMS, q_vector->eims_value);
4525 igc_irq_enable(adapter);
4529 static void igc_add_ring(struct igc_ring *ring,
			 struct igc_ring_container *head)
{
	head->ring = ring;
	head->count++;
}
4537 * igc_cache_ring_register - Descriptor ring to register mapping
4538 * @adapter: board private structure to initialize
4540 * Once we know the feature-set enabled for the device, we'll cache
4541 * the register offset the descriptor ring is assigned to.
4543 static void igc_cache_ring_register(struct igc_adapter *adapter)
{
	int i = 0, j = 0;

	switch (adapter->hw.mac.type) {
	case igc_i225:
	default:
		for (; i < adapter->num_rx_queues; i++)
4551 adapter->rx_ring[i]->reg_idx = i;
4552 for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = j;
		break;
	}
}
4559 * igc_poll - NAPI Rx polling callback
4560 * @napi: napi polling structure
4561 * @budget: count of how many packets we should handle
4563 static int igc_poll(struct napi_struct *napi, int budget)
4565 struct igc_q_vector *q_vector = container_of(napi,
4566 struct igc_q_vector,
4568 struct igc_ring *rx_ring = q_vector->rx.ring;
	bool clean_complete = true;
	int work_done = 0;
4572 if (q_vector->tx.ring)
4573 clean_complete = igc_clean_tx_irq(q_vector, budget);
	if (rx_ring) {
		int cleaned = rx_ring->xsk_pool ?
4577 igc_clean_rx_irq_zc(q_vector, budget) :
4578 igc_clean_rx_irq(q_vector, budget);
4580 work_done += cleaned;
4581 if (cleaned >= budget)
			clean_complete = false;
	}
4585 /* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;
4589 /* Exit the polling mode, but don't re-enable interrupts if stack might
4590 * poll us due to busy-polling
4592 if (likely(napi_complete_done(napi, work_done)))
4593 igc_ring_irq_enable(q_vector);
4595 return min(work_done, budget - 1);
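/* Returning min(work_done, budget - 1) after napi_complete_done() matters:
 * a NAPI handler must not report the full budget once it has declared the
 * poll complete, otherwise the core would needlessly reschedule it.
 */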
4599 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4600 * @adapter: board private structure to initialize
4601 * @v_count: q_vectors allocated on adapter, used for ring interleaving
4602 * @v_idx: index of vector in adapter struct
4603 * @txr_count: total number of Tx rings to allocate
4604 * @txr_idx: index of first Tx ring to allocate
4605 * @rxr_count: total number of Rx rings to allocate
4606 * @rxr_idx: index of first Rx ring to allocate
4608 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4610 static int igc_alloc_q_vector(struct igc_adapter *adapter,
4611 unsigned int v_count, unsigned int v_idx,
4612 unsigned int txr_count, unsigned int txr_idx,
4613 unsigned int rxr_count, unsigned int rxr_idx)
4615 struct igc_q_vector *q_vector;
4616 struct igc_ring *ring;
4619 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;
4623 ring_count = txr_count + rxr_count;
4625 /* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector)
		q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
				   GFP_KERNEL);
	else
		memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
	if (!q_vector)
		return -ENOMEM;
4635 /* initialize NAPI */
4636 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
4638 /* tie q_vector and adapter together */
4639 adapter->q_vector[v_idx] = q_vector;
4640 q_vector->adapter = adapter;
4642 /* initialize work limits */
4643 q_vector->tx.work_limit = adapter->tx_work_limit;
4645 /* initialize ITR configuration */
4646 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
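/* Default to the first EITR register; when MSI-X is in use,
 * igc_request_msix() repoints this at the vector's own EITR.
 */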
4647 q_vector->itr_val = IGC_START_ITR;
4649 /* initialize pointer to rings */
4650 ring = q_vector->ring;
4652 /* initialize ITR */
4653 if (rxr_count) {
4654 /* rx or rx/tx vector */
4655 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4656 q_vector->itr_val = adapter->rx_itr_setting;
4657 } else {
4658 /* tx only vector */
4659 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4660 q_vector->itr_val = adapter->tx_itr_setting;
4663 if (txr_count) {
4664 /* assign generic ring traits */
4665 ring->dev = &adapter->pdev->dev;
4666 ring->netdev = adapter->netdev;
4668 /* configure backlink on ring */
4669 ring->q_vector = q_vector;
4671 /* update q_vector Tx values */
4672 igc_add_ring(ring, &q_vector->tx);
4674 /* apply Tx specific ring traits */
4675 ring->count = adapter->tx_ring_count;
4676 ring->queue_index = txr_idx;
4678 /* assign ring to adapter */
4679 adapter->tx_ring[txr_idx] = ring;
4681 /* push pointer to next ring */
4682 ring++;
4685 if (rxr_count) {
4686 /* assign generic ring traits */
4687 ring->dev = &adapter->pdev->dev;
4688 ring->netdev = adapter->netdev;
4690 /* configure backlink on ring */
4691 ring->q_vector = q_vector;
4693 /* update q_vector Rx values */
4694 igc_add_ring(ring, &q_vector->rx);
4696 /* apply Rx specific ring traits */
4697 ring->count = adapter->rx_ring_count;
4698 ring->queue_index = rxr_idx;
4700 /* assign ring to adapter */
4701 adapter->rx_ring[rxr_idx] = ring;
4704 return 0;
4708 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4709 * @adapter: board private structure to initialize
4711 * We allocate one q_vector per queue interrupt. If allocation fails we
4712 * return -ENOMEM.
4714 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4716 int rxr_remaining = adapter->num_rx_queues;
4717 int txr_remaining = adapter->num_tx_queues;
4718 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4719 int q_vectors = adapter->num_q_vectors;
4720 int err;
4722 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4723 for (; rxr_remaining; v_idx++) {
4724 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4725 0, 0, 1, rxr_idx);
4726 if (err)
4727 goto err_out;
4730 /* update counts and index */
4731 rxr_remaining--;
4732 rxr_idx++;
4736 for (; v_idx < q_vectors; v_idx++) {
4737 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4738 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4740 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4741 tqpv, txr_idx, rqpv, rxr_idx);
4742 if (err)
4743 goto err_out;
4746 /* update counts and index */
4747 rxr_remaining -= rqpv;
4748 txr_remaining -= tqpv;
4749 rxr_idx++;
4750 txr_idx++;
4753 return 0;
4755 err_out:
4756 adapter->num_tx_queues = 0;
4757 adapter->num_rx_queues = 0;
4758 adapter->num_q_vectors = 0;
4760 while (v_idx--)
4761 igc_free_q_vector(adapter, v_idx);
4763 return -ENOMEM;
4767 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4768 * @adapter: Pointer to adapter structure
4769 * @msix: boolean for MSI-X capability
4771 * This function initializes the interrupts and allocates all of the queues.
4773 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4775 struct net_device *dev = adapter->netdev;
4776 int err = 0;
4778 igc_set_interrupt_capability(adapter, msix);
4780 err = igc_alloc_q_vectors(adapter);
4781 if (err) {
4782 netdev_err(dev, "Unable to allocate memory for vectors\n");
4783 goto err_alloc_q_vectors;
4786 igc_cache_ring_register(adapter);
4788 return 0;
4790 err_alloc_q_vectors:
4791 igc_reset_interrupt_capability(adapter);
4792 return err;
4796 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4797 * @adapter: board private structure to initialize
4799 * igc_sw_init initializes the Adapter private data structure.
4800 * Fields are initialized based on PCI device information and
4801 * OS network device settings (MTU size).
4803 static int igc_sw_init(struct igc_adapter *adapter)
4805 struct net_device *netdev = adapter->netdev;
4806 struct pci_dev *pdev = adapter->pdev;
4807 struct igc_hw *hw = &adapter->hw;
4809 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4811 /* set default ring sizes */
4812 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4813 adapter->rx_ring_count = IGC_DEFAULT_RXD;
4815 /* set default ITR values */
4816 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4817 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4819 /* set default work limits */
4820 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4822 /* adjust max frame to be at least the size of a standard frame */
4823 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4824 VLAN_HLEN;
4825 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4827 mutex_init(&adapter->nfc_rule_lock);
4828 INIT_LIST_HEAD(&adapter->nfc_rule_list);
4829 adapter->nfc_rule_count = 0;
4831 spin_lock_init(&adapter->stats64_lock);
4832 spin_lock_init(&adapter->qbv_tx_lock);
4833 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4834 adapter->flags |= IGC_FLAG_HAS_MSIX;
4836 igc_init_queue_configuration(adapter);
4838 /* This call may decrease the number of queues */
4839 if (igc_init_interrupt_scheme(adapter, true)) {
4840 netdev_err(netdev, "Unable to allocate memory for queues\n");
4841 return -ENOMEM;
4844 /* Explicitly disable IRQ since the NIC can be in any state. */
4845 igc_irq_disable(adapter);
4847 set_bit(__IGC_DOWN, &adapter->state);
4849 return 0;
4853 * igc_up - Open the interface and prepare it to handle traffic
4854 * @adapter: board private structure
4856 void igc_up(struct igc_adapter *adapter)
4858 struct igc_hw *hw = &adapter->hw;
4859 int i = 0;
4861 /* hardware has been reset, we need to reload some things */
4862 igc_configure(adapter);
4864 clear_bit(__IGC_DOWN, &adapter->state);
4866 for (i = 0; i < adapter->num_q_vectors; i++)
4867 napi_enable(&adapter->q_vector[i]->napi);
4869 if (adapter->msix_entries)
4870 igc_configure_msix(adapter);
4871 else
4872 igc_assign_vector(adapter->q_vector[0], 0);
4874 /* Clear any pending interrupts. */
4875 rd32(IGC_ICR);
4876 igc_irq_enable(adapter);
4878 netif_tx_start_all_queues(adapter->netdev);
4880 /* start the watchdog. */
4881 hw->mac.get_link_status = true;
4882 schedule_work(&adapter->watchdog_task);
4886 * igc_update_stats - Update the board statistics counters
4887 * @adapter: board private structure
4889 void igc_update_stats(struct igc_adapter *adapter)
4891 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4892 struct pci_dev *pdev = adapter->pdev;
4893 struct igc_hw *hw = &adapter->hw;
4894 u64 _bytes, _packets;
4895 u64 bytes, packets;
4896 unsigned int start;
4897 u32 mpc;
4898 int i;
4900 /* Prevent stats update while adapter is being reset, or if the pci
4901 * connection is down.
4903 if (adapter->link_speed == 0)
4904 return;
4905 if (pci_channel_offline(pdev))
4906 return;
4908 packets = 0;
4909 bytes = 0;
4911 rcu_read_lock();
4912 for (i = 0; i < adapter->num_rx_queues; i++) {
4913 struct igc_ring *ring = adapter->rx_ring[i];
4914 u32 rqdpc = rd32(IGC_RQDPC(i));
4916 if (hw->mac.type >= igc_i225)
4917 wr32(IGC_RQDPC(i), 0);
4919 if (rqdpc) {
4920 ring->rx_stats.drops += rqdpc;
4921 net_stats->rx_fifo_errors += rqdpc;
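/* Snapshot the 64-bit byte/packet counters under the u64_stats
 * seqcount and retry if a writer raced with the read; this matters on
 * 32-bit hosts where 64-bit loads are not atomic.
 */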
4924 do {
4925 start = u64_stats_fetch_begin(&ring->rx_syncp);
4926 _bytes = ring->rx_stats.bytes;
4927 _packets = ring->rx_stats.packets;
4928 } while (u64_stats_fetch_retry(&ring->rx_syncp, start));
4929 bytes += _bytes;
4930 packets += _packets;
4933 net_stats->rx_bytes = bytes;
4934 net_stats->rx_packets = packets;
4936 packets = 0;
4937 bytes = 0;
4938 for (i = 0; i < adapter->num_tx_queues; i++) {
4939 struct igc_ring *ring = adapter->tx_ring[i];
4941 do {
4942 start = u64_stats_fetch_begin(&ring->tx_syncp);
4943 _bytes = ring->tx_stats.bytes;
4944 _packets = ring->tx_stats.packets;
4945 } while (u64_stats_fetch_retry(&ring->tx_syncp, start));
4946 bytes += _bytes;
4947 packets += _packets;
4949 net_stats->tx_bytes = bytes;
4950 net_stats->tx_packets = packets;
4951 rcu_read_unlock();
4953 /* read stats registers */
4954 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4955 adapter->stats.gprc += rd32(IGC_GPRC);
4956 adapter->stats.gorc += rd32(IGC_GORCL);
4957 rd32(IGC_GORCH); /* clear GORCL */
4958 adapter->stats.bprc += rd32(IGC_BPRC);
4959 adapter->stats.mprc += rd32(IGC_MPRC);
4960 adapter->stats.roc += rd32(IGC_ROC);
4962 adapter->stats.prc64 += rd32(IGC_PRC64);
4963 adapter->stats.prc127 += rd32(IGC_PRC127);
4964 adapter->stats.prc255 += rd32(IGC_PRC255);
4965 adapter->stats.prc511 += rd32(IGC_PRC511);
4966 adapter->stats.prc1023 += rd32(IGC_PRC1023);
4967 adapter->stats.prc1522 += rd32(IGC_PRC1522);
4968 adapter->stats.tlpic += rd32(IGC_TLPIC);
4969 adapter->stats.rlpic += rd32(IGC_RLPIC);
4970 adapter->stats.hgptc += rd32(IGC_HGPTC);
4972 mpc = rd32(IGC_MPC);
4973 adapter->stats.mpc += mpc;
4974 net_stats->rx_fifo_errors += mpc;
4975 adapter->stats.scc += rd32(IGC_SCC);
4976 adapter->stats.ecol += rd32(IGC_ECOL);
4977 adapter->stats.mcc += rd32(IGC_MCC);
4978 adapter->stats.latecol += rd32(IGC_LATECOL);
4979 adapter->stats.dc += rd32(IGC_DC);
4980 adapter->stats.rlec += rd32(IGC_RLEC);
4981 adapter->stats.xonrxc += rd32(IGC_XONRXC);
4982 adapter->stats.xontxc += rd32(IGC_XONTXC);
4983 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4984 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4985 adapter->stats.fcruc += rd32(IGC_FCRUC);
4986 adapter->stats.gptc += rd32(IGC_GPTC);
4987 adapter->stats.gotc += rd32(IGC_GOTCL);
4988 rd32(IGC_GOTCH); /* clear GOTCL */
4989 adapter->stats.rnbc += rd32(IGC_RNBC);
4990 adapter->stats.ruc += rd32(IGC_RUC);
4991 adapter->stats.rfc += rd32(IGC_RFC);
4992 adapter->stats.rjc += rd32(IGC_RJC);
4993 adapter->stats.tor += rd32(IGC_TORH);
4994 adapter->stats.tot += rd32(IGC_TOTH);
4995 adapter->stats.tpr += rd32(IGC_TPR);
4997 adapter->stats.ptc64 += rd32(IGC_PTC64);
4998 adapter->stats.ptc127 += rd32(IGC_PTC127);
4999 adapter->stats.ptc255 += rd32(IGC_PTC255);
5000 adapter->stats.ptc511 += rd32(IGC_PTC511);
5001 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
5002 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
5004 adapter->stats.mptc += rd32(IGC_MPTC);
5005 adapter->stats.bptc += rd32(IGC_BPTC);
5007 adapter->stats.tpt += rd32(IGC_TPT);
5008 adapter->stats.colc += rd32(IGC_COLC);
5009 adapter->stats.colc += rd32(IGC_RERC);
5011 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
5013 adapter->stats.tsctc += rd32(IGC_TSCTC);
5015 adapter->stats.iac += rd32(IGC_IAC);
5017 /* Fill out the OS statistics structure */
5018 net_stats->multicast = adapter->stats.mprc;
5019 net_stats->collisions = adapter->stats.colc;
5023 /* RLEC on some newer hardware can be incorrect so build
5024 * our own version based on RUC and ROC
5026 net_stats->rx_errors = adapter->stats.rxerrc +
5027 adapter->stats.crcerrs + adapter->stats.algnerrc +
5028 adapter->stats.ruc + adapter->stats.roc +
5029 adapter->stats.cexterr;
5030 net_stats->rx_length_errors = adapter->stats.ruc +
5031 adapter->stats.roc;
5032 net_stats->rx_crc_errors = adapter->stats.crcerrs;
5033 net_stats->rx_frame_errors = adapter->stats.algnerrc;
5034 net_stats->rx_missed_errors = adapter->stats.mpc;
5037 net_stats->tx_errors = adapter->stats.ecol +
5038 adapter->stats.latecol;
5039 net_stats->tx_aborted_errors = adapter->stats.ecol;
5040 net_stats->tx_window_errors = adapter->stats.latecol;
5041 net_stats->tx_carrier_errors = adapter->stats.tncrs;
5044 net_stats->tx_dropped = adapter->stats.txdrop;
5046 /* Management Stats */
5047 adapter->stats.mgptc += rd32(IGC_MGTPTC);
5048 adapter->stats.mgprc += rd32(IGC_MGTPRC);
5049 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
5053 * igc_down - Close the interface
5054 * @adapter: board private structure
5056 void igc_down(struct igc_adapter *adapter)
5058 struct net_device *netdev = adapter->netdev;
5059 struct igc_hw *hw = &adapter->hw;
5060 u32 tctl, rctl;
5061 int i = 0;
5063 set_bit(__IGC_DOWN, &adapter->state);
5065 igc_ptp_suspend(adapter);
5067 if (pci_device_is_present(adapter->pdev)) {
5068 /* disable receives in the hardware */
5069 rctl = rd32(IGC_RCTL);
5070 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
5071 /* flush and sleep below */
5073 /* set trans_start so we don't get spurious watchdogs during reset */
5074 netif_trans_update(netdev);
5076 netif_carrier_off(netdev);
5077 netif_tx_stop_all_queues(netdev);
5079 if (pci_device_is_present(adapter->pdev)) {
5080 /* disable transmits in the hardware */
5081 tctl = rd32(IGC_TCTL);
5082 tctl &= ~IGC_TCTL_EN;
5083 wr32(IGC_TCTL, tctl);
5084 /* flush both disables and wait for them to finish */
5085 wrfl();
5086 usleep_range(10000, 20000);
5088 igc_irq_disable(adapter);
5091 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5093 for (i = 0; i < adapter->num_q_vectors; i++) {
5094 if (adapter->q_vector[i]) {
5095 napi_synchronize(&adapter->q_vector[i]->napi);
5096 napi_disable(&adapter->q_vector[i]->napi);
5100 del_timer_sync(&adapter->watchdog_timer);
5101 del_timer_sync(&adapter->phy_info_timer);
5103 /* record the stats before reset*/
5104 spin_lock(&adapter->stats64_lock);
5105 igc_update_stats(adapter);
5106 spin_unlock(&adapter->stats64_lock);
5108 adapter->link_speed = 0;
5109 adapter->link_duplex = 0;
5111 if (!pci_channel_offline(adapter->pdev))
5112 igc_reset(adapter);
5114 /* clear VLAN promisc flag so VFTA will be updated if necessary */
5115 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
5117 igc_disable_all_tx_rings_hw(adapter);
5118 igc_clean_all_tx_rings(adapter);
5119 igc_clean_all_rx_rings(adapter);
5122 void igc_reinit_locked(struct igc_adapter *adapter)
5124 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5125 usleep_range(1000, 2000);
5126 igc_down(adapter);
5127 igc_up(adapter);
5128 clear_bit(__IGC_RESETTING, &adapter->state);
5131 static void igc_reset_task(struct work_struct *work)
5133 struct igc_adapter *adapter;
5135 adapter = container_of(work, struct igc_adapter, reset_task);
5137 rtnl_lock();
5138 /* If we're already down or resetting, just bail */
5139 if (test_bit(__IGC_DOWN, &adapter->state) ||
5140 test_bit(__IGC_RESETTING, &adapter->state)) {
5141 rtnl_unlock();
5142 return;
5145 igc_rings_dump(adapter);
5146 igc_regs_dump(adapter);
5147 netdev_err(adapter->netdev, "Reset adapter\n");
5148 igc_reinit_locked(adapter);
5149 rtnl_unlock();
5153 * igc_change_mtu - Change the Maximum Transfer Unit
5154 * @netdev: network interface device structure
5155 * @new_mtu: new value for maximum frame size
5157 * Returns 0 on success, negative on failure
5159 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
5161 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
5162 struct igc_adapter *adapter = netdev_priv(netdev);
5164 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
5165 netdev_dbg(netdev, "Jumbo frames not supported with XDP");
5166 return -EINVAL;
5169 /* adjust max frame to be at least the size of a standard frame */
5170 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
5171 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
5173 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
5174 usleep_range(1000, 2000);
5176 /* igc_down has a dependency on max_frame_size */
5177 adapter->max_frame_size = max_frame;
5179 if (netif_running(netdev))
5180 igc_down(adapter);
5182 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5183 netdev->mtu = new_mtu;
5185 if (netif_running(netdev))
5186 igc_up(adapter);
5187 else
5188 igc_reset(adapter);
5190 clear_bit(__IGC_RESETTING, &adapter->state);
5192 return 0;
5196 * igc_tx_timeout - Respond to a Tx Hang
5197 * @netdev: network interface device structure
5198 * @txqueue: queue number that timed out
5200 static void igc_tx_timeout(struct net_device *netdev,
5201 unsigned int __always_unused txqueue)
5203 struct igc_adapter *adapter = netdev_priv(netdev);
5204 struct igc_hw *hw = &adapter->hw;
5206 /* Do the reset outside of interrupt context */
5207 adapter->tx_timeout_count++;
5208 schedule_work(&adapter->reset_task);
5209 wr32(IGC_EICS,
5210 (adapter->eims_enable_mask & ~adapter->eims_other));
5214 * igc_get_stats64 - Get System Network Statistics
5215 * @netdev: network interface device structure
5216 * @stats: rtnl_link_stats64 pointer
5218 * Returns the address of the device statistics structure.
5219 * The statistics are updated here and also from the timer callback.
5221 static void igc_get_stats64(struct net_device *netdev,
5222 struct rtnl_link_stats64 *stats)
5224 struct igc_adapter *adapter = netdev_priv(netdev);
5226 spin_lock(&adapter->stats64_lock);
5227 if (!test_bit(__IGC_RESETTING, &adapter->state))
5228 igc_update_stats(adapter);
5229 memcpy(stats, &adapter->stats64, sizeof(*stats));
5230 spin_unlock(&adapter->stats64_lock);
5233 static netdev_features_t igc_fix_features(struct net_device *netdev,
5234 netdev_features_t features)
5236 /* Since there is no support for separate Rx/Tx vlan accel
5237 * enable/disable make sure Tx flag is always in same state as Rx.
5239 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5240 features |= NETIF_F_HW_VLAN_CTAG_TX;
5241 else
5242 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
5244 return features;
5247 static int igc_set_features(struct net_device *netdev,
5248 netdev_features_t features)
5250 netdev_features_t changed = netdev->features ^ features;
5251 struct igc_adapter *adapter = netdev_priv(netdev);
5253 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
5254 igc_vlan_mode(netdev, features);
5256 /* Add VLAN support */
5257 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
5258 return 0;
5260 if (!(features & NETIF_F_NTUPLE))
5261 igc_flush_nfc_rules(adapter);
5263 netdev->features = features;
5265 if (netif_running(netdev))
5266 igc_reinit_locked(adapter);
5267 else
5268 igc_reset(adapter);
5270 return 1;
5273 static netdev_features_t
5274 igc_features_check(struct sk_buff *skb, struct net_device *dev,
5275 netdev_features_t features)
5277 unsigned int network_hdr_len, mac_hdr_len;
5279 /* Make certain the headers can be described by a context descriptor */
5280 mac_hdr_len = skb_network_offset(skb);
5281 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
5282 return features & ~(NETIF_F_HW_CSUM |
5283 NETIF_F_SCTP_CRC |
5284 NETIF_F_HW_VLAN_CTAG_TX |
5285 NETIF_F_TSO |
5286 NETIF_F_TSO6);
5288 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
5289 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
5290 return features & ~(NETIF_F_HW_CSUM |
5291 NETIF_F_SCTP_CRC |
5292 NETIF_F_TSO |
5293 NETIF_F_TSO6);
5295 /* We can only support IPv4 TSO in tunnels if we can mangle the
5296 * inner IP ID field, so strip TSO if MANGLEID is not supported.
5298 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
5299 features &= ~NETIF_F_TSO;
5301 return features;
5304 static void igc_tsync_interrupt(struct igc_adapter *adapter)
5306 struct igc_hw *hw = &adapter->hw;
5307 u32 tsauxc, sec, nsec, tsicr;
5308 struct ptp_clock_event event;
5309 struct timespec64 ts;
5311 tsicr = rd32(IGC_TSICR);
5313 if (tsicr & IGC_TSICR_SYS_WRAP) {
5314 event.type = PTP_CLOCK_PPS;
5315 if (adapter->ptp_caps.pps)
5316 ptp_clock_event(adapter->ptp_clock, &event);
5319 if (tsicr & IGC_TSICR_TXTS) {
5320 /* retrieve hardware timestamp */
5321 igc_ptp_tx_tstamp_event(adapter);
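/* Target-time interrupts (TT0/TT1) drive the periodic output pins:
 * each expiry re-arms the next pulse edge by adding the configured
 * period to the previous start time.
 */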
5324 if (tsicr & IGC_TSICR_TT0) {
5325 spin_lock(&adapter->tmreg_lock);
5326 ts = timespec64_add(adapter->perout[0].start,
5327 adapter->perout[0].period);
5328 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5329 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
5330 tsauxc = rd32(IGC_TSAUXC);
5331 tsauxc |= IGC_TSAUXC_EN_TT0;
5332 wr32(IGC_TSAUXC, tsauxc);
5333 adapter->perout[0].start = ts;
5334 spin_unlock(&adapter->tmreg_lock);
5337 if (tsicr & IGC_TSICR_TT1) {
5338 spin_lock(&adapter->tmreg_lock);
5339 ts = timespec64_add(adapter->perout[1].start,
5340 adapter->perout[1].period);
5341 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
5342 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
5343 tsauxc = rd32(IGC_TSAUXC);
5344 tsauxc |= IGC_TSAUXC_EN_TT1;
5345 wr32(IGC_TSAUXC, tsauxc);
5346 adapter->perout[1].start = ts;
5347 spin_unlock(&adapter->tmreg_lock);
5350 if (tsicr & IGC_TSICR_AUTT0) {
5351 nsec = rd32(IGC_AUXSTMPL0);
5352 sec = rd32(IGC_AUXSTMPH0);
5353 event.type = PTP_CLOCK_EXTTS;
5354 event.index = 0;
5355 event.timestamp = sec * NSEC_PER_SEC + nsec;
5356 ptp_clock_event(adapter->ptp_clock, &event);
5359 if (tsicr & IGC_TSICR_AUTT1) {
5360 nsec = rd32(IGC_AUXSTMPL1);
5361 sec = rd32(IGC_AUXSTMPH1);
5362 event.type = PTP_CLOCK_EXTTS;
5363 event.index = 1;
5364 event.timestamp = sec * NSEC_PER_SEC + nsec;
5365 ptp_clock_event(adapter->ptp_clock, &event);
5370 * igc_msix_other - msix other interrupt handler
5371 * @irq: interrupt number
5372 * @data: pointer to a q_vector
5374 static irqreturn_t igc_msix_other(int irq, void *data)
5376 struct igc_adapter *adapter = data;
5377 struct igc_hw *hw = &adapter->hw;
5378 u32 icr = rd32(IGC_ICR);
5380 /* reading ICR causes bit 31 of EICR to be cleared */
5381 if (icr & IGC_ICR_DRSTA)
5382 schedule_work(&adapter->reset_task);
5384 if (icr & IGC_ICR_DOUTSYNC) {
5385 /* HW is reporting DMA is out of sync */
5386 adapter->stats.doosync++;
5389 if (icr & IGC_ICR_LSC) {
5390 hw->mac.get_link_status = true;
5391 /* guard against interrupt when we're going down */
5392 if (!test_bit(__IGC_DOWN, &adapter->state))
5393 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5396 if (icr & IGC_ICR_TS)
5397 igc_tsync_interrupt(adapter);
5399 wr32(IGC_EIMS, adapter->eims_other);
5401 return IRQ_HANDLED;
5404 static void igc_write_itr(struct igc_q_vector *q_vector)
5406 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
5408 if (!q_vector->set_itr)
5409 return;
5411 if (!itr_val)
5412 itr_val = IGC_ITR_VAL_MASK;
5414 itr_val |= IGC_EITR_CNT_IGNR;
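/* IGC_EITR_CNT_IGNR should make the hardware disregard the in-flight
 * interval countdown so the freshly written ITR value applies
 * immediately.
 */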
5416 writel(itr_val, q_vector->itr_register);
5417 q_vector->set_itr = 0;
5420 static irqreturn_t igc_msix_ring(int irq, void *data)
5422 struct igc_q_vector *q_vector = data;
5424 /* Write the ITR value calculated from the previous interrupt. */
5425 igc_write_itr(q_vector);
5427 napi_schedule(&q_vector->napi);
5429 return IRQ_HANDLED;
5433 * igc_request_msix - Initialize MSI-X interrupts
5434 * @adapter: Pointer to adapter structure
5436 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5437 * kernel.
5439 static int igc_request_msix(struct igc_adapter *adapter)
5441 unsigned int num_q_vectors = adapter->num_q_vectors;
5442 int i = 0, err = 0, vector = 0, free_vector = 0;
5443 struct net_device *netdev = adapter->netdev;
5445 err = request_irq(adapter->msix_entries[vector].vector,
5446 &igc_msix_other, 0, netdev->name, adapter);
5447 if (err)
5448 goto err_out;
5450 if (num_q_vectors > MAX_Q_VECTORS) {
5451 num_q_vectors = MAX_Q_VECTORS;
5452 dev_warn(&adapter->pdev->dev,
5453 "The number of queue vectors (%d) is higher than max allowed (%d)\n",
5454 adapter->num_q_vectors, MAX_Q_VECTORS);
5456 for (i = 0; i < num_q_vectors; i++) {
5457 struct igc_q_vector *q_vector = adapter->q_vector[i];
5459 vector++;
5461 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
5463 if (q_vector->rx.ring && q_vector->tx.ring)
5464 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
5465 q_vector->rx.ring->queue_index);
5466 else if (q_vector->tx.ring)
5467 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
5468 q_vector->tx.ring->queue_index);
5469 else if (q_vector->rx.ring)
5470 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
5471 q_vector->rx.ring->queue_index);
5472 else
5473 sprintf(q_vector->name, "%s-unused", netdev->name);
5475 err = request_irq(adapter->msix_entries[vector].vector,
5476 igc_msix_ring, 0, q_vector->name,
5477 q_vector);
5478 if (err)
5479 goto err_free;
5482 igc_configure_msix(adapter);
5483 return 0;
5485 err_free:
5486 /* free already assigned IRQs */
5487 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
5489 vector--;
5490 for (i = 0; i < vector; i++) {
5491 free_irq(adapter->msix_entries[free_vector++].vector,
5492 adapter->q_vector[i]);
5494 err_out:
5495 return err;
5499 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5500 * @adapter: Pointer to adapter structure
5502 * This function resets the device so that it has 0 rx queues, tx queues, and
5503 * MSI-X interrupts allocated.
5505 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
5507 igc_free_q_vectors(adapter);
5508 igc_reset_interrupt_capability(adapter);
5511 /* Need to wait a few seconds after link up to get diagnostic information from
5512 * the phy
5514 static void igc_update_phy_info(struct timer_list *t)
5516 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
5518 igc_get_phy_info(&adapter->hw);
5522 * igc_has_link - check shared code for link and determine up/down
5523 * @adapter: pointer to driver private info
5525 bool igc_has_link(struct igc_adapter *adapter)
5527 struct igc_hw *hw = &adapter->hw;
5528 bool link_active = false;
5530 /* get_link_status is set on LSC (link status) interrupt or
5531 * rx sequence error interrupt. get_link_status will stay
5532 * false until the igc_check_for_link establishes link
5533 * for copper adapters ONLY
5535 if (!hw->mac.get_link_status)
5536 return true;
5537 hw->mac.ops.check_for_link(hw);
5538 link_active = !hw->mac.get_link_status;
5540 if (hw->mac.type == igc_i225) {
5541 if (!netif_carrier_ok(adapter->netdev)) {
5542 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5543 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
5544 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
5545 adapter->link_check_timeout = jiffies;
5549 return link_active;
5553 * igc_watchdog - Timer Call-back
5554 * @t: timer for the watchdog
5556 static void igc_watchdog(struct timer_list *t)
5558 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
5559 /* Do the rest outside of interrupt context */
5560 schedule_work(&adapter->watchdog_task);
5563 static void igc_watchdog_task(struct work_struct *work)
5565 struct igc_adapter *adapter = container_of(work,
5566 struct igc_adapter,
5567 watchdog_task);
5568 struct net_device *netdev = adapter->netdev;
5569 struct igc_hw *hw = &adapter->hw;
5570 struct igc_phy_info *phy = &hw->phy;
5571 u16 phy_data, retry_count = 20;
5572 u32 link;
5573 int i;
5575 link = igc_has_link(adapter);
5577 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
5578 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
5579 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
5580 else
5581 link = false;
5584 if (link) {
5585 /* Cancel scheduled suspend requests. */
5586 pm_runtime_resume(netdev->dev.parent);
5588 if (!netif_carrier_ok(netdev)) {
5589 u32 ctrl;
5591 hw->mac.ops.get_speed_and_duplex(hw,
5592 &adapter->link_speed,
5593 &adapter->link_duplex);
5595 ctrl = rd32(IGC_CTRL);
5596 /* Link status message must follow this format */
5597 netdev_info(netdev,
5598 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
5599 adapter->link_speed,
5600 adapter->link_duplex == FULL_DUPLEX ?
5601 "Full" : "Half",
5602 (ctrl & IGC_CTRL_TFCE) &&
5603 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
5604 (ctrl & IGC_CTRL_RFCE) ? "RX" :
5605 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
5607 /* disable EEE if enabled */
5608 if ((adapter->flags & IGC_FLAG_EEE) &&
5609 adapter->link_duplex == HALF_DUPLEX) {
5610 netdev_info(netdev,
5611 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
5612 adapter->hw.dev_spec._base.eee_enable = false;
5613 adapter->flags &= ~IGC_FLAG_EEE;
5616 /* check if SmartSpeed worked */
5617 igc_check_downshift(hw);
5618 if (phy->speed_downgraded)
5619 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
5621 /* adjust timeout factor according to speed/duplex */
5622 adapter->tx_timeout_factor = 1;
5623 switch (adapter->link_speed) {
5624 case SPEED_10:
5625 adapter->tx_timeout_factor = 14;
5626 break;
5627 case SPEED_100:
5628 case SPEED_1000:
5629 case SPEED_2500:
5630 adapter->tx_timeout_factor = 1;
5631 break;
5634 /* Once the launch time has been set on the wire, there
5635 * is a delay before the link speed can be determined
5636 * based on link-up activity. Write into the register
5637 * as soon as we know the correct link speed.
5639 igc_tsn_adjust_txtime_offset(adapter);
5641 if (adapter->link_speed != SPEED_1000)
5642 goto no_wait;
5644 /* wait for Remote receiver status OK */
5645 retry_read_status:
5646 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5647 &phy_data)) {
5648 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5649 retry_count) {
5650 msleep(100);
5651 retry_count--;
5652 goto retry_read_status;
5653 } else if (!retry_count) {
5654 netdev_err(netdev, "exceed max 2 second\n");
5656 } else {
5657 netdev_err(netdev, "read 1000Base-T Status Reg\n");
5659 no_wait:
5660 netif_carrier_on(netdev);
5662 /* link state has changed, schedule phy info update */
5663 if (!test_bit(__IGC_DOWN, &adapter->state))
5664 mod_timer(&adapter->phy_info_timer,
5665 round_jiffies(jiffies + 2 * HZ));
5668 if (netif_carrier_ok(netdev)) {
5669 adapter->link_speed = 0;
5670 adapter->link_duplex = 0;
5672 /* Links status message must follow this format */
5673 netdev_info(netdev, "NIC Link is Down\n");
5674 netif_carrier_off(netdev);
5676 /* link state has changed, schedule phy info update */
5677 if (!test_bit(__IGC_DOWN, &adapter->state))
5678 mod_timer(&adapter->phy_info_timer,
5679 round_jiffies(jiffies + 2 * HZ));
5681 pm_schedule_suspend(netdev->dev.parent,
5682 MSEC_PER_SEC * 5);
5686 spin_lock(&adapter->stats64_lock);
5687 igc_update_stats(adapter);
5688 spin_unlock(&adapter->stats64_lock);
5690 for (i = 0; i < adapter->num_tx_queues; i++) {
5691 struct igc_ring *tx_ring = adapter->tx_ring[i];
5693 if (!netif_carrier_ok(netdev)) {
5694 /* We've lost link, so the controller stops DMA,
5695 * but we've got queued Tx work that's never going
5696 * to get done, so reset controller to flush Tx.
5697 * (Do the reset outside of interrupt context).
5699 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5700 adapter->tx_timeout_count++;
5701 schedule_work(&adapter->reset_task);
5702 /* return immediately since reset is imminent */
5703 return;
5707 /* Force detection of hung controller every watchdog period */
5708 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5711 /* Cause software interrupt to ensure Rx ring is cleaned */
5712 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5713 u32 eics = 0;
5715 for (i = 0; i < adapter->num_q_vectors; i++)
5716 eics |= adapter->q_vector[i]->eims_value;
5717 wr32(IGC_EICS, eics);
5718 } else {
5719 wr32(IGC_ICS, IGC_ICS_RXDMT0);
5722 igc_ptp_tx_hang(adapter);
5724 /* Reset the timer */
5725 if (!test_bit(__IGC_DOWN, &adapter->state)) {
5726 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5727 mod_timer(&adapter->watchdog_timer,
5728 round_jiffies(jiffies + HZ));
5730 mod_timer(&adapter->watchdog_timer,
5731 round_jiffies(jiffies + 2 * HZ));
5736 * igc_intr_msi - Interrupt Handler
5737 * @irq: interrupt number
5738 * @data: pointer to a network interface device structure
5740 static irqreturn_t igc_intr_msi(int irq, void *data)
5742 struct igc_adapter *adapter = data;
5743 struct igc_q_vector *q_vector = adapter->q_vector[0];
5744 struct igc_hw *hw = &adapter->hw;
5745 /* read ICR disables interrupts using IAM */
5746 u32 icr = rd32(IGC_ICR);
5748 igc_write_itr(q_vector);
5750 if (icr & IGC_ICR_DRSTA)
5751 schedule_work(&adapter->reset_task);
5753 if (icr & IGC_ICR_DOUTSYNC) {
5754 /* HW is reporting DMA is out of sync */
5755 adapter->stats.doosync++;
5758 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5759 hw->mac.get_link_status = true;
5760 if (!test_bit(__IGC_DOWN, &adapter->state))
5761 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5764 if (icr & IGC_ICR_TS)
5765 igc_tsync_interrupt(adapter);
5767 napi_schedule(&q_vector->napi);
5769 return IRQ_HANDLED;
5773 * igc_intr - Legacy Interrupt Handler
5774 * @irq: interrupt number
5775 * @data: pointer to a network interface device structure
5777 static irqreturn_t igc_intr(int irq, void *data)
5779 struct igc_adapter *adapter = data;
5780 struct igc_q_vector *q_vector = adapter->q_vector[0];
5781 struct igc_hw *hw = &adapter->hw;
5782 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5783 * need for the IMC write
5785 u32 icr = rd32(IGC_ICR);
5787 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5788 * not set, then the adapter didn't send an interrupt
5790 if (!(icr & IGC_ICR_INT_ASSERTED))
5791 return IRQ_NONE;
5793 igc_write_itr(q_vector);
5795 if (icr & IGC_ICR_DRSTA)
5796 schedule_work(&adapter->reset_task);
5798 if (icr & IGC_ICR_DOUTSYNC) {
5799 /* HW is reporting DMA is out of sync */
5800 adapter->stats.doosync++;
5803 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5804 hw->mac.get_link_status = true;
5805 /* guard against interrupt when we're going down */
5806 if (!test_bit(__IGC_DOWN, &adapter->state))
5807 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5810 if (icr & IGC_ICR_TS)
5811 igc_tsync_interrupt(adapter);
5813 napi_schedule(&q_vector->napi);
5815 return IRQ_HANDLED;
5818 static void igc_free_irq(struct igc_adapter *adapter)
5820 if (adapter->msix_entries) {
5821 int vector = 0, i;
5823 free_irq(adapter->msix_entries[vector++].vector, adapter);
5825 for (i = 0; i < adapter->num_q_vectors; i++)
5826 free_irq(adapter->msix_entries[vector++].vector,
5827 adapter->q_vector[i]);
5828 } else {
5829 free_irq(adapter->pdev->irq, adapter);
5834 * igc_request_irq - initialize interrupts
5835 * @adapter: Pointer to adapter structure
5837 * Attempts to configure interrupts using the best available
5838 * capabilities of the hardware and kernel.
5840 static int igc_request_irq(struct igc_adapter *adapter)
5842 struct net_device *netdev = adapter->netdev;
5843 struct pci_dev *pdev = adapter->pdev;
5844 int err = 0;
5846 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5847 err = igc_request_msix(adapter);
5848 if (!err)
5849 goto request_done;
5850 /* fall back to MSI */
5851 igc_free_all_tx_resources(adapter);
5852 igc_free_all_rx_resources(adapter);
5854 igc_clear_interrupt_scheme(adapter);
5855 err = igc_init_interrupt_scheme(adapter, false);
5856 if (err)
5857 goto request_done;
5858 igc_setup_all_tx_resources(adapter);
5859 igc_setup_all_rx_resources(adapter);
5860 igc_configure(adapter);
5863 igc_assign_vector(adapter->q_vector[0], 0);
5865 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5866 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5867 netdev->name, adapter);
5868 if (!err)
5869 goto request_done;
5871 /* fall back to legacy interrupts */
5872 igc_reset_interrupt_capability(adapter);
5873 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5876 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5877 netdev->name, adapter);
5879 if (err)
5880 netdev_err(netdev, "Error %d getting interrupt\n", err);
5882 request_done:
5883 return err;
5887 * __igc_open - Called when a network interface is made active
5888 * @netdev: network interface device structure
5889 * @resuming: boolean indicating if the device is resuming
5891 * Returns 0 on success, negative value on failure
5893 * The open entry point is called when a network interface is made
5894 * active by the system (IFF_UP). At this point all resources needed
5895 * for transmit and receive operations are allocated, the interrupt
5896 * handler is registered with the OS, the watchdog timer is started,
5897 * and the stack is notified that the interface is ready.
5899 static int __igc_open(struct net_device *netdev, bool resuming)
5901 struct igc_adapter *adapter = netdev_priv(netdev);
5902 struct pci_dev *pdev = adapter->pdev;
5903 struct igc_hw *hw = &adapter->hw;
5904 int err = 0;
5905 int i = 0;
5907 /* disallow open during test */
5909 if (test_bit(__IGC_TESTING, &adapter->state)) {
5910 WARN_ON(resuming);
5911 return -EBUSY;
5914 if (!resuming)
5915 pm_runtime_get_sync(&pdev->dev);
5917 netif_carrier_off(netdev);
5919 /* allocate transmit descriptors */
5920 err = igc_setup_all_tx_resources(adapter);
5921 if (err)
5922 goto err_setup_tx;
5924 /* allocate receive descriptors */
5925 err = igc_setup_all_rx_resources(adapter);
5926 if (err)
5927 goto err_setup_rx;
5929 igc_power_up_link(adapter);
5931 igc_configure(adapter);
5933 err = igc_request_irq(adapter);
5934 if (err)
5935 goto err_req_irq;
5937 /* Notify the stack of the actual queue counts. */
5938 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5939 if (err)
5940 goto err_set_queues;
5942 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5943 if (err)
5944 goto err_set_queues;
5946 clear_bit(__IGC_DOWN, &adapter->state);
5948 for (i = 0; i < adapter->num_q_vectors; i++)
5949 napi_enable(&adapter->q_vector[i]->napi);
5951 /* Clear any pending interrupts. */
5952 rd32(IGC_ICR);
5953 igc_irq_enable(adapter);
5955 if (!resuming)
5956 pm_runtime_put(&pdev->dev);
5958 netif_tx_start_all_queues(netdev);
5960 /* start the watchdog. */
5961 hw->mac.get_link_status = true;
5962 schedule_work(&adapter->watchdog_task);
5964 return IGC_SUCCESS;
5966 err_set_queues:
5967 igc_free_irq(adapter);
5968 err_req_irq:
5969 igc_release_hw_control(adapter);
5970 igc_power_down_phy_copper_base(&adapter->hw);
5971 igc_free_all_rx_resources(adapter);
5972 err_setup_rx:
5973 igc_free_all_tx_resources(adapter);
5974 err_setup_tx:
5975 igc_reset(adapter);
5976 if (!resuming)
5977 pm_runtime_put(&pdev->dev);
5979 return err;
5982 int igc_open(struct net_device *netdev)
5984 return __igc_open(netdev, false);
5988 * __igc_close - Disables a network interface
5989 * @netdev: network interface device structure
5990 * @suspending: boolean indicating the device is suspending
5992 * Returns 0, this is not allowed to fail
5994 * The close entry point is called when an interface is de-activated
5995 * by the OS. The hardware is still under the driver's control, but
5996 * needs to be disabled. A global MAC reset is issued to stop the
5997 * hardware, and all transmit and receive resources are freed.
5999 static int __igc_close(struct net_device *netdev, bool suspending)
6001 struct igc_adapter *adapter = netdev_priv(netdev);
6002 struct pci_dev *pdev = adapter->pdev;
6004 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
6006 if (!suspending)
6007 pm_runtime_get_sync(&pdev->dev);
6009 igc_down(adapter);
6011 igc_release_hw_control(adapter);
6013 igc_free_irq(adapter);
6015 igc_free_all_tx_resources(adapter);
6016 igc_free_all_rx_resources(adapter);
6018 if (!suspending)
6019 pm_runtime_put_sync(&pdev->dev);
6021 return 0;
6024 int igc_close(struct net_device *netdev)
6026 if (netif_device_present(netdev) || netdev->dismantle)
6027 return __igc_close(netdev, false);
6028 return 0;
6032 * igc_ioctl - Access the hwtstamp interface
6033 * @netdev: network interface device structure
6034 * @ifr: interface request data
6035 * @cmd: ioctl command
6037 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
6039 switch (cmd) {
6040 case SIOCGHWTSTAMP:
6041 return igc_ptp_get_ts_config(netdev, ifr);
6042 case SIOCSHWTSTAMP:
6043 return igc_ptp_set_ts_config(netdev, ifr);
6044 default:
6045 return -EOPNOTSUPP;
6049 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
6050 bool enable)
6052 struct igc_ring *ring;
6054 if (queue < 0 || queue >= adapter->num_tx_queues)
6055 return -EINVAL;
6057 ring = adapter->tx_ring[queue];
6058 ring->launchtime_enable = enable;
6060 return 0;
6063 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
6065 struct timespec64 b;
6067 b = ktime_to_timespec64(base_time);
6069 return timespec64_compare(now, &b) > 0;
6072 static bool validate_schedule(struct igc_adapter *adapter,
6073 const struct tc_taprio_qopt_offload *qopt)
6075 int queue_uses[IGC_MAX_TX_QUEUES] = { };
6076 struct igc_hw *hw = &adapter->hw;
6077 struct timespec64 now;
6078 size_t n;
6080 if (qopt->cycle_time_extension)
6081 return false;
6083 igc_ptp_read(adapter, &now);
6085 /* If we program the controller's BASET registers with a time
6086 * in the future, it will hold all the packets until that
6087 * time, causing a lot of TX Hangs, so to avoid that, we
6088 * reject schedules that would start in the future.
6089 * Note: Limitation above is no longer in i226.
6091 if (!is_base_time_past(qopt->base_time, &now) &&
6092 igc_is_device_id_i225(hw))
6093 return false;
6095 for (n = 0; n < qopt->num_entries; n++) {
6096 const struct tc_taprio_sched_entry *e, *prev;
6097 int i;
6099 prev = n ? &qopt->entries[n - 1] : NULL;
6100 e = &qopt->entries[n];
6102 /* i225 only supports "global" frame preemption
6103 * settings.
6105 if (e->command != TC_TAPRIO_CMD_SET_GATES)
6106 return false;
6108 for (i = 0; i < adapter->num_tx_queues; i++)
6109 if (e->gate_mask & BIT(i)) {
6110 queue_uses[i]++;
6112 /* There are limitations: A single queue cannot
6113 * be opened and closed multiple times per cycle
6114 * unless the gate stays open. Check for it.
6116 if (queue_uses[i] > 1 &&
6117 !(prev->gate_mask & BIT(i)))
6118 return false;
6122 return true;
6125 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
6126 struct tc_etf_qopt_offload *qopt)
6128 struct igc_hw *hw = &adapter->hw;
6129 int err;
6131 if (hw->mac.type != igc_i225)
6132 return -EOPNOTSUPP;
6134 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
6135 if (err)
6136 return err;
6138 return igc_tsn_offload_apply(adapter);
6141 static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
6143 unsigned long flags;
6144 int i;
6146 adapter->base_time = 0;
6147 adapter->cycle_time = NSEC_PER_SEC;
6148 adapter->taprio_offload_enable = false;
6149 adapter->qbv_config_change_errors = 0;
6150 adapter->qbv_count = 0;
6152 for (i = 0; i < adapter->num_tx_queues; i++) {
6153 struct igc_ring *ring = adapter->tx_ring[i];
6155 ring->start_time = 0;
6156 ring->end_time = NSEC_PER_SEC;
6160 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6162 adapter->qbv_transition = false;
6164 for (i = 0; i < adapter->num_tx_queues; i++) {
6165 struct igc_ring *ring = adapter->tx_ring[i];
6167 ring->oper_gate_closed = false;
6168 ring->admin_gate_closed = false;
6171 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6173 return 0;
6176 static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
6178 igc_qbv_clear_schedule(adapter);
6180 return 0;
6183 static void igc_taprio_stats(struct net_device *dev,
6184 struct tc_taprio_qopt_stats *stats)
6186 /* When Strict_End is enabled, the tx_overruns counter
6187 * will always be zero.
6189 stats->tx_overruns = 0;
6192 static void igc_taprio_queue_stats(struct net_device *dev,
6193 struct tc_taprio_qopt_queue_stats *queue_stats)
6195 struct tc_taprio_qopt_stats *stats = &queue_stats->stats;
6197 /* When Strict_End is enabled, the tx_overruns counter
6198 * will always be zero.
6200 stats->tx_overruns = 0;
6203 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
6204 struct tc_taprio_qopt_offload *qopt)
6206 bool queue_configured[IGC_MAX_TX_QUEUES] = { };
6207 struct igc_hw *hw = &adapter->hw;
6208 u32 start_time = 0, end_time = 0;
6209 struct timespec64 now;
6210 unsigned long flags;
6211 size_t n;
6212 int i;
6214 switch (qopt->cmd) {
6215 case TAPRIO_CMD_REPLACE:
6216 break;
6217 case TAPRIO_CMD_DESTROY:
6218 return igc_tsn_clear_schedule(adapter);
6219 case TAPRIO_CMD_STATS:
6220 igc_taprio_stats(adapter->netdev, &qopt->stats);
6221 return 0;
6222 case TAPRIO_CMD_QUEUE_STATS:
6223 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
6224 return 0;
6225 default:
6226 return -EOPNOTSUPP;
6229 if (qopt->base_time < 0)
6230 return -ERANGE;
6232 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable)
6233 return -EBUSY;
6235 if (!validate_schedule(adapter, qopt))
6236 return -EINVAL;
6238 adapter->cycle_time = qopt->cycle_time;
6239 adapter->base_time = qopt->base_time;
6240 adapter->taprio_offload_enable = true;
6242 igc_ptp_read(adapter, &now);
6244 for (n = 0; n < qopt->num_entries; n++) {
6245 struct tc_taprio_sched_entry *e = &qopt->entries[n];
6247 end_time += e->interval;
6249 /* If any of the conditions below are true, we need to manually
6250 * control the end time of the cycle.
6251 * 1. Qbv users can specify a cycle time that is not equal
6252 * to the total GCL intervals. Hence, recalculation is
6253 * necessary here to exclude the time interval that
6254 * exceeds the cycle time.
6255 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
6256 * once the end of the list is reached, it will switch
6257 * to the END_OF_CYCLE state and leave the gates in the
6258 * same state until the next cycle is started.
6260 if (end_time > adapter->cycle_time ||
6261 n + 1 == qopt->num_entries)
6262 end_time = adapter->cycle_time;
6264 for (i = 0; i < adapter->num_tx_queues; i++) {
6265 struct igc_ring *ring = adapter->tx_ring[i];
6267 if (!(e->gate_mask & BIT(i)))
6268 continue;
6270 /* Check whether a queue stays open for more than one
6271 * entry. If so, keep the start and advance the end
6272 * time.
6274 if (!queue_configured[i])
6275 ring->start_time = start_time;
6276 ring->end_time = end_time;
6278 if (ring->start_time >= adapter->cycle_time)
6279 queue_configured[i] = false;
6280 else
6281 queue_configured[i] = true;
6284 start_time += e->interval;
6287 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6289 /* Check whether a queue gets configured.
6290 * If not, set the start and end time to be end time.
6292 for (i = 0; i < adapter->num_tx_queues; i++) {
6293 struct igc_ring *ring = adapter->tx_ring[i];
6295 if (!is_base_time_past(qopt->base_time, &now)) {
6296 ring->admin_gate_closed = false;
6297 } else {
6298 ring->oper_gate_closed = false;
6299 ring->admin_gate_closed = false;
6302 if (!queue_configured[i]) {
6303 if (!is_base_time_past(qopt->base_time, &now))
6304 ring->admin_gate_closed = true;
6305 else
6306 ring->oper_gate_closed = true;
6308 ring->start_time = end_time;
6309 ring->end_time = end_time;
6313 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6315 for (i = 0; i < adapter->num_tx_queues; i++) {
6316 struct igc_ring *ring = adapter->tx_ring[i];
6317 struct net_device *dev = adapter->netdev;
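/* qopt->max_sdu counts L2 payload only; the limit kept on the ring is
 * meant to also cover the MAC header (hard_header_len minus the 2-byte
 * EtherType, ETH_TLEN), matching the Tx-path frame-length check.
 */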
6319 if (qopt->max_sdu[i])
6320 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN;
6321 else
6322 ring->max_sdu = 0;
6325 return 0;
6328 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
6329 struct tc_taprio_qopt_offload *qopt)
6331 struct igc_hw *hw = &adapter->hw;
6332 int err;
6334 if (hw->mac.type != igc_i225)
6335 return -EOPNOTSUPP;
6337 err = igc_save_qbv_schedule(adapter, qopt);
6338 if (err)
6339 return err;
6341 return igc_tsn_offload_apply(adapter);
6344 static int igc_save_cbs_params(struct igc_adapter *adapter, int queue,
6345 bool enable, int idleslope, int sendslope,
6346 int hicredit, int locredit)
6348 bool cbs_status[IGC_MAX_SR_QUEUES] = { false };
6349 struct net_device *netdev = adapter->netdev;
6350 struct igc_ring *ring;
6351 int i;
6353 /* i225 has two sets of credit-based shaper logic.
6354 * Supporting it only on the top two priority queues
6356 if (queue < 0 || queue > 1)
6357 return -EINVAL;
6359 ring = adapter->tx_ring[queue];
6361 for (i = 0; i < IGC_MAX_SR_QUEUES; i++)
6362 if (adapter->tx_ring[i])
6363 cbs_status[i] = adapter->tx_ring[i]->cbs_enable;
6365 /* CBS should be enabled on the highest priority queue first in order
6366 * for the CBS algorithm to operate as intended.
6369 if (queue == 1 && !cbs_status[0]) {
6370 netdev_err(netdev,
6371 "Enabling CBS on queue1 before queue0\n");
6372 return -EINVAL;
6375 if (queue == 0 && cbs_status[1]) {
6376 netdev_err(netdev,
6377 "Disabling CBS on queue0 before queue1\n");
6378 return -EINVAL;
6382 ring->cbs_enable = enable;
6383 ring->idleslope = idleslope;
6384 ring->sendslope = sendslope;
6385 ring->hicredit = hicredit;
6386 ring->locredit = locredit;
6388 return 0;
6391 static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
6392 struct tc_cbs_qopt_offload *qopt)
6394 struct igc_hw *hw = &adapter->hw;
6395 int err;
6397 if (hw->mac.type != igc_i225)
6398 return -EOPNOTSUPP;
6400 if (qopt->queue < 0 || qopt->queue > 1)
6401 return -EINVAL;
6403 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable,
6404 qopt->idleslope, qopt->sendslope,
6405 qopt->hicredit, qopt->locredit);
6406 if (err)
6407 return err;
6409 return igc_tsn_offload_apply(adapter);
6412 static int igc_tc_query_caps(struct igc_adapter *adapter,
6413 struct tc_query_caps_base *base)
6415 struct igc_hw *hw = &adapter->hw;
6417 switch (base->type) {
6418 case TC_SETUP_QDISC_TAPRIO: {
6419 struct tc_taprio_caps *caps = base->caps;
6421 caps->broken_mqprio = true;
6423 if (hw->mac.type == igc_i225) {
6424 caps->supports_queue_max_sdu = true;
6425 caps->gate_mask_per_txq = true;
6428 return 0;
6430 default:
6431 return -EOPNOTSUPP;
6435 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
6438 struct igc_adapter *adapter = netdev_priv(dev);
6440 adapter->tc_setup_type = type;
6442 switch (type) {
6443 case TC_QUERY_CAPS:
6444 return igc_tc_query_caps(adapter, type_data);
6445 case TC_SETUP_QDISC_TAPRIO:
6446 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
6448 case TC_SETUP_QDISC_ETF:
6449 return igc_tsn_enable_launchtime(adapter, type_data);
6451 case TC_SETUP_QDISC_CBS:
6452 return igc_tsn_enable_cbs(adapter, type_data);
6454 default:
6455 return -EOPNOTSUPP;
6459 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6461 struct igc_adapter *adapter = netdev_priv(dev);
6463 switch (bpf->command) {
6464 case XDP_SETUP_PROG:
6465 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
6466 case XDP_SETUP_XSK_POOL:
6467 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
6468 bpf->xsk.queue_id);
6469 default:
6470 return -EOPNOTSUPP;
6474 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
6475 struct xdp_frame **frames, u32 flags)
6477 struct igc_adapter *adapter = netdev_priv(dev);
6478 int cpu = smp_processor_id();
6479 struct netdev_queue *nq;
6480 struct igc_ring *ring;
6481 int i, nxmit;
6483 if (unlikely(!netif_carrier_ok(dev)))
6484 return -ENETDOWN;
6486 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6487 return -EINVAL;
6489 ring = igc_xdp_get_tx_ring(adapter, cpu);
6490 nq = txring_txq(ring);
6492 __netif_tx_lock(nq, cpu);
6494 /* Avoid transmit queue timeout since we share it with the slow path */
6495 txq_trans_cond_update(nq);
6497 nxmit = 0;
6498 for (i = 0; i < num_frames; i++) {
6499 int err;
6500 struct xdp_frame *xdpf = frames[i];
6502 err = igc_xdp_init_tx_descriptor(ring, xdpf);
6503 if (err)
6504 break;
6505 nxmit++;
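/* Descriptors are only queued above; the tail (doorbell) write is
 * deferred until the caller sets XDP_XMIT_FLUSH, so a whole batch
 * costs a single MMIO write.
 */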
6508 if (flags & XDP_XMIT_FLUSH)
6509 igc_flush_tx_descriptors(ring);
6511 __netif_tx_unlock(nq);
6513 return nxmit;
6516 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
6517 struct igc_q_vector *q_vector)
6519 struct igc_hw *hw = &adapter->hw;
6520 u32 eics = 0;
6522 eics |= q_vector->eims_value;
6523 wr32(IGC_EICS, eics);
6526 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
6528 struct igc_adapter *adapter = netdev_priv(dev);
6529 struct igc_q_vector *q_vector;
6530 struct igc_ring *ring;
6532 if (test_bit(__IGC_DOWN, &adapter->state))
6533 return -ENETDOWN;
6535 if (!igc_xdp_is_enabled(adapter))
6536 return -ENXIO;
6538 if (queue_id >= adapter->num_rx_queues)
6539 return -EINVAL;
6541 ring = adapter->rx_ring[queue_id];
6543 if (!ring->xsk_pool)
6544 return -ENXIO;
6546 q_vector = adapter->q_vector[queue_id];
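/* If NAPI is already scheduled, marking it missed makes it poll the
 * ring again; otherwise fire a software interrupt to kick the poll.
 */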
6547 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
6548 igc_trigger_rxtxq_interrupt(adapter, q_vector);
6550 return 0;
6553 static ktime_t igc_get_tstamp(struct net_device *dev,
6554 const struct skb_shared_hwtstamps *hwtstamps,
6555 bool cycles)
6557 struct igc_adapter *adapter = netdev_priv(dev);
6558 struct igc_inline_rx_tstamps *tstamp;
6559 ktime_t timestamp;
6561 tstamp = hwtstamps->netdev_data;
6563 if (cycles)
6564 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1);
6565 else
6566 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
6568 return timestamp;
6571 static const struct net_device_ops igc_netdev_ops = {
6572 .ndo_open = igc_open,
6573 .ndo_stop = igc_close,
6574 .ndo_start_xmit = igc_xmit_frame,
6575 .ndo_set_rx_mode = igc_set_rx_mode,
6576 .ndo_set_mac_address = igc_set_mac,
6577 .ndo_change_mtu = igc_change_mtu,
6578 .ndo_tx_timeout = igc_tx_timeout,
6579 .ndo_get_stats64 = igc_get_stats64,
6580 .ndo_fix_features = igc_fix_features,
6581 .ndo_set_features = igc_set_features,
6582 .ndo_features_check = igc_features_check,
6583 .ndo_eth_ioctl = igc_ioctl,
6584 .ndo_setup_tc = igc_setup_tc,
6585 .ndo_bpf = igc_bpf,
6586 .ndo_xdp_xmit = igc_xdp_xmit,
6587 .ndo_xsk_wakeup = igc_xsk_wakeup,
6588 .ndo_get_tstamp = igc_get_tstamp,
6591 /* PCIe configuration access */
6592 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6594 struct igc_adapter *adapter = hw->back;
6596 pci_read_config_word(adapter->pdev, reg, value);
6599 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
6601 struct igc_adapter *adapter = hw->back;
6603 pci_write_config_word(adapter->pdev, reg, *value);
6606 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6608 struct igc_adapter *adapter = hw->back;
6610 if (!pci_is_pcie(adapter->pdev))
6611 return -IGC_ERR_CONFIG;
6613 pcie_capability_read_word(adapter->pdev, reg, value);
6615 return IGC_SUCCESS;
6618 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
6620 struct igc_adapter *adapter = hw->back;
6622 if (!pci_is_pcie(adapter->pdev))
6623 return -IGC_ERR_CONFIG;
6625 pcie_capability_write_word(adapter->pdev, reg, *value);
6627 return IGC_SUCCESS;
6630 u32 igc_rd32(struct igc_hw *hw, u32 reg)
6632 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
6633 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
6634 u32 value = 0;
6636 if (IGC_REMOVED(hw_addr))
6637 return ~value;
6639 value = readl(&hw_addr[reg]);
6641 /* reads should not return all F's */
6642 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
6643 struct net_device *netdev = igc->netdev;
6645 hw->hw_addr = NULL;
6646 netif_device_detach(netdev);
6647 netdev_err(netdev, "PCIe link lost, device now detached\n");
6648 WARN(pci_device_is_present(igc->pdev),
6649 "igc: Failed to read reg 0x%x!\n", reg);
6652 return value;
6655 /* Mapping HW RSS Type to enum xdp_rss_hash_type */
6656 static enum xdp_rss_hash_type igc_xdp_rss_type[IGC_RSS_TYPE_MAX_TABLE] = {
6657 [IGC_RSS_TYPE_NO_HASH] = XDP_RSS_TYPE_L2,
6658 [IGC_RSS_TYPE_HASH_TCP_IPV4] = XDP_RSS_TYPE_L4_IPV4_TCP,
6659 [IGC_RSS_TYPE_HASH_IPV4] = XDP_RSS_TYPE_L3_IPV4,
6660 [IGC_RSS_TYPE_HASH_TCP_IPV6] = XDP_RSS_TYPE_L4_IPV6_TCP,
6661 [IGC_RSS_TYPE_HASH_IPV6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
6662 [IGC_RSS_TYPE_HASH_IPV6] = XDP_RSS_TYPE_L3_IPV6,
6663 [IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
6664 [IGC_RSS_TYPE_HASH_UDP_IPV4] = XDP_RSS_TYPE_L4_IPV4_UDP,
6665 [IGC_RSS_TYPE_HASH_UDP_IPV6] = XDP_RSS_TYPE_L4_IPV6_UDP,
6666 [IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX,
6667 [10] = XDP_RSS_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW */
6668 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6669 [12] = XDP_RSS_TYPE_NONE, /* to handle future HW revisions */
6670 [13] = XDP_RSS_TYPE_NONE,
6671 [14] = XDP_RSS_TYPE_NONE,
6672 [15] = XDP_RSS_TYPE_NONE,
6675 static int igc_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
6676 enum xdp_rss_hash_type *rss_type)
6678 const struct igc_xdp_buff *ctx = (void *)_ctx;
6680 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH))
6681 return -ENODATA;
6683 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss);
6684 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)];
6686 return 0;
6689 static int igc_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
6691 const struct igc_xdp_buff *ctx = (void *)_ctx;
6692 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev);
6693 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts;
6695 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) {
6696 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0);
6698 return 0;
6701 return -ENODATA;
6704 static const struct xdp_metadata_ops igc_xdp_metadata_ops = {
6705 .xmo_rx_hash = igc_xdp_rx_hash,
6706 .xmo_rx_timestamp = igc_xdp_rx_timestamp,
6709 static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
6711 struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
6712 hrtimer);
6713 unsigned long flags;
6714 int i;
6716 spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
6718 adapter->qbv_transition = true;
6719 for (i = 0; i < adapter->num_tx_queues; i++) {
6720 struct igc_ring *tx_ring = adapter->tx_ring[i];
6722 if (tx_ring->admin_gate_closed) {
6723 tx_ring->admin_gate_closed = false;
6724 tx_ring->oper_gate_closed = true;
6726 tx_ring->oper_gate_closed = false;
6729 adapter->qbv_transition = false;
6731 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
6733 return HRTIMER_NORESTART;
6737 * igc_probe - Device Initialization Routine
6738 * @pdev: PCI device information struct
6739 * @ent: entry in igc_pci_tbl
6741 * Returns 0 on success, negative on failure
6743 * igc_probe initializes an adapter identified by a pci_dev structure.
6744 * The OS initialization, configuring the adapter private structure,
6745 * and a hardware reset occur.
6747 static int igc_probe(struct pci_dev *pdev,
6748 const struct pci_device_id *ent)
6750 struct igc_adapter *adapter;
6751 struct net_device *netdev;
6752 struct igc_hw *hw;
6753 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
6754 int err;
6756 err = pci_enable_device_mem(pdev);
6757 if (err)
6758 return err;
6760 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
6761 if (err) {
6762 dev_err(&pdev->dev,
6763 "No usable DMA configuration, aborting\n");
6764 goto err_dma;
6767 err = pci_request_mem_regions(pdev, igc_driver_name);
6768 if (err)
6769 goto err_pci_reg;
6771 err = pci_enable_ptm(pdev, NULL);
6772 if (err < 0)
6773 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
6775 pci_set_master(pdev);
6777 err = -ENOMEM;
6778 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
6779 IGC_MAX_TX_QUEUES);
6781 if (!netdev)
6782 goto err_alloc_etherdev;
6784 SET_NETDEV_DEV(netdev, &pdev->dev);
6786 pci_set_drvdata(pdev, netdev);
6787 adapter = netdev_priv(netdev);
6788 adapter->netdev = netdev;
6789 adapter->pdev = pdev;
6790 hw = &adapter->hw;
6791 hw->back = adapter;
6792 adapter->port_num = hw->bus.func;
6793 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6795 err = pci_save_state(pdev);
6796 if (err)
6797 goto err_ioremap;
6799 err = -EIO;
6800 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
6801 pci_resource_len(pdev, 0));
6802 if (!adapter->io_addr)
6803 goto err_ioremap;
6805 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
6806 hw->hw_addr = adapter->io_addr;
6808 netdev->netdev_ops = &igc_netdev_ops;
6809 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
6810 igc_ethtool_set_ops(netdev);
6811 netdev->watchdog_timeo = 5 * HZ;
6813 netdev->mem_start = pci_resource_start(pdev, 0);
6814 netdev->mem_end = pci_resource_end(pdev, 0);
6816 /* PCI config space info */
6817 hw->vendor_id = pdev->vendor;
6818 hw->device_id = pdev->device;
6819 hw->revision_id = pdev->revision;
6820 hw->subsystem_vendor_id = pdev->subsystem_vendor;
6821 hw->subsystem_device_id = pdev->subsystem_device;
6823 /* Copy the default MAC and PHY function pointers */
6824 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6825 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
6827 /* Initialize skew-specific constants */
6828 err = ei->get_invariants(hw);
6829 if (err)
6830 goto err_sw_init;
6832 /* Add supported features to the features list*/
6833 netdev->features |= NETIF_F_SG;
6834 netdev->features |= NETIF_F_TSO;
6835 netdev->features |= NETIF_F_TSO6;
6836 netdev->features |= NETIF_F_TSO_ECN;
6837 netdev->features |= NETIF_F_RXHASH;
6838 netdev->features |= NETIF_F_RXCSUM;
6839 netdev->features |= NETIF_F_HW_CSUM;
6840 netdev->features |= NETIF_F_SCTP_CRC;
6841 netdev->features |= NETIF_F_HW_TC;
6843 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
6844 NETIF_F_GSO_GRE_CSUM | \
6845 NETIF_F_GSO_IPXIP4 | \
6846 NETIF_F_GSO_IPXIP6 | \
6847 NETIF_F_GSO_UDP_TUNNEL | \
6848 NETIF_F_GSO_UDP_TUNNEL_CSUM)
6850 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
6851 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
6853 /* setup the private structure */
6854 err = igc_sw_init(adapter);
6855 if (err)
6856 goto err_sw_init;
6858 /* copy netdev features into list of user selectable features */
6859 netdev->hw_features |= NETIF_F_NTUPLE;
6860 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
6861 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
6862 netdev->hw_features |= netdev->features;
6864 netdev->features |= NETIF_F_HIGHDMA;
6866 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
6867 netdev->mpls_features |= NETIF_F_HW_CSUM;
6868 netdev->hw_enc_features |= netdev->vlan_features;
6870 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
6871 NETDEV_XDP_ACT_XSK_ZEROCOPY;
6873 /* MTU range: 68 - 9216 */
6874 netdev->min_mtu = ETH_MIN_MTU;
6875 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
6877 /* before reading the NVM, reset the controller to put the device in a
6878 * known good starting state
6880 hw->mac.ops.reset_hw(hw);
6882 if (igc_get_flash_presence_i225(hw)) {
6883 if (hw->nvm.ops.validate(hw) < 0) {
6884 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
6885 err = -EIO;
6886 goto err_eeprom;
6890 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
6891 /* copy the MAC address out of the NVM */
6892 if (hw->mac.ops.read_mac_addr(hw))
6893 dev_err(&pdev->dev, "NVM Read Error\n");
6896 eth_hw_addr_set(netdev, hw->mac.addr);
6898 if (!is_valid_ether_addr(netdev->dev_addr)) {
6899 dev_err(&pdev->dev, "Invalid MAC Address\n");
	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
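	/* The packet-buffer size registers partition on-die buffering
	 * between the Rx and Tx paths; the I225 defaults are programmed
	 * explicitly rather than relying on post-reset values.
	 */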
	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	adapter->hrtimer.function = &igc_qbv_scheduling_timer;

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0xaf;
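	/* 0xaf advertises 10/100 half and full duplex plus 1000 and 2500
	 * full duplex; half duplex is not supported above 100 Mbps.
	 */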
	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	igc_tsn_clear_schedule(adapter);
	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strscpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	if (IS_ENABLED(CONFIG_IGC_LEDS)) {
		err = igc_led_setup(adapter);
		if (err)
			goto err_register;
	}

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	pci_disable_ptm(pdev);
	pci_clear_master(pdev);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);
	hrtimer_cancel(&adapter->hrtimer);
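	/* Everything that could rearm the device (timers, work items, the
	 * Qbv hrtimer) must be quiesced before the netdev is unregistered
	 * and freed below.
	 */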
	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_device(pdev);
}
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;
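	/* If any wake filter remains armed, program the Rx unit and the
	 * wake-up control registers so the MAC can match packets in D3.
	 */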
	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}

static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
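	/* Always refuse the immediate idle request; the delayed suspend
	 * scheduled above (only when the link is down) decides when the
	 * device actually goes to sleep.
	 */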
	return -EBUSY;
}
#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
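/* PCIe AER recovery: the callbacks below detach the interface when an
 * error is detected, reinitialize the device after the slot reset and
 * reattach it once traffic may flow again.
 */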
/**
 *  igc_io_error_detected - called when PCI error is detected
 *  @pdev: Pointer to PCI device
 *  @state: The current PCI connection state
 *
 *  This function is called after a PCI bus error affecting
 *  this device has been detected.
 **/
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 *  igc_io_slot_reset - called after the PCI bus has been reset.
 *  @pdev: Pointer to PCI device
 *
 *  Restart the card from scratch, as if from a cold-boot. Implementation
 *  resembles the first half of the igc_resume routine.
 **/
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 *  igc_io_resume - called when traffic can start to flow again.
 *  @pdev: Pointer to PCI device
 *
 *  This callback is called when the error recovery driver tells us that
 *  it's OK to resume normal operation. Implementation resembles the
 *  second half of the igc_resume routine.
 **/
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif
static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};
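/* Usage note (values illustrative): when built as a module, the driver
 * binds to any device listed in igc_pci_tbl at load time, e.g.
 *   modprobe igc debug=16
 * where "debug" is the message-level module parameter declared above.
 */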
/**
 * igc_reinit_queues - reinitialize the device's queues
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative errno otherwise.
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
static void igc_disable_rx_ring_hw(struct igc_ring *ring)
{
	struct igc_hw *hw = &ring->q_vector->adapter->hw;
	u8 idx = ring->reg_idx;
	u32 rxdctl;

	rxdctl = rd32(IGC_RXDCTL(idx));
	rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
	rxdctl |= IGC_RXDCTL_SWFLUSH;
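	/* SWFLUSH tells the hardware to flush any descriptors still queued
	 * on this ring instead of completing them after the disable.
	 */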
	wr32(IGC_RXDCTL(idx), rxdctl);
}
void igc_disable_rx_ring(struct igc_ring *ring)
{
	igc_disable_rx_ring_hw(ring);
	igc_clean_rx_ring(ring);
}

void igc_enable_rx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

	igc_configure_rx_ring(adapter, ring);

	if (ring->xsk_pool)
		igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
	else
		igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
}
void igc_disable_tx_ring(struct igc_ring *ring)
{
	igc_disable_tx_ring_hw(ring);
	igc_clean_tx_ring(ring);
}

void igc_enable_tx_ring(struct igc_ring *ring)
{
	struct igc_adapter *adapter = ring->q_vector->adapter;

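	/* Unlike the Rx path there is no buffer-refill step here: Tx
	 * descriptors are only posted when the stack transmits.
	 */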
	igc_configure_tx_ring(adapter, ring);
}
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);

	return ret;
}

module_init(igc_init_module);
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);