1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018 Intel Corporation */
4 #include <linux/module.h>
5 #include <linux/types.h>
6 #include <linux/if_vlan.h>
11 #include <linux/pm_runtime.h>
12 #include <net/pkt_sched.h>
13 #include <linux/bpf_trace.h>
14 #include <net/xdp_sock_drv.h>
22 #define DRV_SUMMARY "Intel(R) 2.5G Ethernet Linux Driver"
24 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
26 #define IGC_XDP_PASS 0
27 #define IGC_XDP_CONSUMED BIT(0)
28 #define IGC_XDP_TX BIT(1)
29 #define IGC_XDP_REDIRECT BIT(2)
31 static int debug = -1;
33 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
34 MODULE_DESCRIPTION(DRV_SUMMARY);
35 MODULE_LICENSE("GPL v2");
36 module_param(debug, int, 0);
37 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
39 char igc_driver_name[] = "igc";
40 static const char igc_driver_string[] = DRV_SUMMARY;
41 static const char igc_copyright[] =
42 "Copyright(c) 2018 Intel Corporation.";
44 static const struct igc_info *igc_info_tbl[] = {
45 [board_base] = &igc_base_info,
48 static const struct pci_device_id igc_pci_tbl[] = {
49 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
50 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
51 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
52 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
53 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
54 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
55 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
56 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
57 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
58 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
59 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
60 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
61 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
62 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
63 { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
64 /* required last entry */
68 MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
77 void igc_reset(struct igc_adapter *adapter)
79 struct net_device *dev = adapter->netdev;
80 struct igc_hw *hw = &adapter->hw;
81 struct igc_fc_info *fc = &hw->fc;
84 /* Repartition PBA for greater than 9k MTU if required */
87 /* flow control settings
88 * The high water mark must be low enough to fit one full frame
89 * after transmitting the pause frame. As such we must have enough
90 * space to allow for us to complete our current transmit and then
91 * receive the frame that is in progress from the link partner.
93 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
95 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);
97 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
98 fc->low_water = fc->high_water - 16;
99 fc->pause_time = 0xFFFF;
101 fc->current_mode = fc->requested_mode;
103 hw->mac.ops.reset_hw(hw);
105 if (hw->mac.ops.init_hw(hw))
106 netdev_err(dev, "Error on hardware initialization\n");
108 /* Re-establish EEE setting */
109 igc_set_eee_i225(hw, true, true, true);
111 if (!netif_running(adapter->netdev))
112 igc_power_down_phy_copper_base(&adapter->hw);
114 /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
115 wr32(IGC_VET, ETH_P_8021Q);
117 /* Re-enable PTP, where applicable. */
118 igc_ptp_reset(adapter);
120 /* Re-enable TSN offloading, where applicable. */
121 igc_tsn_offload_apply(adapter);
123 igc_get_phy_info(hw);
127 * igc_power_up_link - Power up the phy link
128 * @adapter: address of board private structure
130 static void igc_power_up_link(struct igc_adapter *adapter)
132 igc_reset_phy(&adapter->hw);
134 igc_power_up_phy_copper(&adapter->hw);
136 igc_setup_link(&adapter->hw);
140 * igc_release_hw_control - release control of the h/w to f/w
141 * @adapter: address of board private structure
143 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
144 * For ASF and Pass Through versions of f/w this means that the
145 * driver is no longer loaded.
147 static void igc_release_hw_control(struct igc_adapter *adapter)
149 struct igc_hw *hw = &adapter->hw;
152 /* Let firmware take over control of h/w */
153 ctrl_ext = rd32(IGC_CTRL_EXT);
155 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
159 * igc_get_hw_control - get control of the h/w from f/w
160 * @adapter: address of board private structure
162 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
163 * For ASF and Pass Through versions of f/w this means that
164 * the driver is loaded.
166 static void igc_get_hw_control(struct igc_adapter *adapter)
168 struct igc_hw *hw = &adapter->hw;
171 /* Let firmware know the driver has taken over */
172 ctrl_ext = rd32(IGC_CTRL_EXT);
174 ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
177 static void igc_unmap_tx_buffer(struct device *dev, struct igc_tx_buffer *buf)
179 dma_unmap_single(dev, dma_unmap_addr(buf, dma),
180 dma_unmap_len(buf, len), DMA_TO_DEVICE);
182 dma_unmap_len_set(buf, len, 0);
186 * igc_clean_tx_ring - Free Tx Buffers
187 * @tx_ring: ring to be cleaned
189 static void igc_clean_tx_ring(struct igc_ring *tx_ring)
191 u16 i = tx_ring->next_to_clean;
192 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
195 while (i != tx_ring->next_to_use) {
196 union igc_adv_tx_desc *eop_desc, *tx_desc;
198 switch (tx_buffer->type) {
199 case IGC_TX_BUFFER_TYPE_XSK:
202 case IGC_TX_BUFFER_TYPE_XDP:
203 xdp_return_frame(tx_buffer->xdpf);
204 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
206 case IGC_TX_BUFFER_TYPE_SKB:
207 dev_kfree_skb_any(tx_buffer->skb);
208 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
211 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
215 /* check for eop_desc to determine the end of the packet */
216 eop_desc = tx_buffer->next_to_watch;
217 tx_desc = IGC_TX_DESC(tx_ring, i);
219 /* unmap remaining buffers */
220 while (tx_desc != eop_desc) {
224 if (unlikely(i == tx_ring->count)) {
226 tx_buffer = tx_ring->tx_buffer_info;
227 tx_desc = IGC_TX_DESC(tx_ring, 0);
230 /* unmap any remaining paged data */
231 if (dma_unmap_len(tx_buffer, len))
232 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
235 tx_buffer->next_to_watch = NULL;
237 /* move us one more past the eop_desc for start of next pkt */
240 if (unlikely(i == tx_ring->count)) {
242 tx_buffer = tx_ring->tx_buffer_info;
246 if (tx_ring->xsk_pool && xsk_frames)
247 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
249 /* reset BQL for queue */
250 netdev_tx_reset_queue(txring_txq(tx_ring));
252 /* reset next_to_use and next_to_clean */
253 tx_ring->next_to_use = 0;
254 tx_ring->next_to_clean = 0;
258 * igc_free_tx_resources - Free Tx Resources per Queue
259 * @tx_ring: Tx descriptor ring for a specific queue
261 * Free all transmit software resources
263 void igc_free_tx_resources(struct igc_ring *tx_ring)
265 igc_clean_tx_ring(tx_ring);
267 vfree(tx_ring->tx_buffer_info);
268 tx_ring->tx_buffer_info = NULL;
270 /* if not set, then don't free */
274 dma_free_coherent(tx_ring->dev, tx_ring->size,
275 tx_ring->desc, tx_ring->dma);
277 tx_ring->desc = NULL;
281 * igc_free_all_tx_resources - Free Tx Resources for All Queues
282 * @adapter: board private structure
284 * Free all transmit software resources
286 static void igc_free_all_tx_resources(struct igc_adapter *adapter)
290 for (i = 0; i < adapter->num_tx_queues; i++)
291 igc_free_tx_resources(adapter->tx_ring[i]);
295 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
296 * @adapter: board private structure
298 static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
302 for (i = 0; i < adapter->num_tx_queues; i++)
303 if (adapter->tx_ring[i])
304 igc_clean_tx_ring(adapter->tx_ring[i]);
308 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
309 * @tx_ring: tx descriptor ring (for a specific queue) to setup
311 * Return 0 on success, negative on failure
313 int igc_setup_tx_resources(struct igc_ring *tx_ring)
315 struct net_device *ndev = tx_ring->netdev;
316 struct device *dev = tx_ring->dev;
319 size = sizeof(struct igc_tx_buffer) * tx_ring->count;
320 tx_ring->tx_buffer_info = vzalloc(size);
321 if (!tx_ring->tx_buffer_info)
324 /* round up to nearest 4K */
325 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
326 tx_ring->size = ALIGN(tx_ring->size, 4096);
328 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
329 &tx_ring->dma, GFP_KERNEL);
334 tx_ring->next_to_use = 0;
335 tx_ring->next_to_clean = 0;
340 vfree(tx_ring->tx_buffer_info);
341 netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
346 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
347 * @adapter: board private structure
349 * Return 0 on success, negative on failure
351 static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
353 struct net_device *dev = adapter->netdev;
356 for (i = 0; i < adapter->num_tx_queues; i++) {
357 err = igc_setup_tx_resources(adapter->tx_ring[i]);
359 netdev_err(dev, "Error on Tx queue %u setup\n", i);
360 for (i--; i >= 0; i--)
361 igc_free_tx_resources(adapter->tx_ring[i]);
369 static void igc_clean_rx_ring_page_shared(struct igc_ring *rx_ring)
371 u16 i = rx_ring->next_to_clean;
373 dev_kfree_skb(rx_ring->skb);
376 /* Free all the Rx ring sk_buffs */
377 while (i != rx_ring->next_to_alloc) {
378 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
380 /* Invalidate cache lines that may have been written to by
381 * device so that we avoid corrupting memory.
383 dma_sync_single_range_for_cpu(rx_ring->dev,
385 buffer_info->page_offset,
386 igc_rx_bufsz(rx_ring),
389 /* free resources associated with mapping */
390 dma_unmap_page_attrs(rx_ring->dev,
392 igc_rx_pg_size(rx_ring),
395 __page_frag_cache_drain(buffer_info->page,
396 buffer_info->pagecnt_bias);
399 if (i == rx_ring->count)
404 static void igc_clean_rx_ring_xsk_pool(struct igc_ring *ring)
406 struct igc_rx_buffer *bi;
409 for (i = 0; i < ring->count; i++) {
410 bi = &ring->rx_buffer_info[i];
414 xsk_buff_free(bi->xdp);
420 * igc_clean_rx_ring - Free Rx Buffers per Queue
421 * @ring: ring to free buffers from
423 static void igc_clean_rx_ring(struct igc_ring *ring)
426 igc_clean_rx_ring_xsk_pool(ring);
428 igc_clean_rx_ring_page_shared(ring);
430 clear_ring_uses_large_buffer(ring);
432 ring->next_to_alloc = 0;
433 ring->next_to_clean = 0;
434 ring->next_to_use = 0;
438 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
439 * @adapter: board private structure
441 static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
445 for (i = 0; i < adapter->num_rx_queues; i++)
446 if (adapter->rx_ring[i])
447 igc_clean_rx_ring(adapter->rx_ring[i]);
451 * igc_free_rx_resources - Free Rx Resources
452 * @rx_ring: ring to clean the resources from
454 * Free all receive software resources
456 void igc_free_rx_resources(struct igc_ring *rx_ring)
458 igc_clean_rx_ring(rx_ring);
460 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
462 vfree(rx_ring->rx_buffer_info);
463 rx_ring->rx_buffer_info = NULL;
465 /* if not set, then don't free */
469 dma_free_coherent(rx_ring->dev, rx_ring->size,
470 rx_ring->desc, rx_ring->dma);
472 rx_ring->desc = NULL;
476 * igc_free_all_rx_resources - Free Rx Resources for All Queues
477 * @adapter: board private structure
479 * Free all receive software resources
481 static void igc_free_all_rx_resources(struct igc_adapter *adapter)
485 for (i = 0; i < adapter->num_rx_queues; i++)
486 igc_free_rx_resources(adapter->rx_ring[i]);
490 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
491 * @rx_ring: rx descriptor ring (for a specific queue) to setup
493 * Returns 0 on success, negative on failure
495 int igc_setup_rx_resources(struct igc_ring *rx_ring)
497 struct net_device *ndev = rx_ring->netdev;
498 struct device *dev = rx_ring->dev;
499 u8 index = rx_ring->queue_index;
500 int size, desc_len, res;
502 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index,
503 rx_ring->q_vector->napi.napi_id);
505 netdev_err(ndev, "Failed to register xdp_rxq index %u\n",
510 size = sizeof(struct igc_rx_buffer) * rx_ring->count;
511 rx_ring->rx_buffer_info = vzalloc(size);
512 if (!rx_ring->rx_buffer_info)
515 desc_len = sizeof(union igc_adv_rx_desc);
517 /* Round up to nearest 4K */
518 rx_ring->size = rx_ring->count * desc_len;
519 rx_ring->size = ALIGN(rx_ring->size, 4096);
521 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
522 &rx_ring->dma, GFP_KERNEL);
527 rx_ring->next_to_alloc = 0;
528 rx_ring->next_to_clean = 0;
529 rx_ring->next_to_use = 0;
534 xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
535 vfree(rx_ring->rx_buffer_info);
536 rx_ring->rx_buffer_info = NULL;
537 netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
542 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
543 * (Descriptors) for all queues
544 * @adapter: board private structure
546 * Return 0 on success, negative on failure
548 static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
550 struct net_device *dev = adapter->netdev;
553 for (i = 0; i < adapter->num_rx_queues; i++) {
554 err = igc_setup_rx_resources(adapter->rx_ring[i]);
556 netdev_err(dev, "Error on Rx queue %u setup\n", i);
557 for (i--; i >= 0; i--)
558 igc_free_rx_resources(adapter->rx_ring[i]);
566 static struct xsk_buff_pool *igc_get_xsk_pool(struct igc_adapter *adapter,
567 struct igc_ring *ring)
569 if (!igc_xdp_is_enabled(adapter) ||
570 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags))
573 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index);
577 * igc_configure_rx_ring - Configure a receive ring after Reset
578 * @adapter: board private structure
579 * @ring: receive ring to be configured
581 * Configure the Rx unit of the MAC after a reset.
583 static void igc_configure_rx_ring(struct igc_adapter *adapter,
584 struct igc_ring *ring)
586 struct igc_hw *hw = &adapter->hw;
587 union igc_adv_rx_desc *rx_desc;
588 int reg_idx = ring->reg_idx;
589 u32 srrctl = 0, rxdctl = 0;
590 u64 rdba = ring->dma;
593 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
594 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
595 if (ring->xsk_pool) {
596 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
597 MEM_TYPE_XSK_BUFF_POOL,
599 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
601 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
602 MEM_TYPE_PAGE_SHARED,
606 if (igc_xdp_is_enabled(adapter))
607 set_ring_uses_large_buffer(ring);
609 /* disable the queue */
610 wr32(IGC_RXDCTL(reg_idx), 0);
612 /* Set DMA base address registers */
613 wr32(IGC_RDBAL(reg_idx),
614 rdba & 0x00000000ffffffffULL);
615 wr32(IGC_RDBAH(reg_idx), rdba >> 32);
616 wr32(IGC_RDLEN(reg_idx),
617 ring->count * sizeof(union igc_adv_rx_desc));
619 /* initialize head and tail */
620 ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
621 wr32(IGC_RDH(reg_idx), 0);
622 writel(0, ring->tail);
624 /* reset next-to- use/clean to place SW in sync with hardware */
625 ring->next_to_clean = 0;
626 ring->next_to_use = 0;
629 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool);
630 else if (ring_uses_large_buffer(ring))
631 buf_size = IGC_RXBUFFER_3072;
633 buf_size = IGC_RXBUFFER_2048;
635 srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
636 srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
637 srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
639 wr32(IGC_SRRCTL(reg_idx), srrctl);
641 rxdctl |= IGC_RX_PTHRESH;
642 rxdctl |= IGC_RX_HTHRESH << 8;
643 rxdctl |= IGC_RX_WTHRESH << 16;
645 /* initialize rx_buffer_info */
646 memset(ring->rx_buffer_info, 0,
647 sizeof(struct igc_rx_buffer) * ring->count);
649 /* initialize Rx descriptor 0 */
650 rx_desc = IGC_RX_DESC(ring, 0);
651 rx_desc->wb.upper.length = 0;
653 /* enable receive descriptor fetching */
654 rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;
656 wr32(IGC_RXDCTL(reg_idx), rxdctl);
660 * igc_configure_rx - Configure receive Unit after Reset
661 * @adapter: board private structure
663 * Configure the Rx unit of the MAC after a reset.
665 static void igc_configure_rx(struct igc_adapter *adapter)
669 /* Setup the HW Rx Head and Tail Descriptor Pointers and
670 * the Base and Length of the Rx Descriptor Ring
672 for (i = 0; i < adapter->num_rx_queues; i++)
673 igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
677 * igc_configure_tx_ring - Configure transmit ring after Reset
678 * @adapter: board private structure
679 * @ring: tx ring to configure
681 * Configure a transmit ring after a reset.
683 static void igc_configure_tx_ring(struct igc_adapter *adapter,
684 struct igc_ring *ring)
686 struct igc_hw *hw = &adapter->hw;
687 int reg_idx = ring->reg_idx;
688 u64 tdba = ring->dma;
691 ring->xsk_pool = igc_get_xsk_pool(adapter, ring);
693 /* disable the queue */
694 wr32(IGC_TXDCTL(reg_idx), 0);
698 wr32(IGC_TDLEN(reg_idx),
699 ring->count * sizeof(union igc_adv_tx_desc));
700 wr32(IGC_TDBAL(reg_idx),
701 tdba & 0x00000000ffffffffULL);
702 wr32(IGC_TDBAH(reg_idx), tdba >> 32);
704 ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
705 wr32(IGC_TDH(reg_idx), 0);
706 writel(0, ring->tail);
708 txdctl |= IGC_TX_PTHRESH;
709 txdctl |= IGC_TX_HTHRESH << 8;
710 txdctl |= IGC_TX_WTHRESH << 16;
712 txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
713 wr32(IGC_TXDCTL(reg_idx), txdctl);
717 * igc_configure_tx - Configure transmit Unit after Reset
718 * @adapter: board private structure
720 * Configure the Tx unit of the MAC after a reset.
722 static void igc_configure_tx(struct igc_adapter *adapter)
726 for (i = 0; i < adapter->num_tx_queues; i++)
727 igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
731 * igc_setup_mrqc - configure the multiple receive queue control registers
732 * @adapter: Board private structure
734 static void igc_setup_mrqc(struct igc_adapter *adapter)
736 struct igc_hw *hw = &adapter->hw;
737 u32 j, num_rx_queues;
741 netdev_rss_key_fill(rss_key, sizeof(rss_key));
742 for (j = 0; j < 10; j++)
743 wr32(IGC_RSSRK(j), rss_key[j]);
745 num_rx_queues = adapter->rss_queues;
747 if (adapter->rss_indir_tbl_init != num_rx_queues) {
748 for (j = 0; j < IGC_RETA_SIZE; j++)
749 adapter->rss_indir_tbl[j] =
750 (j * num_rx_queues) / IGC_RETA_SIZE;
751 adapter->rss_indir_tbl_init = num_rx_queues;
753 igc_write_rss_indir_tbl(adapter);
755 /* Disable raw packet checksumming so that RSS hash is placed in
756 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
757 * offloads as they are enabled by default
759 rxcsum = rd32(IGC_RXCSUM);
760 rxcsum |= IGC_RXCSUM_PCSD;
762 /* Enable Receive Checksum Offload for SCTP */
763 rxcsum |= IGC_RXCSUM_CRCOFL;
765 /* Don't need to set TUOFL or IPOFL, they default to 1 */
766 wr32(IGC_RXCSUM, rxcsum);
768 /* Generate RSS hash based on packet types, TCP/UDP
769 * port numbers and/or IPv4/v6 src and dst addresses
771 mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
772 IGC_MRQC_RSS_FIELD_IPV4_TCP |
773 IGC_MRQC_RSS_FIELD_IPV6 |
774 IGC_MRQC_RSS_FIELD_IPV6_TCP |
775 IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
777 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
778 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
779 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
780 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
782 mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
784 wr32(IGC_MRQC, mrqc);
788 * igc_setup_rctl - configure the receive control registers
789 * @adapter: Board private structure
791 static void igc_setup_rctl(struct igc_adapter *adapter)
793 struct igc_hw *hw = &adapter->hw;
796 rctl = rd32(IGC_RCTL);
798 rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
799 rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);
801 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
802 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
804 /* enable stripping of CRC. Newer features require
805 * that the HW strips the CRC.
807 rctl |= IGC_RCTL_SECRC;
809 /* disable store bad packets and clear size bits. */
810 rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);
812 /* enable LPE to allow for reception of jumbo frames */
813 rctl |= IGC_RCTL_LPE;
815 /* disable queue 0 to prevent tail write w/o re-config */
816 wr32(IGC_RXDCTL(0), 0);
818 /* This is useful for sniffing bad packets. */
819 if (adapter->netdev->features & NETIF_F_RXALL) {
820 /* UPE and MPE will be handled by normal PROMISC logic
823 rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
824 IGC_RCTL_BAM | /* RX All Bcast Pkts */
825 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
827 rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
828 IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
831 wr32(IGC_RCTL, rctl);
835 * igc_setup_tctl - configure the transmit control registers
836 * @adapter: Board private structure
838 static void igc_setup_tctl(struct igc_adapter *adapter)
840 struct igc_hw *hw = &adapter->hw;
843 /* disable queue 0 which icould be enabled by default */
844 wr32(IGC_TXDCTL(0), 0);
846 /* Program the Transmit Control Register */
847 tctl = rd32(IGC_TCTL);
848 tctl &= ~IGC_TCTL_CT;
849 tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
850 (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);
852 /* Enable transmits */
855 wr32(IGC_TCTL, tctl);
859 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
860 * @adapter: Pointer to adapter where the filter should be set
861 * @index: Filter index
862 * @type: MAC address filter type (source or destination)
864 * @queue: If non-negative, queue assignment feature is enabled and frames
865 * matching the filter are enqueued onto 'queue'. Otherwise, queue
866 * assignment is disabled.
868 static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
869 enum igc_mac_filter_type type,
870 const u8 *addr, int queue)
872 struct net_device *dev = adapter->netdev;
873 struct igc_hw *hw = &adapter->hw;
876 if (WARN_ON(index >= hw->mac.rar_entry_count))
879 ral = le32_to_cpup((__le32 *)(addr));
880 rah = le16_to_cpup((__le16 *)(addr + 4));
882 if (type == IGC_MAC_FILTER_TYPE_SRC) {
883 rah &= ~IGC_RAH_ASEL_MASK;
884 rah |= IGC_RAH_ASEL_SRC_ADDR;
888 rah &= ~IGC_RAH_QSEL_MASK;
889 rah |= (queue << IGC_RAH_QSEL_SHIFT);
890 rah |= IGC_RAH_QSEL_ENABLE;
895 wr32(IGC_RAL(index), ral);
896 wr32(IGC_RAH(index), rah);
898 netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
902 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
903 * @adapter: Pointer to adapter where the filter should be cleared
904 * @index: Filter index
906 static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
908 struct net_device *dev = adapter->netdev;
909 struct igc_hw *hw = &adapter->hw;
911 if (WARN_ON(index >= hw->mac.rar_entry_count))
914 wr32(IGC_RAL(index), 0);
915 wr32(IGC_RAH(index), 0);
917 netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
920 /* Set default MAC address for the PF in the first RAR entry */
921 static void igc_set_default_mac_filter(struct igc_adapter *adapter)
923 struct net_device *dev = adapter->netdev;
924 u8 *addr = adapter->hw.mac.addr;
926 netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);
928 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
932 * igc_set_mac - Change the Ethernet Address of the NIC
933 * @netdev: network interface device structure
934 * @p: pointer to an address structure
936 * Returns 0 on success, negative on failure
938 static int igc_set_mac(struct net_device *netdev, void *p)
940 struct igc_adapter *adapter = netdev_priv(netdev);
941 struct igc_hw *hw = &adapter->hw;
942 struct sockaddr *addr = p;
944 if (!is_valid_ether_addr(addr->sa_data))
945 return -EADDRNOTAVAIL;
947 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
948 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
950 /* set the correct pool for the new PF MAC address in entry 0 */
951 igc_set_default_mac_filter(adapter);
957 * igc_write_mc_addr_list - write multicast addresses to MTA
958 * @netdev: network interface device structure
960 * Writes multicast address list to the MTA hash table.
961 * Returns: -ENOMEM on failure
962 * 0 on no addresses written
963 * X on writing X addresses to MTA
965 static int igc_write_mc_addr_list(struct net_device *netdev)
967 struct igc_adapter *adapter = netdev_priv(netdev);
968 struct igc_hw *hw = &adapter->hw;
969 struct netdev_hw_addr *ha;
973 if (netdev_mc_empty(netdev)) {
974 /* nothing to program, so clear mc list */
975 igc_update_mc_addr_list(hw, NULL, 0);
979 mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
983 /* The shared function expects a packed array of only addresses. */
985 netdev_for_each_mc_addr(ha, netdev)
986 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
988 igc_update_mc_addr_list(hw, mta_list, i);
991 return netdev_mc_count(netdev);
994 static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
996 ktime_t cycle_time = adapter->cycle_time;
997 ktime_t base_time = adapter->base_time;
1000 /* FIXME: when using ETF together with taprio, we may have a
1001 * case where 'delta' is larger than the cycle_time, this may
1002 * cause problems if we don't read the current value of
1003 * IGC_BASET, as the value writen into the launchtime
1004 * descriptor field may be misinterpreted.
1006 div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
1008 return cpu_to_le32(launchtime);
1011 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
1012 struct igc_tx_buffer *first,
1013 u32 vlan_macip_lens, u32 type_tucmd,
1016 struct igc_adv_tx_context_desc *context_desc;
1017 u16 i = tx_ring->next_to_use;
1019 context_desc = IGC_TX_CTXTDESC(tx_ring, i);
1022 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
1024 /* set bits to identify this as an advanced context descriptor */
1025 type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;
1027 /* For i225, context index must be unique per ring. */
1028 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
1029 mss_l4len_idx |= tx_ring->reg_idx << 4;
1031 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
1032 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
1033 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1035 /* We assume there is always a valid Tx time available. Invalid times
1036 * should have been handled by the upper layers.
1038 if (tx_ring->launchtime_enable) {
1039 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1040 ktime_t txtime = first->skb->tstamp;
1042 skb_txtime_consumed(first->skb);
1043 context_desc->launch_time = igc_tx_launchtime(adapter,
1046 context_desc->launch_time = 0;
1050 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
1052 struct sk_buff *skb = first->skb;
1053 u32 vlan_macip_lens = 0;
1056 if (skb->ip_summed != CHECKSUM_PARTIAL) {
1058 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
1059 !tx_ring->launchtime_enable)
1064 switch (skb->csum_offset) {
1065 case offsetof(struct tcphdr, check):
1066 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1068 case offsetof(struct udphdr, check):
1070 case offsetof(struct sctphdr, checksum):
1071 /* validate that this is actually an SCTP request */
1072 if (skb_csum_is_sctp(skb)) {
1073 type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
1078 skb_checksum_help(skb);
1082 /* update TX checksum flag */
1083 first->tx_flags |= IGC_TX_FLAGS_CSUM;
1084 vlan_macip_lens = skb_checksum_start_offset(skb) -
1085 skb_network_offset(skb);
1087 vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
1088 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1090 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
1093 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1095 struct net_device *netdev = tx_ring->netdev;
1097 netif_stop_subqueue(netdev, tx_ring->queue_index);
1099 /* memory barriier comment */
1102 /* We need to check again in a case another CPU has just
1103 * made room available.
1105 if (igc_desc_unused(tx_ring) < size)
1109 netif_wake_subqueue(netdev, tx_ring->queue_index);
1111 u64_stats_update_begin(&tx_ring->tx_syncp2);
1112 tx_ring->tx_stats.restart_queue2++;
1113 u64_stats_update_end(&tx_ring->tx_syncp2);
1118 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
1120 if (igc_desc_unused(tx_ring) >= size)
1122 return __igc_maybe_stop_tx(tx_ring, size);
1125 #define IGC_SET_FLAG(_input, _flag, _result) \
1126 (((_flag) <= (_result)) ? \
1127 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
1128 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
1130 static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
1132 /* set type for advanced descriptor with frame checksum insertion */
1133 u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
1134 IGC_ADVTXD_DCMD_DEXT |
1135 IGC_ADVTXD_DCMD_IFCS;
1137 /* set HW vlan bit if vlan is present */
1138 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
1139 IGC_ADVTXD_DCMD_VLE);
1141 /* set segmentation bits for TSO */
1142 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
1143 (IGC_ADVTXD_DCMD_TSE));
1145 /* set timestamp bit if present */
1146 cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
1147 (IGC_ADVTXD_MAC_TSTAMP));
1149 /* insert frame checksum */
1150 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
1155 static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
1156 union igc_adv_tx_desc *tx_desc,
1157 u32 tx_flags, unsigned int paylen)
1159 u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;
1161 /* insert L4 checksum */
1162 olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
1163 ((IGC_TXD_POPTS_TXSM << 8) /
1166 /* insert IPv4 checksum */
1167 olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
1168 (((IGC_TXD_POPTS_IXSM << 8)) /
1171 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
1174 static int igc_tx_map(struct igc_ring *tx_ring,
1175 struct igc_tx_buffer *first,
1178 struct sk_buff *skb = first->skb;
1179 struct igc_tx_buffer *tx_buffer;
1180 union igc_adv_tx_desc *tx_desc;
1181 u32 tx_flags = first->tx_flags;
1183 u16 i = tx_ring->next_to_use;
1184 unsigned int data_len, size;
1188 cmd_type = igc_tx_cmd_type(skb, tx_flags);
1189 tx_desc = IGC_TX_DESC(tx_ring, i);
1191 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
1193 size = skb_headlen(skb);
1194 data_len = skb->data_len;
1196 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
1200 for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
1201 if (dma_mapping_error(tx_ring->dev, dma))
1204 /* record length, and DMA address */
1205 dma_unmap_len_set(tx_buffer, len, size);
1206 dma_unmap_addr_set(tx_buffer, dma, dma);
1208 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1210 while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
1211 tx_desc->read.cmd_type_len =
1212 cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);
1216 if (i == tx_ring->count) {
1217 tx_desc = IGC_TX_DESC(tx_ring, 0);
1220 tx_desc->read.olinfo_status = 0;
1222 dma += IGC_MAX_DATA_PER_TXD;
1223 size -= IGC_MAX_DATA_PER_TXD;
1225 tx_desc->read.buffer_addr = cpu_to_le64(dma);
1228 if (likely(!data_len))
1231 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
1235 if (i == tx_ring->count) {
1236 tx_desc = IGC_TX_DESC(tx_ring, 0);
1239 tx_desc->read.olinfo_status = 0;
1241 size = skb_frag_size(frag);
1244 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
1245 size, DMA_TO_DEVICE);
1247 tx_buffer = &tx_ring->tx_buffer_info[i];
1250 /* write last descriptor with RS and EOP bits */
1251 cmd_type |= size | IGC_TXD_DCMD;
1252 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
1254 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
1256 /* set the timestamp */
1257 first->time_stamp = jiffies;
1259 skb_tx_timestamp(skb);
1261 /* Force memory writes to complete before letting h/w know there
1262 * are new descriptors to fetch. (Only applicable for weak-ordered
1263 * memory model archs, such as IA-64).
1265 * We also need this memory barrier to make certain all of the
1266 * status bits have been updated before next_to_watch is written.
1270 /* set next_to_watch value indicating a packet is present */
1271 first->next_to_watch = tx_desc;
1274 if (i == tx_ring->count)
1277 tx_ring->next_to_use = i;
1279 /* Make sure there is space in the ring for the next send. */
1280 igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
1282 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
1283 writel(i, tx_ring->tail);
1288 netdev_err(tx_ring->netdev, "TX DMA map failed\n");
1289 tx_buffer = &tx_ring->tx_buffer_info[i];
1291 /* clear dma mappings for failed tx_buffer_info map */
1292 while (tx_buffer != first) {
1293 if (dma_unmap_len(tx_buffer, len))
1294 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1297 i += tx_ring->count;
1298 tx_buffer = &tx_ring->tx_buffer_info[i];
1301 if (dma_unmap_len(tx_buffer, len))
1302 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
1304 dev_kfree_skb_any(tx_buffer->skb);
1305 tx_buffer->skb = NULL;
1307 tx_ring->next_to_use = i;
1312 static int igc_tso(struct igc_ring *tx_ring,
1313 struct igc_tx_buffer *first,
1316 u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
1317 struct sk_buff *skb = first->skb;
1328 u32 paylen, l4_offset;
1331 if (skb->ip_summed != CHECKSUM_PARTIAL)
1334 if (!skb_is_gso(skb))
1337 err = skb_cow_head(skb, 0);
1341 ip.hdr = skb_network_header(skb);
1342 l4.hdr = skb_checksum_start(skb);
1344 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1345 type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
1347 /* initialize outer IP header fields */
1348 if (ip.v4->version == 4) {
1349 unsigned char *csum_start = skb_checksum_start(skb);
1350 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
1352 /* IP header will have to cancel out any data that
1353 * is not a part of the outer IP header
1355 ip.v4->check = csum_fold(csum_partial(trans_start,
1356 csum_start - trans_start,
1358 type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;
1361 first->tx_flags |= IGC_TX_FLAGS_TSO |
1365 ip.v6->payload_len = 0;
1366 first->tx_flags |= IGC_TX_FLAGS_TSO |
1370 /* determine offset of inner transport header */
1371 l4_offset = l4.hdr - skb->data;
1373 /* remove payload length from inner checksum */
1374 paylen = skb->len - l4_offset;
1375 if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
1376 /* compute length of segmentation header */
1377 *hdr_len = (l4.tcp->doff * 4) + l4_offset;
1378 csum_replace_by_diff(&l4.tcp->check,
1379 (__force __wsum)htonl(paylen));
1381 /* compute length of segmentation header */
1382 *hdr_len = sizeof(*l4.udp) + l4_offset;
1383 csum_replace_by_diff(&l4.udp->check,
1384 (__force __wsum)htonl(paylen));
1387 /* update gso size and bytecount with header size */
1388 first->gso_segs = skb_shinfo(skb)->gso_segs;
1389 first->bytecount += (first->gso_segs - 1) * *hdr_len;
1392 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
1393 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;
1395 /* VLAN MACLEN IPLEN */
1396 vlan_macip_lens = l4.hdr - ip.hdr;
1397 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
1398 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
1400 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
1401 type_tucmd, mss_l4len_idx);
1406 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
1407 struct igc_ring *tx_ring)
1409 u16 count = TXD_USE_COUNT(skb_headlen(skb));
1410 __be16 protocol = vlan_get_protocol(skb);
1411 struct igc_tx_buffer *first;
1417 /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
1418 * + 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
1419 * + 2 desc gap to keep tail from touching head,
1420 * + 1 desc for context descriptor,
1421 * otherwise try next time
1423 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1424 count += TXD_USE_COUNT(skb_frag_size(
1425 &skb_shinfo(skb)->frags[f]));
1427 if (igc_maybe_stop_tx(tx_ring, count + 3)) {
1428 /* this is a hard error */
1429 return NETDEV_TX_BUSY;
1432 /* record the location of the first descriptor for this packet */
1433 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
1434 first->type = IGC_TX_BUFFER_TYPE_SKB;
1436 first->bytecount = skb->len;
1437 first->gso_segs = 1;
1439 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1440 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
1442 /* FIXME: add support for retrieving timestamps from
1443 * the other timer registers before skipping the
1444 * timestamping request.
1446 if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
1447 !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
1449 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1450 tx_flags |= IGC_TX_FLAGS_TSTAMP;
1452 adapter->ptp_tx_skb = skb_get(skb);
1453 adapter->ptp_tx_start = jiffies;
1455 adapter->tx_hwtstamp_skipped++;
1459 if (skb_vlan_tag_present(skb)) {
1460 tx_flags |= IGC_TX_FLAGS_VLAN;
1461 tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
1464 /* record initial flags and protocol */
1465 first->tx_flags = tx_flags;
1466 first->protocol = protocol;
1468 tso = igc_tso(tx_ring, first, &hdr_len);
1472 igc_tx_csum(tx_ring, first);
1474 igc_tx_map(tx_ring, first, hdr_len);
1476 return NETDEV_TX_OK;
1479 dev_kfree_skb_any(first->skb);
1482 return NETDEV_TX_OK;
1485 static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
1486 struct sk_buff *skb)
1488 unsigned int r_idx = skb->queue_mapping;
1490 if (r_idx >= adapter->num_tx_queues)
1491 r_idx = r_idx % adapter->num_tx_queues;
1493 return adapter->tx_ring[r_idx];
1496 static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
1497 struct net_device *netdev)
1499 struct igc_adapter *adapter = netdev_priv(netdev);
1501 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
1502 * in order to meet this minimum size requirement.
1504 if (skb->len < 17) {
1505 if (skb_padto(skb, 17))
1506 return NETDEV_TX_OK;
1510 return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
1513 static void igc_rx_checksum(struct igc_ring *ring,
1514 union igc_adv_rx_desc *rx_desc,
1515 struct sk_buff *skb)
1517 skb_checksum_none_assert(skb);
1519 /* Ignore Checksum bit is set */
1520 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
1523 /* Rx checksum disabled via ethtool */
1524 if (!(ring->netdev->features & NETIF_F_RXCSUM))
1527 /* TCP/UDP checksum error bit is set */
1528 if (igc_test_staterr(rx_desc,
1529 IGC_RXDEXT_STATERR_L4E |
1530 IGC_RXDEXT_STATERR_IPE)) {
1531 /* work around errata with sctp packets where the TCPE aka
1532 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
1533 * packets (aka let the stack check the crc32c)
1535 if (!(skb->len == 60 &&
1536 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
1537 u64_stats_update_begin(&ring->rx_syncp);
1538 ring->rx_stats.csum_err++;
1539 u64_stats_update_end(&ring->rx_syncp);
1541 /* let the stack verify checksum errors */
1544 /* It must be a TCP or UDP packet with a valid checksum */
1545 if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
1546 IGC_RXD_STAT_UDPCS))
1547 skb->ip_summed = CHECKSUM_UNNECESSARY;
1549 netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
1550 le32_to_cpu(rx_desc->wb.upper.status_error));
1553 static inline void igc_rx_hash(struct igc_ring *ring,
1554 union igc_adv_rx_desc *rx_desc,
1555 struct sk_buff *skb)
1557 if (ring->netdev->features & NETIF_F_RXHASH)
1559 le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
1563 static void igc_rx_vlan(struct igc_ring *rx_ring,
1564 union igc_adv_rx_desc *rx_desc,
1565 struct sk_buff *skb)
1567 struct net_device *dev = rx_ring->netdev;
1570 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1571 igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
1572 if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
1573 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
1574 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
1576 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1578 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
1583 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1584 * @rx_ring: rx descriptor ring packet is being transacted on
1585 * @rx_desc: pointer to the EOP Rx descriptor
1586 * @skb: pointer to current skb being populated
1588 * This function checks the ring, descriptor, and packet information in order
1589 * to populate the hash, checksum, VLAN, protocol, and other fields within the
1592 static void igc_process_skb_fields(struct igc_ring *rx_ring,
1593 union igc_adv_rx_desc *rx_desc,
1594 struct sk_buff *skb)
1596 igc_rx_hash(rx_ring, rx_desc, skb);
1598 igc_rx_checksum(rx_ring, rx_desc, skb);
1600 igc_rx_vlan(rx_ring, rx_desc, skb);
1602 skb_record_rx_queue(skb, rx_ring->queue_index);
1604 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1607 static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
1609 bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
1610 struct igc_adapter *adapter = netdev_priv(netdev);
1611 struct igc_hw *hw = &adapter->hw;
1614 ctrl = rd32(IGC_CTRL);
1617 /* enable VLAN tag insert/strip */
1618 ctrl |= IGC_CTRL_VME;
1620 /* disable VLAN tag insert/strip */
1621 ctrl &= ~IGC_CTRL_VME;
1623 wr32(IGC_CTRL, ctrl);
1626 static void igc_restore_vlan(struct igc_adapter *adapter)
1628 igc_vlan_mode(adapter->netdev, adapter->netdev->features);
1631 static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
1632 const unsigned int size,
1633 int *rx_buffer_pgcnt)
1635 struct igc_rx_buffer *rx_buffer;
1637 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
1639 #if (PAGE_SIZE < 8192)
1640 page_count(rx_buffer->page);
1644 prefetchw(rx_buffer->page);
1646 /* we are reusing so sync this buffer for CPU use */
1647 dma_sync_single_range_for_cpu(rx_ring->dev,
1649 rx_buffer->page_offset,
1653 rx_buffer->pagecnt_bias--;
1658 static void igc_rx_buffer_flip(struct igc_rx_buffer *buffer,
1659 unsigned int truesize)
1661 #if (PAGE_SIZE < 8192)
1662 buffer->page_offset ^= truesize;
1664 buffer->page_offset += truesize;
1668 static unsigned int igc_get_rx_frame_truesize(struct igc_ring *ring,
1671 unsigned int truesize;
1673 #if (PAGE_SIZE < 8192)
1674 truesize = igc_rx_pg_size(ring) / 2;
1676 truesize = ring_uses_build_skb(ring) ?
1677 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1678 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1679 SKB_DATA_ALIGN(size);
1685 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1686 * @rx_ring: rx descriptor ring to transact packets on
1687 * @rx_buffer: buffer containing page to add
1688 * @skb: sk_buff to place the data into
1689 * @size: size of buffer to be added
1691 * This function will add the data contained in rx_buffer->page to the skb.
1693 static void igc_add_rx_frag(struct igc_ring *rx_ring,
1694 struct igc_rx_buffer *rx_buffer,
1695 struct sk_buff *skb,
1698 unsigned int truesize;
1700 #if (PAGE_SIZE < 8192)
1701 truesize = igc_rx_pg_size(rx_ring) / 2;
1703 truesize = ring_uses_build_skb(rx_ring) ?
1704 SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
1705 SKB_DATA_ALIGN(size);
1707 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
1708 rx_buffer->page_offset, size, truesize);
1710 igc_rx_buffer_flip(rx_buffer, truesize);
1713 static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
1714 struct igc_rx_buffer *rx_buffer,
1715 union igc_adv_rx_desc *rx_desc,
1718 void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
1719 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1720 struct sk_buff *skb;
1722 /* prefetch first cache line of first page */
1725 /* build an skb around the page buffer */
1726 skb = build_skb(va - IGC_SKB_PAD, truesize);
1730 /* update pointers within the skb to store the data */
1731 skb_reserve(skb, IGC_SKB_PAD);
1732 __skb_put(skb, size);
1734 igc_rx_buffer_flip(rx_buffer, truesize);
1738 static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
1739 struct igc_rx_buffer *rx_buffer,
1740 struct xdp_buff *xdp,
1743 unsigned int size = xdp->data_end - xdp->data;
1744 unsigned int truesize = igc_get_rx_frame_truesize(rx_ring, size);
1745 void *va = xdp->data;
1746 unsigned int headlen;
1747 struct sk_buff *skb;
1749 /* prefetch first cache line of first page */
1752 /* allocate a skb to store the frags */
1753 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
1758 skb_hwtstamps(skb)->hwtstamp = timestamp;
1760 /* Determine available headroom for copy */
1762 if (headlen > IGC_RX_HDR_LEN)
1763 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
1765 /* align pull length to size of long to optimize memcpy performance */
1766 memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
1768 /* update all of the pointers */
1771 skb_add_rx_frag(skb, 0, rx_buffer->page,
1772 (va + headlen) - page_address(rx_buffer->page),
1774 igc_rx_buffer_flip(rx_buffer, truesize);
1776 rx_buffer->pagecnt_bias++;
1783 * igc_reuse_rx_page - page flip buffer and store it back on the ring
1784 * @rx_ring: rx descriptor ring to store buffers on
1785 * @old_buff: donor buffer to have page reused
1787 * Synchronizes page for reuse by the adapter
1789 static void igc_reuse_rx_page(struct igc_ring *rx_ring,
1790 struct igc_rx_buffer *old_buff)
1792 u16 nta = rx_ring->next_to_alloc;
1793 struct igc_rx_buffer *new_buff;
1795 new_buff = &rx_ring->rx_buffer_info[nta];
1797 /* update, and store next to alloc */
1799 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
1801 /* Transfer page from old buffer to new buffer.
1802 * Move each member individually to avoid possible store
1803 * forwarding stalls.
1805 new_buff->dma = old_buff->dma;
1806 new_buff->page = old_buff->page;
1807 new_buff->page_offset = old_buff->page_offset;
1808 new_buff->pagecnt_bias = old_buff->pagecnt_bias;
1811 static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer,
1812 int rx_buffer_pgcnt)
1814 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
1815 struct page *page = rx_buffer->page;
1817 /* avoid re-using remote and pfmemalloc pages */
1818 if (!dev_page_is_reusable(page))
1821 #if (PAGE_SIZE < 8192)
1822 /* if we are only owner of page we can reuse it */
1823 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
1826 #define IGC_LAST_OFFSET \
1827 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
1829 if (rx_buffer->page_offset > IGC_LAST_OFFSET)
1833 /* If we have drained the page fragment pool we need to update
1834 * the pagecnt_bias and page count so that we fully restock the
1835 * number of references the driver holds.
1837 if (unlikely(pagecnt_bias == 1)) {
1838 page_ref_add(page, USHRT_MAX - 1);
1839 rx_buffer->pagecnt_bias = USHRT_MAX;
1846 * igc_is_non_eop - process handling of non-EOP buffers
1847 * @rx_ring: Rx ring being processed
1848 * @rx_desc: Rx descriptor for current buffer
1850 * This function updates next to clean. If the buffer is an EOP buffer
1851 * this function exits returning false, otherwise it will place the
1852 * sk_buff in the next buffer to be chained and return true indicating
1853 * that this is in fact a non-EOP buffer.
1855 static bool igc_is_non_eop(struct igc_ring *rx_ring,
1856 union igc_adv_rx_desc *rx_desc)
1858 u32 ntc = rx_ring->next_to_clean + 1;
1860 /* fetch, update, and store next to clean */
1861 ntc = (ntc < rx_ring->count) ? ntc : 0;
1862 rx_ring->next_to_clean = ntc;
1864 prefetch(IGC_RX_DESC(rx_ring, ntc));
1866 if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
1873 * igc_cleanup_headers - Correct corrupted or empty headers
1874 * @rx_ring: rx descriptor ring packet is being transacted on
1875 * @rx_desc: pointer to the EOP Rx descriptor
1876 * @skb: pointer to current skb being fixed
1878 * Address the case where we are pulling data in on pages only
1879 * and as such no data is present in the skb header.
1881 * In addition if skb is not at least 60 bytes we need to pad it so that
1882 * it is large enough to qualify as a valid Ethernet frame.
1884 * Returns true if an error was encountered and skb was freed.
1886 static bool igc_cleanup_headers(struct igc_ring *rx_ring,
1887 union igc_adv_rx_desc *rx_desc,
1888 struct sk_buff *skb)
1890 /* XDP packets use error pointer so abort at this point */
1894 if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
1895 struct net_device *netdev = rx_ring->netdev;
1897 if (!(netdev->features & NETIF_F_RXALL)) {
1898 dev_kfree_skb_any(skb);
1903 /* if eth_skb_pad returns an error the skb was freed */
1904 if (eth_skb_pad(skb))
1910 static void igc_put_rx_buffer(struct igc_ring *rx_ring,
1911 struct igc_rx_buffer *rx_buffer,
1912 int rx_buffer_pgcnt)
1914 if (igc_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
1915 /* hand second half of page back to the ring */
1916 igc_reuse_rx_page(rx_ring, rx_buffer);
1918 /* We are not reusing the buffer so unmap it and free
1919 * any references we are holding to it
1921 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
1922 igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
1924 __page_frag_cache_drain(rx_buffer->page,
1925 rx_buffer->pagecnt_bias);
1928 /* clear contents of rx_buffer */
1929 rx_buffer->page = NULL;
1932 static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
1934 struct igc_adapter *adapter = rx_ring->q_vector->adapter;
1936 if (ring_uses_build_skb(rx_ring))
1938 if (igc_xdp_is_enabled(adapter))
1939 return XDP_PACKET_HEADROOM;
1944 static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
1945 struct igc_rx_buffer *bi)
1947 struct page *page = bi->page;
1950 /* since we are recycling buffers we should seldom need to alloc */
1954 /* alloc new page for storage */
1955 page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
1956 if (unlikely(!page)) {
1957 rx_ring->rx_stats.alloc_failed++;
1961 /* map page for use */
1962 dma = dma_map_page_attrs(rx_ring->dev, page, 0,
1963 igc_rx_pg_size(rx_ring),
1967 /* if mapping failed free memory back to system since
1968 * there isn't much point in holding memory we can't use
1970 if (dma_mapping_error(rx_ring->dev, dma)) {
1973 rx_ring->rx_stats.alloc_failed++;
1979 bi->page_offset = igc_rx_offset(rx_ring);
1980 page_ref_add(page, USHRT_MAX - 1);
1981 bi->pagecnt_bias = USHRT_MAX;
1987 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
1988 * @rx_ring: rx descriptor ring
1989 * @cleaned_count: number of buffers to clean
1991 static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
1993 union igc_adv_rx_desc *rx_desc;
1994 u16 i = rx_ring->next_to_use;
1995 struct igc_rx_buffer *bi;
2002 rx_desc = IGC_RX_DESC(rx_ring, i);
2003 bi = &rx_ring->rx_buffer_info[i];
2004 i -= rx_ring->count;
2006 bufsz = igc_rx_bufsz(rx_ring);
2009 if (!igc_alloc_mapped_page(rx_ring, bi))
2012 /* sync the buffer for use by the device */
2013 dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
2014 bi->page_offset, bufsz,
2017 /* Refresh the desc even if buffer_addrs didn't change
2018 * because each write-back erases this info.
2020 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
2026 rx_desc = IGC_RX_DESC(rx_ring, 0);
2027 bi = rx_ring->rx_buffer_info;
2028 i -= rx_ring->count;
2031 /* clear the length for the next_to_use descriptor */
2032 rx_desc->wb.upper.length = 0;
2035 } while (cleaned_count);
2037 i += rx_ring->count;
2039 if (rx_ring->next_to_use != i) {
2040 /* record the next descriptor to use */
2041 rx_ring->next_to_use = i;
2043 /* update next to alloc since we have filled the ring */
2044 rx_ring->next_to_alloc = i;
2046 /* Force memory writes to complete before letting h/w
2047 * know there are new descriptors to fetch. (Only
2048 * applicable for weak-ordered memory model archs,
2052 writel(i, rx_ring->tail);
2056 static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
2058 union igc_adv_rx_desc *desc;
2059 u16 i = ring->next_to_use;
2060 struct igc_rx_buffer *bi;
2067 desc = IGC_RX_DESC(ring, i);
2068 bi = &ring->rx_buffer_info[i];
2072 bi->xdp = xsk_buff_alloc(ring->xsk_pool);
2078 dma = xsk_buff_xdp_get_dma(bi->xdp);
2079 desc->read.pkt_addr = cpu_to_le64(dma);
2085 desc = IGC_RX_DESC(ring, 0);
2086 bi = ring->rx_buffer_info;
2090 /* Clear the length for the next_to_use descriptor. */
2091 desc->wb.upper.length = 0;
2098 if (ring->next_to_use != i) {
2099 ring->next_to_use = i;
2101 /* Force memory writes to complete before letting h/w
2102 * know there are new descriptors to fetch. (Only
2103 * applicable for weak-ordered memory model archs,
2107 writel(i, ring->tail);
2113 static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
2114 struct xdp_frame *xdpf,
2115 struct igc_ring *ring)
2119 dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
2120 if (dma_mapping_error(ring->dev, dma)) {
2121 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
2125 buffer->type = IGC_TX_BUFFER_TYPE_XDP;
2126 buffer->xdpf = xdpf;
2127 buffer->protocol = 0;
2128 buffer->bytecount = xdpf->len;
2129 buffer->gso_segs = 1;
2130 buffer->time_stamp = jiffies;
2131 dma_unmap_len_set(buffer, len, xdpf->len);
2132 dma_unmap_addr_set(buffer, dma, dma);
2136 /* This function requires __netif_tx_lock is held by the caller. */
2137 static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
2138 struct xdp_frame *xdpf)
2140 struct igc_tx_buffer *buffer;
2141 union igc_adv_tx_desc *desc;
2142 u32 cmd_type, olinfo_status;
2145 if (!igc_desc_unused(ring))
2148 buffer = &ring->tx_buffer_info[ring->next_to_use];
2149 err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
2153 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2154 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2156 olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
2158 desc = IGC_TX_DESC(ring, ring->next_to_use);
2159 desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2160 desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2161 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
2163 netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
2165 buffer->next_to_watch = desc;
2167 ring->next_to_use++;
2168 if (ring->next_to_use == ring->count)
2169 ring->next_to_use = 0;
2174 static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
2179 if (unlikely(index < 0))
2182 while (index >= adapter->num_tx_queues)
2183 index -= adapter->num_tx_queues;
2185 return adapter->tx_ring[index];
2188 static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp)
2190 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
2191 int cpu = smp_processor_id();
2192 struct netdev_queue *nq;
2193 struct igc_ring *ring;
2196 if (unlikely(!xdpf))
2199 ring = igc_xdp_get_tx_ring(adapter, cpu);
2200 nq = txring_txq(ring);
2202 __netif_tx_lock(nq, cpu);
2203 res = igc_xdp_init_tx_descriptor(ring, xdpf);
2204 __netif_tx_unlock(nq);
2208 /* This function assumes rcu_read_lock() is held by the caller. */
2209 static int __igc_xdp_run_prog(struct igc_adapter *adapter,
2210 struct bpf_prog *prog,
2211 struct xdp_buff *xdp)
2213 u32 act = bpf_prog_run_xdp(prog, xdp);
2217 return IGC_XDP_PASS;
2219 if (igc_xdp_xmit_back(adapter, xdp) < 0)
2223 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
2225 return IGC_XDP_REDIRECT;
2228 bpf_warn_invalid_xdp_action(act);
2232 trace_xdp_exception(adapter->netdev, prog, act);
2235 return IGC_XDP_CONSUMED;
2239 static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter,
2240 struct xdp_buff *xdp)
2242 struct bpf_prog *prog;
2245 prog = READ_ONCE(adapter->xdp_prog);
2251 res = __igc_xdp_run_prog(adapter, prog, xdp);
2254 return ERR_PTR(-res);
2257 /* This function assumes __netif_tx_lock is held by the caller. */
2258 static void igc_flush_tx_descriptors(struct igc_ring *ring)
2260 /* Once tail pointer is updated, hardware can fetch the descriptors
2261 * any time so we issue a write membar here to ensure all memory
2262 * writes are complete before the tail pointer is updated.
2265 writel(ring->next_to_use, ring->tail);
2268 static void igc_finalize_xdp(struct igc_adapter *adapter, int status)
2270 int cpu = smp_processor_id();
2271 struct netdev_queue *nq;
2272 struct igc_ring *ring;
2274 if (status & IGC_XDP_TX) {
2275 ring = igc_xdp_get_tx_ring(adapter, cpu);
2276 nq = txring_txq(ring);
2278 __netif_tx_lock(nq, cpu);
2279 igc_flush_tx_descriptors(ring);
2280 __netif_tx_unlock(nq);
2283 if (status & IGC_XDP_REDIRECT)
2287 static void igc_update_rx_stats(struct igc_q_vector *q_vector,
2288 unsigned int packets, unsigned int bytes)
2290 struct igc_ring *ring = q_vector->rx.ring;
2292 u64_stats_update_begin(&ring->rx_syncp);
2293 ring->rx_stats.packets += packets;
2294 ring->rx_stats.bytes += bytes;
2295 u64_stats_update_end(&ring->rx_syncp);
2297 q_vector->rx.total_packets += packets;
2298 q_vector->rx.total_bytes += bytes;
2301 static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
2303 unsigned int total_bytes = 0, total_packets = 0;
2304 struct igc_adapter *adapter = q_vector->adapter;
2305 struct igc_ring *rx_ring = q_vector->rx.ring;
2306 struct sk_buff *skb = rx_ring->skb;
2307 u16 cleaned_count = igc_desc_unused(rx_ring);
2308 int xdp_status = 0, rx_buffer_pgcnt;
2310 while (likely(total_packets < budget)) {
2311 union igc_adv_rx_desc *rx_desc;
2312 struct igc_rx_buffer *rx_buffer;
2313 unsigned int size, truesize;
2314 ktime_t timestamp = 0;
2315 struct xdp_buff xdp;
2319 /* return some buffers to hardware, one at a time is too slow */
2320 if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
2321 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2325 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
2326 size = le16_to_cpu(rx_desc->wb.upper.length);
2330 /* This memory barrier is needed to keep us from reading
2331 * any other fields out of the rx_desc until we know the
2332 * descriptor has been written back
2336 rx_buffer = igc_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
2337 truesize = igc_get_rx_frame_truesize(rx_ring, size);
2339 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
2341 if (igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP)) {
2342 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2344 pkt_offset = IGC_TS_HDR_LEN;
2345 size -= IGC_TS_HDR_LEN;
2349 xdp_init_buff(&xdp, truesize, &rx_ring->xdp_rxq);
2350 xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
2351 igc_rx_offset(rx_ring) + pkt_offset, size, false);
2353 skb = igc_xdp_run_prog(adapter, &xdp);
2357 unsigned int xdp_res = -PTR_ERR(skb);
2360 case IGC_XDP_CONSUMED:
2361 rx_buffer->pagecnt_bias++;
2364 case IGC_XDP_REDIRECT:
2365 igc_rx_buffer_flip(rx_buffer, truesize);
2366 xdp_status |= xdp_res;
2371 total_bytes += size;
2373 igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
2374 else if (ring_uses_build_skb(rx_ring))
2375 skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
2377 skb = igc_construct_skb(rx_ring, rx_buffer, &xdp,
2380 /* exit if we failed to retrieve a buffer */
2382 rx_ring->rx_stats.alloc_failed++;
2383 rx_buffer->pagecnt_bias++;
2387 igc_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
2390 /* fetch next buffer in frame if non-eop */
2391 if (igc_is_non_eop(rx_ring, rx_desc))
2394 /* verify the packet layout is correct */
2395 if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
2400 /* probably a little skewed due to removing CRC */
2401 total_bytes += skb->len;
2403 /* populate checksum, VLAN, and protocol */
2404 igc_process_skb_fields(rx_ring, rx_desc, skb);
2406 napi_gro_receive(&q_vector->napi, skb);
2408 /* reset skb pointer */
2411 /* update budget accounting */
2416 igc_finalize_xdp(adapter, xdp_status);
2418 /* place incomplete frames back on ring for completion */
2421 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2424 igc_alloc_rx_buffers(rx_ring, cleaned_count);
2426 return total_packets;
2429 static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
2430 struct xdp_buff *xdp)
2432 unsigned int metasize = xdp->data - xdp->data_meta;
2433 unsigned int datasize = xdp->data_end - xdp->data;
2434 unsigned int totalsize = metasize + datasize;
2435 struct sk_buff *skb;
2437 skb = __napi_alloc_skb(&ring->q_vector->napi,
2438 xdp->data_end - xdp->data_hard_start,
2439 GFP_ATOMIC | __GFP_NOWARN);
2443 skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
2444 memcpy(__skb_put(skb, totalsize), xdp->data_meta, totalsize);
2446 skb_metadata_set(skb, metasize);
2451 static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
2452 union igc_adv_rx_desc *desc,
2453 struct xdp_buff *xdp,
2456 struct igc_ring *ring = q_vector->rx.ring;
2457 struct sk_buff *skb;
2459 skb = igc_construct_skb_zc(ring, xdp);
2461 ring->rx_stats.alloc_failed++;
2466 skb_hwtstamps(skb)->hwtstamp = timestamp;
2468 if (igc_cleanup_headers(ring, desc, skb))
2471 igc_process_skb_fields(ring, desc, skb);
2472 napi_gro_receive(&q_vector->napi, skb);
2475 static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
2477 struct igc_adapter *adapter = q_vector->adapter;
2478 struct igc_ring *ring = q_vector->rx.ring;
2479 u16 cleaned_count = igc_desc_unused(ring);
2480 int total_bytes = 0, total_packets = 0;
2481 u16 ntc = ring->next_to_clean;
2482 struct bpf_prog *prog;
2483 bool failure = false;
2488 prog = READ_ONCE(adapter->xdp_prog);
2490 while (likely(total_packets < budget)) {
2491 union igc_adv_rx_desc *desc;
2492 struct igc_rx_buffer *bi;
2493 ktime_t timestamp = 0;
2497 desc = IGC_RX_DESC(ring, ntc);
2498 size = le16_to_cpu(desc->wb.upper.length);
2502 /* This memory barrier is needed to keep us from reading
2503 * any other fields out of the rx_desc until we know the
2504 * descriptor has been written back
2508 bi = &ring->rx_buffer_info[ntc];
2510 if (igc_test_staterr(desc, IGC_RXDADV_STAT_TSIP)) {
2511 timestamp = igc_ptp_rx_pktstamp(q_vector->adapter,
2514 bi->xdp->data += IGC_TS_HDR_LEN;
2516 /* HW timestamp has been copied into local variable. Metadata
2517 * length when XDP program is called should be 0.
2519 bi->xdp->data_meta += IGC_TS_HDR_LEN;
2520 size -= IGC_TS_HDR_LEN;
2523 bi->xdp->data_end = bi->xdp->data + size;
2524 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
2526 res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
2529 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
2531 case IGC_XDP_CONSUMED:
2532 xsk_buff_free(bi->xdp);
2535 case IGC_XDP_REDIRECT:
2541 total_bytes += size;
2545 if (ntc == ring->count)
2549 ring->next_to_clean = ntc;
2552 if (cleaned_count >= IGC_RX_BUFFER_WRITE)
2553 failure = !igc_alloc_rx_buffers_zc(ring, cleaned_count);
2556 igc_finalize_xdp(adapter, xdp_status);
2558 igc_update_rx_stats(q_vector, total_packets, total_bytes);
2560 if (xsk_uses_need_wakeup(ring->xsk_pool)) {
2561 if (failure || ring->next_to_clean == ring->next_to_use)
2562 xsk_set_rx_need_wakeup(ring->xsk_pool);
2564 xsk_clear_rx_need_wakeup(ring->xsk_pool);
2565 return total_packets;
2568 return failure ? budget : total_packets;
2571 static void igc_update_tx_stats(struct igc_q_vector *q_vector,
2572 unsigned int packets, unsigned int bytes)
2574 struct igc_ring *ring = q_vector->tx.ring;
2576 u64_stats_update_begin(&ring->tx_syncp);
2577 ring->tx_stats.bytes += bytes;
2578 ring->tx_stats.packets += packets;
2579 u64_stats_update_end(&ring->tx_syncp);
2581 q_vector->tx.total_bytes += bytes;
2582 q_vector->tx.total_packets += packets;
2585 static void igc_xdp_xmit_zc(struct igc_ring *ring)
2587 struct xsk_buff_pool *pool = ring->xsk_pool;
2588 struct netdev_queue *nq = txring_txq(ring);
2589 union igc_adv_tx_desc *tx_desc = NULL;
2590 int cpu = smp_processor_id();
2591 u16 ntu = ring->next_to_use;
2592 struct xdp_desc xdp_desc;
2595 if (!netif_carrier_ok(ring->netdev))
2598 __netif_tx_lock(nq, cpu);
2600 budget = igc_desc_unused(ring);
2602 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
2603 u32 cmd_type, olinfo_status;
2604 struct igc_tx_buffer *bi;
2607 cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
2608 IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
2610 olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
2612 dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2613 xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
2615 tx_desc = IGC_TX_DESC(ring, ntu);
2616 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
2617 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2618 tx_desc->read.buffer_addr = cpu_to_le64(dma);
2620 bi = &ring->tx_buffer_info[ntu];
2621 bi->type = IGC_TX_BUFFER_TYPE_XSK;
2623 bi->bytecount = xdp_desc.len;
2625 bi->time_stamp = jiffies;
2626 bi->next_to_watch = tx_desc;
2628 netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);
2631 if (ntu == ring->count)
2635 ring->next_to_use = ntu;
2637 igc_flush_tx_descriptors(ring);
2638 xsk_tx_release(pool);
2641 __netif_tx_unlock(nq);
2645 * igc_clean_tx_irq - Reclaim resources after transmit completes
2646 * @q_vector: pointer to q_vector containing needed info
2647 * @napi_budget: Used to determine if we are in netpoll
2649 * returns true if ring is completely cleaned
2651 static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
2653 struct igc_adapter *adapter = q_vector->adapter;
2654 unsigned int total_bytes = 0, total_packets = 0;
2655 unsigned int budget = q_vector->tx.work_limit;
2656 struct igc_ring *tx_ring = q_vector->tx.ring;
2657 unsigned int i = tx_ring->next_to_clean;
2658 struct igc_tx_buffer *tx_buffer;
2659 union igc_adv_tx_desc *tx_desc;
2662 if (test_bit(__IGC_DOWN, &adapter->state))
2665 tx_buffer = &tx_ring->tx_buffer_info[i];
2666 tx_desc = IGC_TX_DESC(tx_ring, i);
2667 i -= tx_ring->count;
2670 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
2672 /* if next_to_watch is not set then there is no work pending */
2676 /* prevent any other reads prior to eop_desc */
2679 /* if DD is not set pending work has not been completed */
2680 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
2683 /* clear next_to_watch to prevent false hangs */
2684 tx_buffer->next_to_watch = NULL;
2686 /* update the statistics for this packet */
2687 total_bytes += tx_buffer->bytecount;
2688 total_packets += tx_buffer->gso_segs;
2690 switch (tx_buffer->type) {
2691 case IGC_TX_BUFFER_TYPE_XSK:
2694 case IGC_TX_BUFFER_TYPE_XDP:
2695 xdp_return_frame(tx_buffer->xdpf);
2696 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2698 case IGC_TX_BUFFER_TYPE_SKB:
2699 napi_consume_skb(tx_buffer->skb, napi_budget);
2700 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2703 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n");
2707 /* clear last DMA location and unmap remaining buffers */
2708 while (tx_desc != eop_desc) {
2713 i -= tx_ring->count;
2714 tx_buffer = tx_ring->tx_buffer_info;
2715 tx_desc = IGC_TX_DESC(tx_ring, 0);
2718 /* unmap any remaining paged data */
2719 if (dma_unmap_len(tx_buffer, len))
2720 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
2723 /* move us one more past the eop_desc for start of next pkt */
2728 i -= tx_ring->count;
2729 tx_buffer = tx_ring->tx_buffer_info;
2730 tx_desc = IGC_TX_DESC(tx_ring, 0);
2733 /* issue prefetch for next Tx descriptor */
2736 /* update budget accounting */
2738 } while (likely(budget));
2740 netdev_tx_completed_queue(txring_txq(tx_ring),
2741 total_packets, total_bytes);
2743 i += tx_ring->count;
2744 tx_ring->next_to_clean = i;
2746 igc_update_tx_stats(q_vector, total_packets, total_bytes);
2748 if (tx_ring->xsk_pool) {
2750 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
2751 if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
2752 xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
2753 igc_xdp_xmit_zc(tx_ring);
2756 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
2757 struct igc_hw *hw = &adapter->hw;
2759 /* Detect a transmit hang in hardware, this serializes the
2760 * check with the clearing of time_stamp and movement of i
2762 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
2763 if (tx_buffer->next_to_watch &&
2764 time_after(jiffies, tx_buffer->time_stamp +
2765 (adapter->tx_timeout_factor * HZ)) &&
2766 !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
2767 /* detected Tx unit hang */
2768 netdev_err(tx_ring->netdev,
2769 "Detected Tx Unit Hang\n"
2773 " next_to_use <%x>\n"
2774 " next_to_clean <%x>\n"
2775 "buffer_info[next_to_clean]\n"
2776 " time_stamp <%lx>\n"
2777 " next_to_watch <%p>\n"
2779 " desc.status <%x>\n",
2780 tx_ring->queue_index,
2781 rd32(IGC_TDH(tx_ring->reg_idx)),
2782 readl(tx_ring->tail),
2783 tx_ring->next_to_use,
2784 tx_ring->next_to_clean,
2785 tx_buffer->time_stamp,
2786 tx_buffer->next_to_watch,
2788 tx_buffer->next_to_watch->wb.status);
2789 netif_stop_subqueue(tx_ring->netdev,
2790 tx_ring->queue_index);
2792 /* we are about to reset, no point in enabling stuff */
2797 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
2798 if (unlikely(total_packets &&
2799 netif_carrier_ok(tx_ring->netdev) &&
2800 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
2801 /* Make sure that anybody stopping the queue after this
2802 * sees the new next_to_clean.
2805 if (__netif_subqueue_stopped(tx_ring->netdev,
2806 tx_ring->queue_index) &&
2807 !(test_bit(__IGC_DOWN, &adapter->state))) {
2808 netif_wake_subqueue(tx_ring->netdev,
2809 tx_ring->queue_index);
2811 u64_stats_update_begin(&tx_ring->tx_syncp);
2812 tx_ring->tx_stats.restart_queue++;
2813 u64_stats_update_end(&tx_ring->tx_syncp);
2820 static int igc_find_mac_filter(struct igc_adapter *adapter,
2821 enum igc_mac_filter_type type, const u8 *addr)
2823 struct igc_hw *hw = &adapter->hw;
2824 int max_entries = hw->mac.rar_entry_count;
2828 for (i = 0; i < max_entries; i++) {
2829 ral = rd32(IGC_RAL(i));
2830 rah = rd32(IGC_RAH(i));
2832 if (!(rah & IGC_RAH_AV))
2834 if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
2836 if ((rah & IGC_RAH_RAH_MASK) !=
2837 le16_to_cpup((__le16 *)(addr + 4)))
2839 if (ral != le32_to_cpup((__le32 *)(addr)))
2848 static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
2850 struct igc_hw *hw = &adapter->hw;
2851 int max_entries = hw->mac.rar_entry_count;
2855 for (i = 0; i < max_entries; i++) {
2856 rah = rd32(IGC_RAH(i));
2858 if (!(rah & IGC_RAH_AV))
2866 * igc_add_mac_filter() - Add MAC address filter
2867 * @adapter: Pointer to adapter where the filter should be added
2868 * @type: MAC address filter type (source or destination)
2869 * @addr: MAC address
2870 * @queue: If non-negative, queue assignment feature is enabled and frames
2871 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2872 * assignment is disabled.
2874 * Return: 0 in case of success, negative errno code otherwise.
2876 static int igc_add_mac_filter(struct igc_adapter *adapter,
2877 enum igc_mac_filter_type type, const u8 *addr,
2880 struct net_device *dev = adapter->netdev;
2883 index = igc_find_mac_filter(adapter, type, addr);
2887 index = igc_get_avail_mac_filter_slot(adapter);
2891 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2892 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2896 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2901 * igc_del_mac_filter() - Delete MAC address filter
2902 * @adapter: Pointer to adapter where the filter should be deleted from
2903 * @type: MAC address filter type (source or destination)
2904 * @addr: MAC address
2906 static void igc_del_mac_filter(struct igc_adapter *adapter,
2907 enum igc_mac_filter_type type, const u8 *addr)
2909 struct net_device *dev = adapter->netdev;
2912 index = igc_find_mac_filter(adapter, type, addr);
2917 /* If this is the default filter, we don't actually delete it.
2918 * We just reset to its default value i.e. disable queue
2921 netdev_dbg(dev, "Disable default MAC filter queue assignment");
2923 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2925 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2927 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2930 igc_clear_mac_filter_hw(adapter, index);
2935 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2936 * @adapter: Pointer to adapter where the filter should be added
2937 * @prio: VLAN priority value
2938 * @queue: Queue number which matching frames are assigned to
2940 * Return: 0 in case of success, negative errno code otherwise.
2942 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2945 struct net_device *dev = adapter->netdev;
2946 struct igc_hw *hw = &adapter->hw;
2949 vlanpqf = rd32(IGC_VLANPQF);
2951 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2952 netdev_dbg(dev, "VLAN priority filter already in use\n");
2956 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2957 vlanpqf |= IGC_VLANPQF_VALID(prio);
2959 wr32(IGC_VLANPQF, vlanpqf);
2961 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2967 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2968 * @adapter: Pointer to adapter where the filter should be deleted from
2969 * @prio: VLAN priority value
2971 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2973 struct igc_hw *hw = &adapter->hw;
2976 vlanpqf = rd32(IGC_VLANPQF);
2978 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2979 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2981 wr32(IGC_VLANPQF, vlanpqf);
2983 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
2987 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2989 struct igc_hw *hw = &adapter->hw;
2992 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2993 u32 etqf = rd32(IGC_ETQF(i));
2995 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
3003 * igc_add_etype_filter() - Add ethertype filter
3004 * @adapter: Pointer to adapter where the filter should be added
3005 * @etype: Ethertype value
3006 * @queue: If non-negative, queue assignment feature is enabled and frames
3007 * matching the filter are enqueued onto 'queue'. Otherwise, queue
3008 * assignment is disabled.
3010 * Return: 0 in case of success, negative errno code otherwise.
3012 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
3015 struct igc_hw *hw = &adapter->hw;
3019 index = igc_get_avail_etype_filter_slot(adapter);
3023 etqf = rd32(IGC_ETQF(index));
3025 etqf &= ~IGC_ETQF_ETYPE_MASK;
3029 etqf &= ~IGC_ETQF_QUEUE_MASK;
3030 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
3031 etqf |= IGC_ETQF_QUEUE_ENABLE;
3034 etqf |= IGC_ETQF_FILTER_ENABLE;
3036 wr32(IGC_ETQF(index), etqf);
3038 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
3043 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
3045 struct igc_hw *hw = &adapter->hw;
3048 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
3049 u32 etqf = rd32(IGC_ETQF(i));
3051 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
3059 * igc_del_etype_filter() - Delete ethertype filter
3060 * @adapter: Pointer to adapter where the filter should be deleted from
3061 * @etype: Ethertype value
3063 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
3065 struct igc_hw *hw = &adapter->hw;
3068 index = igc_find_etype_filter(adapter, etype);
3072 wr32(IGC_ETQF(index), 0);
3074 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
3078 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
3079 const struct igc_nfc_rule *rule)
3083 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
3084 err = igc_add_etype_filter(adapter, rule->filter.etype,
3090 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
3091 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3092 rule->filter.src_addr, rule->action);
3097 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
3098 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3099 rule->filter.dst_addr, rule->action);
3104 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3105 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3108 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
3116 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
3117 const struct igc_nfc_rule *rule)
3119 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
3120 igc_del_etype_filter(adapter, rule->filter.etype);
3122 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
3123 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
3126 igc_del_vlan_prio_filter(adapter, prio);
3129 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
3130 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
3131 rule->filter.src_addr);
3133 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
3134 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
3135 rule->filter.dst_addr);
3139 * igc_get_nfc_rule() - Get NFC rule
3140 * @adapter: Pointer to adapter
3141 * @location: Rule location
3143 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3145 * Return: Pointer to NFC rule at @location. If not found, NULL.
3147 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
3150 struct igc_nfc_rule *rule;
3152 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
3153 if (rule->location == location)
3155 if (rule->location > location)
3163 * igc_del_nfc_rule() - Delete NFC rule
3164 * @adapter: Pointer to adapter
3165 * @rule: Pointer to rule to be deleted
3167 * Disable NFC rule in hardware and delete it from adapter.
3169 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3171 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3173 igc_disable_nfc_rule(adapter, rule);
3175 list_del(&rule->list);
3176 adapter->nfc_rule_count--;
3181 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
3183 struct igc_nfc_rule *rule, *tmp;
3185 mutex_lock(&adapter->nfc_rule_lock);
3187 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
3188 igc_del_nfc_rule(adapter, rule);
3190 mutex_unlock(&adapter->nfc_rule_lock);
3194 * igc_add_nfc_rule() - Add NFC rule
3195 * @adapter: Pointer to adapter
3196 * @rule: Pointer to rule to be added
3198 * Enable NFC rule in hardware and add it to adapter.
3200 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3202 * Return: 0 on success, negative errno on failure.
3204 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
3206 struct igc_nfc_rule *pred, *cur;
3209 err = igc_enable_nfc_rule(adapter, rule);
3214 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
3215 if (cur->location >= rule->location)
3220 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
3221 adapter->nfc_rule_count++;
3225 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
3227 struct igc_nfc_rule *rule;
3229 mutex_lock(&adapter->nfc_rule_lock);
3231 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
3232 igc_enable_nfc_rule(adapter, rule);
3234 mutex_unlock(&adapter->nfc_rule_lock);
3237 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
3239 struct igc_adapter *adapter = netdev_priv(netdev);
3241 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
3244 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
3246 struct igc_adapter *adapter = netdev_priv(netdev);
3248 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
3253 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3254 * @netdev: network interface device structure
3256 * The set_rx_mode entry point is called whenever the unicast or multicast
3257 * address lists or the network interface flags are updated. This routine is
3258 * responsible for configuring the hardware for proper unicast, multicast,
3259 * promiscuous mode, and all-multi behavior.
3261 static void igc_set_rx_mode(struct net_device *netdev)
3263 struct igc_adapter *adapter = netdev_priv(netdev);
3264 struct igc_hw *hw = &adapter->hw;
3265 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
3268 /* Check for Promiscuous and All Multicast modes */
3269 if (netdev->flags & IFF_PROMISC) {
3270 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
3272 if (netdev->flags & IFF_ALLMULTI) {
3273 rctl |= IGC_RCTL_MPE;
3275 /* Write addresses to the MTA, if the attempt fails
3276 * then we should just turn on promiscuous mode so
3277 * that we can at least receive multicast traffic
3279 count = igc_write_mc_addr_list(netdev);
3281 rctl |= IGC_RCTL_MPE;
3285 /* Write addresses to available RAR registers, if there is not
3286 * sufficient space to store all the addresses then enable
3287 * unicast promiscuous mode
3289 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
3290 rctl |= IGC_RCTL_UPE;
3292 /* update state of unicast and multicast */
3293 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
3294 wr32(IGC_RCTL, rctl);
3296 #if (PAGE_SIZE < 8192)
3297 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
3298 rlpml = IGC_MAX_FRAME_BUILD_SKB;
3300 wr32(IGC_RLPML, rlpml);
3304 * igc_configure - configure the hardware for RX and TX
3305 * @adapter: private board structure
3307 static void igc_configure(struct igc_adapter *adapter)
3309 struct net_device *netdev = adapter->netdev;
3312 igc_get_hw_control(adapter);
3313 igc_set_rx_mode(netdev);
3315 igc_restore_vlan(adapter);
3317 igc_setup_tctl(adapter);
3318 igc_setup_mrqc(adapter);
3319 igc_setup_rctl(adapter);
3321 igc_set_default_mac_filter(adapter);
3322 igc_restore_nfc_rules(adapter);
3324 igc_configure_tx(adapter);
3325 igc_configure_rx(adapter);
3327 igc_rx_fifo_flush_base(&adapter->hw);
3329 /* call igc_desc_unused which always leaves
3330 * at least 1 descriptor unused to make sure
3331 * next_to_use != next_to_clean
3333 for (i = 0; i < adapter->num_rx_queues; i++) {
3334 struct igc_ring *ring = adapter->rx_ring[i];
3337 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
3339 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
3344 * igc_write_ivar - configure ivar for given MSI-X vector
3345 * @hw: pointer to the HW structure
3346 * @msix_vector: vector number we are allocating to a given ring
3347 * @index: row index of IVAR register to write within IVAR table
3348 * @offset: column offset of in IVAR, should be multiple of 8
3350 * The IVAR table consists of 2 columns,
3351 * each containing an cause allocation for an Rx and Tx ring, and a
3352 * variable number of rows depending on the number of queues supported.
3354 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
3355 int index, int offset)
3357 u32 ivar = array_rd32(IGC_IVAR0, index);
3359 /* clear any bits that are currently set */
3360 ivar &= ~((u32)0xFF << offset);
3362 /* write vector and valid bit */
3363 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
3365 array_wr32(IGC_IVAR0, index, ivar);
3368 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
3370 struct igc_adapter *adapter = q_vector->adapter;
3371 struct igc_hw *hw = &adapter->hw;
3372 int rx_queue = IGC_N0_QUEUE;
3373 int tx_queue = IGC_N0_QUEUE;
3375 if (q_vector->rx.ring)
3376 rx_queue = q_vector->rx.ring->reg_idx;
3377 if (q_vector->tx.ring)
3378 tx_queue = q_vector->tx.ring->reg_idx;
3380 switch (hw->mac.type) {
3382 if (rx_queue > IGC_N0_QUEUE)
3383 igc_write_ivar(hw, msix_vector,
3385 (rx_queue & 0x1) << 4);
3386 if (tx_queue > IGC_N0_QUEUE)
3387 igc_write_ivar(hw, msix_vector,
3389 ((tx_queue & 0x1) << 4) + 8);
3390 q_vector->eims_value = BIT(msix_vector);
3393 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
3397 /* add q_vector eims value to global eims_enable_mask */
3398 adapter->eims_enable_mask |= q_vector->eims_value;
3400 /* configure q_vector to set itr on first interrupt */
3401 q_vector->set_itr = 1;
3405 * igc_configure_msix - Configure MSI-X hardware
3406 * @adapter: Pointer to adapter structure
3408 * igc_configure_msix sets up the hardware to properly
3409 * generate MSI-X interrupts.
3411 static void igc_configure_msix(struct igc_adapter *adapter)
3413 struct igc_hw *hw = &adapter->hw;
3417 adapter->eims_enable_mask = 0;
3419 /* set vector for other causes, i.e. link changes */
3420 switch (hw->mac.type) {
3422 /* Turn on MSI-X capability first, or our settings
3423 * won't stick. And it will take days to debug.
3425 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
3426 IGC_GPIE_PBA | IGC_GPIE_EIAME |
3429 /* enable msix_other interrupt */
3430 adapter->eims_other = BIT(vector);
3431 tmp = (vector++ | IGC_IVAR_VALID) << 8;
3433 wr32(IGC_IVAR_MISC, tmp);
3436 /* do nothing, since nothing else supports MSI-X */
3438 } /* switch (hw->mac.type) */
3440 adapter->eims_enable_mask |= adapter->eims_other;
3442 for (i = 0; i < adapter->num_q_vectors; i++)
3443 igc_assign_vector(adapter->q_vector[i], vector++);
3449 * igc_irq_enable - Enable default interrupt generation settings
3450 * @adapter: board private structure
3452 static void igc_irq_enable(struct igc_adapter *adapter)
3454 struct igc_hw *hw = &adapter->hw;
3456 if (adapter->msix_entries) {
3457 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
3458 u32 regval = rd32(IGC_EIAC);
3460 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
3461 regval = rd32(IGC_EIAM);
3462 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
3463 wr32(IGC_EIMS, adapter->eims_enable_mask);
3466 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3467 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
3472 * igc_irq_disable - Mask off interrupt generation on the NIC
3473 * @adapter: board private structure
3475 static void igc_irq_disable(struct igc_adapter *adapter)
3477 struct igc_hw *hw = &adapter->hw;
3479 if (adapter->msix_entries) {
3480 u32 regval = rd32(IGC_EIAM);
3482 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
3483 wr32(IGC_EIMC, adapter->eims_enable_mask);
3484 regval = rd32(IGC_EIAC);
3485 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
3492 if (adapter->msix_entries) {
3495 synchronize_irq(adapter->msix_entries[vector++].vector);
3497 for (i = 0; i < adapter->num_q_vectors; i++)
3498 synchronize_irq(adapter->msix_entries[vector++].vector);
3500 synchronize_irq(adapter->pdev->irq);
3504 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
3505 const u32 max_rss_queues)
3507 /* Determine if we need to pair queues. */
3508 /* If rss_queues > half of max_rss_queues, pair the queues in
3509 * order to conserve interrupts due to limited supply.
3511 if (adapter->rss_queues > (max_rss_queues / 2))
3512 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3514 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
3517 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
3519 return IGC_MAX_RX_QUEUES;
3522 static void igc_init_queue_configuration(struct igc_adapter *adapter)
3526 max_rss_queues = igc_get_max_rss_queues(adapter);
3527 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
3529 igc_set_flag_queue_pairs(adapter, max_rss_queues);
3533 * igc_reset_q_vector - Reset config for interrupt vector
3534 * @adapter: board private structure to initialize
3535 * @v_idx: Index of vector to be reset
3537 * If NAPI is enabled it will delete any references to the
3538 * NAPI struct. This is preparation for igc_free_q_vector.
3540 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
3542 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3544 /* if we're coming from igc_set_interrupt_capability, the vectors are
3550 if (q_vector->tx.ring)
3551 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
3553 if (q_vector->rx.ring)
3554 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
3556 netif_napi_del(&q_vector->napi);
3560 * igc_free_q_vector - Free memory allocated for specific interrupt vector
3561 * @adapter: board private structure to initialize
3562 * @v_idx: Index of vector to be freed
3564 * This function frees the memory allocated to the q_vector.
3566 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
3568 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
3570 adapter->q_vector[v_idx] = NULL;
3572 /* igc_get_stats64() might access the rings on this vector,
3573 * we must wait a grace period before freeing it.
3576 kfree_rcu(q_vector, rcu);
3580 * igc_free_q_vectors - Free memory allocated for interrupt vectors
3581 * @adapter: board private structure to initialize
3583 * This function frees the memory allocated to the q_vectors. In addition if
3584 * NAPI is enabled it will delete any references to the NAPI struct prior
3585 * to freeing the q_vector.
3587 static void igc_free_q_vectors(struct igc_adapter *adapter)
3589 int v_idx = adapter->num_q_vectors;
3591 adapter->num_tx_queues = 0;
3592 adapter->num_rx_queues = 0;
3593 adapter->num_q_vectors = 0;
3596 igc_reset_q_vector(adapter, v_idx);
3597 igc_free_q_vector(adapter, v_idx);
3602 * igc_update_itr - update the dynamic ITR value based on statistics
3603 * @q_vector: pointer to q_vector
3604 * @ring_container: ring info to update the itr for
3606 * Stores a new ITR value based on packets and byte
3607 * counts during the last interrupt. The advantage of per interrupt
3608 * computation is faster updates and more accurate ITR for the current
3609 * traffic pattern. Constants in this function were computed
3610 * based on theoretical maximum wire speed and thresholds were set based
3611 * on testing data as well as attempting to minimize response time
3612 * while increasing bulk throughput.
3613 * NOTE: These calculations are only valid when operating in a single-
3614 * queue environment.
3616 static void igc_update_itr(struct igc_q_vector *q_vector,
3617 struct igc_ring_container *ring_container)
3619 unsigned int packets = ring_container->total_packets;
3620 unsigned int bytes = ring_container->total_bytes;
3621 u8 itrval = ring_container->itr;
3623 /* no packets, exit with status unchanged */
3628 case lowest_latency:
3629 /* handle TSO and jumbo frames */
3630 if (bytes / packets > 8000)
3631 itrval = bulk_latency;
3632 else if ((packets < 5) && (bytes > 512))
3633 itrval = low_latency;
3635 case low_latency: /* 50 usec aka 20000 ints/s */
3636 if (bytes > 10000) {
3637 /* this if handles the TSO accounting */
3638 if (bytes / packets > 8000)
3639 itrval = bulk_latency;
3640 else if ((packets < 10) || ((bytes / packets) > 1200))
3641 itrval = bulk_latency;
3642 else if ((packets > 35))
3643 itrval = lowest_latency;
3644 } else if (bytes / packets > 2000) {
3645 itrval = bulk_latency;
3646 } else if (packets <= 2 && bytes < 512) {
3647 itrval = lowest_latency;
3650 case bulk_latency: /* 250 usec aka 4000 ints/s */
3651 if (bytes > 25000) {
3653 itrval = low_latency;
3654 } else if (bytes < 1500) {
3655 itrval = low_latency;
3660 /* clear work counters since we have the values we need */
3661 ring_container->total_bytes = 0;
3662 ring_container->total_packets = 0;
3664 /* write updated itr to ring container */
3665 ring_container->itr = itrval;
3668 static void igc_set_itr(struct igc_q_vector *q_vector)
3670 struct igc_adapter *adapter = q_vector->adapter;
3671 u32 new_itr = q_vector->itr_val;
3674 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3675 switch (adapter->link_speed) {
3679 new_itr = IGC_4K_ITR;
3685 igc_update_itr(q_vector, &q_vector->tx);
3686 igc_update_itr(q_vector, &q_vector->rx);
3688 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3690 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3691 if (current_itr == lowest_latency &&
3692 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3693 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3694 current_itr = low_latency;
3696 switch (current_itr) {
3697 /* counts and packets in update_itr are dependent on these numbers */
3698 case lowest_latency:
3699 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3702 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3705 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
3712 if (new_itr != q_vector->itr_val) {
3713 /* this attempts to bias the interrupt rate towards Bulk
3714 * by adding intermediate steps when interrupt rate is
3717 new_itr = new_itr > q_vector->itr_val ?
3718 max((new_itr * q_vector->itr_val) /
3719 (new_itr + (q_vector->itr_val >> 2)),
3721 /* Don't write the value here; it resets the adapter's
3722 * internal timer, and causes us to delay far longer than
3723 * we should between interrupts. Instead, we write the ITR
3724 * value at the beginning of the next interrupt so the timing
3725 * ends up being correct.
3727 q_vector->itr_val = new_itr;
3728 q_vector->set_itr = 1;
3732 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3734 int v_idx = adapter->num_q_vectors;
3736 if (adapter->msix_entries) {
3737 pci_disable_msix(adapter->pdev);
3738 kfree(adapter->msix_entries);
3739 adapter->msix_entries = NULL;
3740 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3741 pci_disable_msi(adapter->pdev);
3745 igc_reset_q_vector(adapter, v_idx);
3749 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3750 * @adapter: Pointer to adapter structure
3751 * @msix: boolean value for MSI-X capability
3753 * Attempt to configure interrupts using the best available
3754 * capabilities of the hardware and kernel.
3756 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3764 adapter->flags |= IGC_FLAG_HAS_MSIX;
3766 /* Number of supported queues. */
3767 adapter->num_rx_queues = adapter->rss_queues;
3769 adapter->num_tx_queues = adapter->rss_queues;
3771 /* start with one vector for every Rx queue */
3772 numvecs = adapter->num_rx_queues;
3774 /* if Tx handler is separate add 1 for every Tx queue */
3775 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3776 numvecs += adapter->num_tx_queues;
3778 /* store the number of vectors reserved for queues */
3779 adapter->num_q_vectors = numvecs;
3781 /* add 1 vector for link status interrupts */
3784 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3787 if (!adapter->msix_entries)
3790 /* populate entry values */
3791 for (i = 0; i < numvecs; i++)
3792 adapter->msix_entries[i].entry = i;
3794 err = pci_enable_msix_range(adapter->pdev,
3795 adapter->msix_entries,
3801 kfree(adapter->msix_entries);
3802 adapter->msix_entries = NULL;
3804 igc_reset_interrupt_capability(adapter);
3807 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3809 adapter->rss_queues = 1;
3810 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3811 adapter->num_rx_queues = 1;
3812 adapter->num_tx_queues = 1;
3813 adapter->num_q_vectors = 1;
3814 if (!pci_enable_msi(adapter->pdev))
3815 adapter->flags |= IGC_FLAG_HAS_MSI;
3819 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3820 * @q_vector: pointer to q_vector
3822 * Stores a new ITR value based on strictly on packet size. This
3823 * algorithm is less sophisticated than that used in igc_update_itr,
3824 * due to the difficulty of synchronizing statistics across multiple
3825 * receive rings. The divisors and thresholds used by this function
3826 * were determined based on theoretical maximum wire speed and testing
3827 * data, in order to minimize response time while increasing bulk
3829 * NOTE: This function is called only when operating in a multiqueue
3830 * receive environment.
3832 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3834 struct igc_adapter *adapter = q_vector->adapter;
3835 int new_val = q_vector->itr_val;
3836 int avg_wire_size = 0;
3837 unsigned int packets;
3839 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3840 * ints/sec - ITR timer value of 120 ticks.
3842 switch (adapter->link_speed) {
3845 new_val = IGC_4K_ITR;
3851 packets = q_vector->rx.total_packets;
3853 avg_wire_size = q_vector->rx.total_bytes / packets;
3855 packets = q_vector->tx.total_packets;
3857 avg_wire_size = max_t(u32, avg_wire_size,
3858 q_vector->tx.total_bytes / packets);
3860 /* if avg_wire_size isn't set no work was done */
3864 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3865 avg_wire_size += 24;
3867 /* Don't starve jumbo frames */
3868 avg_wire_size = min(avg_wire_size, 3000);
3870 /* Give a little boost to mid-size frames */
3871 if (avg_wire_size > 300 && avg_wire_size < 1200)
3872 new_val = avg_wire_size / 3;
3874 new_val = avg_wire_size / 2;
3876 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3877 if (new_val < IGC_20K_ITR &&
3878 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3879 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3880 new_val = IGC_20K_ITR;
3883 if (new_val != q_vector->itr_val) {
3884 q_vector->itr_val = new_val;
3885 q_vector->set_itr = 1;
3888 q_vector->rx.total_bytes = 0;
3889 q_vector->rx.total_packets = 0;
3890 q_vector->tx.total_bytes = 0;
3891 q_vector->tx.total_packets = 0;
3894 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3896 struct igc_adapter *adapter = q_vector->adapter;
3897 struct igc_hw *hw = &adapter->hw;
3899 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3900 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3901 if (adapter->num_q_vectors == 1)
3902 igc_set_itr(q_vector);
3904 igc_update_ring_itr(q_vector);
3907 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3908 if (adapter->msix_entries)
3909 wr32(IGC_EIMS, q_vector->eims_value);
3911 igc_irq_enable(adapter);
3915 static void igc_add_ring(struct igc_ring *ring,
3916 struct igc_ring_container *head)
3923 * igc_cache_ring_register - Descriptor ring to register mapping
3924 * @adapter: board private structure to initialize
3926 * Once we know the feature-set enabled for the device, we'll cache
3927 * the register offset the descriptor ring is assigned to.
3929 static void igc_cache_ring_register(struct igc_adapter *adapter)
3933 switch (adapter->hw.mac.type) {
3936 for (; i < adapter->num_rx_queues; i++)
3937 adapter->rx_ring[i]->reg_idx = i;
3938 for (; j < adapter->num_tx_queues; j++)
3939 adapter->tx_ring[j]->reg_idx = j;
3945 * igc_poll - NAPI Rx polling callback
3946 * @napi: napi polling structure
3947 * @budget: count of how many packets we should handle
3949 static int igc_poll(struct napi_struct *napi, int budget)
3951 struct igc_q_vector *q_vector = container_of(napi,
3952 struct igc_q_vector,
3954 struct igc_ring *rx_ring = q_vector->rx.ring;
3955 bool clean_complete = true;
3958 if (q_vector->tx.ring)
3959 clean_complete = igc_clean_tx_irq(q_vector, budget);
3962 int cleaned = rx_ring->xsk_pool ?
3963 igc_clean_rx_irq_zc(q_vector, budget) :
3964 igc_clean_rx_irq(q_vector, budget);
3966 work_done += cleaned;
3967 if (cleaned >= budget)
3968 clean_complete = false;
3971 /* If all work not completed, return budget and keep polling */
3972 if (!clean_complete)
3975 /* Exit the polling mode, but don't re-enable interrupts if stack might
3976 * poll us due to busy-polling
3978 if (likely(napi_complete_done(napi, work_done)))
3979 igc_ring_irq_enable(q_vector);
3981 return min(work_done, budget - 1);
3985 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3986 * @adapter: board private structure to initialize
3987 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3988 * @v_idx: index of vector in adapter struct
3989 * @txr_count: total number of Tx rings to allocate
3990 * @txr_idx: index of first Tx ring to allocate
3991 * @rxr_count: total number of Rx rings to allocate
3992 * @rxr_idx: index of first Rx ring to allocate
3994 * We allocate one q_vector. If allocation fails we return -ENOMEM.
3996 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3997 unsigned int v_count, unsigned int v_idx,
3998 unsigned int txr_count, unsigned int txr_idx,
3999 unsigned int rxr_count, unsigned int rxr_idx)
4001 struct igc_q_vector *q_vector;
4002 struct igc_ring *ring;
4005 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
4006 if (txr_count > 1 || rxr_count > 1)
4009 ring_count = txr_count + rxr_count;
4011 /* allocate q_vector and rings */
4012 q_vector = adapter->q_vector[v_idx];
4014 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
4017 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
4021 /* initialize NAPI */
4022 netif_napi_add(adapter->netdev, &q_vector->napi,
4025 /* tie q_vector and adapter together */
4026 adapter->q_vector[v_idx] = q_vector;
4027 q_vector->adapter = adapter;
4029 /* initialize work limits */
4030 q_vector->tx.work_limit = adapter->tx_work_limit;
4032 /* initialize ITR configuration */
4033 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
4034 q_vector->itr_val = IGC_START_ITR;
4036 /* initialize pointer to rings */
4037 ring = q_vector->ring;
4039 /* initialize ITR */
4041 /* rx or rx/tx vector */
4042 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
4043 q_vector->itr_val = adapter->rx_itr_setting;
4045 /* tx only vector */
4046 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
4047 q_vector->itr_val = adapter->tx_itr_setting;
4051 /* assign generic ring traits */
4052 ring->dev = &adapter->pdev->dev;
4053 ring->netdev = adapter->netdev;
4055 /* configure backlink on ring */
4056 ring->q_vector = q_vector;
4058 /* update q_vector Tx values */
4059 igc_add_ring(ring, &q_vector->tx);
4061 /* apply Tx specific ring traits */
4062 ring->count = adapter->tx_ring_count;
4063 ring->queue_index = txr_idx;
4065 /* assign ring to adapter */
4066 adapter->tx_ring[txr_idx] = ring;
4068 /* push pointer to next ring */
4073 /* assign generic ring traits */
4074 ring->dev = &adapter->pdev->dev;
4075 ring->netdev = adapter->netdev;
4077 /* configure backlink on ring */
4078 ring->q_vector = q_vector;
4080 /* update q_vector Rx values */
4081 igc_add_ring(ring, &q_vector->rx);
4083 /* apply Rx specific ring traits */
4084 ring->count = adapter->rx_ring_count;
4085 ring->queue_index = rxr_idx;
4087 /* assign ring to adapter */
4088 adapter->rx_ring[rxr_idx] = ring;
4095 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4096 * @adapter: board private structure to initialize
4098 * We allocate one q_vector per queue interrupt. If allocation fails we
4101 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
4103 int rxr_remaining = adapter->num_rx_queues;
4104 int txr_remaining = adapter->num_tx_queues;
4105 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
4106 int q_vectors = adapter->num_q_vectors;
4109 if (q_vectors >= (rxr_remaining + txr_remaining)) {
4110 for (; rxr_remaining; v_idx++) {
4111 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4117 /* update counts and index */
4123 for (; v_idx < q_vectors; v_idx++) {
4124 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
4125 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
4127 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
4128 tqpv, txr_idx, rqpv, rxr_idx);
4133 /* update counts and index */
4134 rxr_remaining -= rqpv;
4135 txr_remaining -= tqpv;
4143 adapter->num_tx_queues = 0;
4144 adapter->num_rx_queues = 0;
4145 adapter->num_q_vectors = 0;
4148 igc_free_q_vector(adapter, v_idx);
4154 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4155 * @adapter: Pointer to adapter structure
4156 * @msix: boolean for MSI-X capability
4158 * This function initializes the interrupts and allocates all of the queues.
4160 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
4162 struct net_device *dev = adapter->netdev;
4165 igc_set_interrupt_capability(adapter, msix);
4167 err = igc_alloc_q_vectors(adapter);
4169 netdev_err(dev, "Unable to allocate memory for vectors\n");
4170 goto err_alloc_q_vectors;
4173 igc_cache_ring_register(adapter);
4177 err_alloc_q_vectors:
4178 igc_reset_interrupt_capability(adapter);
4183 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4184 * @adapter: board private structure to initialize
4186 * igc_sw_init initializes the Adapter private data structure.
4187 * Fields are initialized based on PCI device information and
4188 * OS network device settings (MTU size).
4190 static int igc_sw_init(struct igc_adapter *adapter)
4192 struct net_device *netdev = adapter->netdev;
4193 struct pci_dev *pdev = adapter->pdev;
4194 struct igc_hw *hw = &adapter->hw;
4196 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
4198 /* set default ring sizes */
4199 adapter->tx_ring_count = IGC_DEFAULT_TXD;
4200 adapter->rx_ring_count = IGC_DEFAULT_RXD;
4202 /* set default ITR values */
4203 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
4204 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
4206 /* set default work limits */
4207 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
4209 /* adjust max frame to be at least the size of a standard frame */
4210 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
4212 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
4214 mutex_init(&adapter->nfc_rule_lock);
4215 INIT_LIST_HEAD(&adapter->nfc_rule_list);
4216 adapter->nfc_rule_count = 0;
4218 spin_lock_init(&adapter->stats64_lock);
4219 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
4220 adapter->flags |= IGC_FLAG_HAS_MSIX;
4222 igc_init_queue_configuration(adapter);
4224 /* This call may decrease the number of queues */
4225 if (igc_init_interrupt_scheme(adapter, true)) {
4226 netdev_err(netdev, "Unable to allocate memory for queues\n");
4230 /* Explicitly disable IRQ since the NIC can be in any state. */
4231 igc_irq_disable(adapter);
4233 set_bit(__IGC_DOWN, &adapter->state);
4239 * igc_up - Open the interface and prepare it to handle traffic
4240 * @adapter: board private structure
4242 void igc_up(struct igc_adapter *adapter)
4244 struct igc_hw *hw = &adapter->hw;
4247 /* hardware has been reset, we need to reload some things */
4248 igc_configure(adapter);
4250 clear_bit(__IGC_DOWN, &adapter->state);
4252 for (i = 0; i < adapter->num_q_vectors; i++)
4253 napi_enable(&adapter->q_vector[i]->napi);
4255 if (adapter->msix_entries)
4256 igc_configure_msix(adapter);
4258 igc_assign_vector(adapter->q_vector[0], 0);
4260 /* Clear any pending interrupts. */
4262 igc_irq_enable(adapter);
4264 netif_tx_start_all_queues(adapter->netdev);
4266 /* start the watchdog. */
4267 hw->mac.get_link_status = true;
4268 schedule_work(&adapter->watchdog_task);
4272 * igc_update_stats - Update the board statistics counters
4273 * @adapter: board private structure
4275 void igc_update_stats(struct igc_adapter *adapter)
4277 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
4278 struct pci_dev *pdev = adapter->pdev;
4279 struct igc_hw *hw = &adapter->hw;
4280 u64 _bytes, _packets;
4286 /* Prevent stats update while adapter is being reset, or if the pci
4287 * connection is down.
4289 if (adapter->link_speed == 0)
4291 if (pci_channel_offline(pdev))
4298 for (i = 0; i < adapter->num_rx_queues; i++) {
4299 struct igc_ring *ring = adapter->rx_ring[i];
4300 u32 rqdpc = rd32(IGC_RQDPC(i));
4302 if (hw->mac.type >= igc_i225)
4303 wr32(IGC_RQDPC(i), 0);
4306 ring->rx_stats.drops += rqdpc;
4307 net_stats->rx_fifo_errors += rqdpc;
4311 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
4312 _bytes = ring->rx_stats.bytes;
4313 _packets = ring->rx_stats.packets;
4314 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
4316 packets += _packets;
4319 net_stats->rx_bytes = bytes;
4320 net_stats->rx_packets = packets;
4324 for (i = 0; i < adapter->num_tx_queues; i++) {
4325 struct igc_ring *ring = adapter->tx_ring[i];
4328 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
4329 _bytes = ring->tx_stats.bytes;
4330 _packets = ring->tx_stats.packets;
4331 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
4333 packets += _packets;
4335 net_stats->tx_bytes = bytes;
4336 net_stats->tx_packets = packets;
4339 /* read stats registers */
4340 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
4341 adapter->stats.gprc += rd32(IGC_GPRC);
4342 adapter->stats.gorc += rd32(IGC_GORCL);
4343 rd32(IGC_GORCH); /* clear GORCL */
4344 adapter->stats.bprc += rd32(IGC_BPRC);
4345 adapter->stats.mprc += rd32(IGC_MPRC);
4346 adapter->stats.roc += rd32(IGC_ROC);
4348 adapter->stats.prc64 += rd32(IGC_PRC64);
4349 adapter->stats.prc127 += rd32(IGC_PRC127);
4350 adapter->stats.prc255 += rd32(IGC_PRC255);
4351 adapter->stats.prc511 += rd32(IGC_PRC511);
4352 adapter->stats.prc1023 += rd32(IGC_PRC1023);
4353 adapter->stats.prc1522 += rd32(IGC_PRC1522);
4354 adapter->stats.tlpic += rd32(IGC_TLPIC);
4355 adapter->stats.rlpic += rd32(IGC_RLPIC);
4356 adapter->stats.hgptc += rd32(IGC_HGPTC);
4358 mpc = rd32(IGC_MPC);
4359 adapter->stats.mpc += mpc;
4360 net_stats->rx_fifo_errors += mpc;
4361 adapter->stats.scc += rd32(IGC_SCC);
4362 adapter->stats.ecol += rd32(IGC_ECOL);
4363 adapter->stats.mcc += rd32(IGC_MCC);
4364 adapter->stats.latecol += rd32(IGC_LATECOL);
4365 adapter->stats.dc += rd32(IGC_DC);
4366 adapter->stats.rlec += rd32(IGC_RLEC);
4367 adapter->stats.xonrxc += rd32(IGC_XONRXC);
4368 adapter->stats.xontxc += rd32(IGC_XONTXC);
4369 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
4370 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
4371 adapter->stats.fcruc += rd32(IGC_FCRUC);
4372 adapter->stats.gptc += rd32(IGC_GPTC);
4373 adapter->stats.gotc += rd32(IGC_GOTCL);
4374 rd32(IGC_GOTCH); /* clear GOTCL */
4375 adapter->stats.rnbc += rd32(IGC_RNBC);
4376 adapter->stats.ruc += rd32(IGC_RUC);
4377 adapter->stats.rfc += rd32(IGC_RFC);
4378 adapter->stats.rjc += rd32(IGC_RJC);
4379 adapter->stats.tor += rd32(IGC_TORH);
4380 adapter->stats.tot += rd32(IGC_TOTH);
4381 adapter->stats.tpr += rd32(IGC_TPR);
4383 adapter->stats.ptc64 += rd32(IGC_PTC64);
4384 adapter->stats.ptc127 += rd32(IGC_PTC127);
4385 adapter->stats.ptc255 += rd32(IGC_PTC255);
4386 adapter->stats.ptc511 += rd32(IGC_PTC511);
4387 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
4388 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
4390 adapter->stats.mptc += rd32(IGC_MPTC);
4391 adapter->stats.bptc += rd32(IGC_BPTC);
4393 adapter->stats.tpt += rd32(IGC_TPT);
4394 adapter->stats.colc += rd32(IGC_COLC);
4395 adapter->stats.colc += rd32(IGC_RERC);
4397 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
4399 adapter->stats.tsctc += rd32(IGC_TSCTC);
4401 adapter->stats.iac += rd32(IGC_IAC);
4403 /* Fill out the OS statistics structure */
4404 net_stats->multicast = adapter->stats.mprc;
4405 net_stats->collisions = adapter->stats.colc;
4409 /* RLEC on some newer hardware can be incorrect so build
4410 * our own version based on RUC and ROC
4412 net_stats->rx_errors = adapter->stats.rxerrc +
4413 adapter->stats.crcerrs + adapter->stats.algnerrc +
4414 adapter->stats.ruc + adapter->stats.roc +
4415 adapter->stats.cexterr;
4416 net_stats->rx_length_errors = adapter->stats.ruc +
4418 net_stats->rx_crc_errors = adapter->stats.crcerrs;
4419 net_stats->rx_frame_errors = adapter->stats.algnerrc;
4420 net_stats->rx_missed_errors = adapter->stats.mpc;
4423 net_stats->tx_errors = adapter->stats.ecol +
4424 adapter->stats.latecol;
4425 net_stats->tx_aborted_errors = adapter->stats.ecol;
4426 net_stats->tx_window_errors = adapter->stats.latecol;
4427 net_stats->tx_carrier_errors = adapter->stats.tncrs;
4429 /* Tx Dropped needs to be maintained elsewhere */
4431 /* Management Stats */
4432 adapter->stats.mgptc += rd32(IGC_MGTPTC);
4433 adapter->stats.mgprc += rd32(IGC_MGTPRC);
4434 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
4438 * igc_down - Close the interface
4439 * @adapter: board private structure
4441 void igc_down(struct igc_adapter *adapter)
4443 struct net_device *netdev = adapter->netdev;
4444 struct igc_hw *hw = &adapter->hw;
4448 set_bit(__IGC_DOWN, &adapter->state);
4450 igc_ptp_suspend(adapter);
4452 /* disable receives in the hardware */
4453 rctl = rd32(IGC_RCTL);
4454 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
4455 /* flush and sleep below */
4457 /* set trans_start so we don't get spurious watchdogs during reset */
4458 netif_trans_update(netdev);
4460 netif_carrier_off(netdev);
4461 netif_tx_stop_all_queues(netdev);
4463 /* disable transmits in the hardware */
4464 tctl = rd32(IGC_TCTL);
4465 tctl &= ~IGC_TCTL_EN;
4466 wr32(IGC_TCTL, tctl);
4467 /* flush both disables and wait for them to finish */
4469 usleep_range(10000, 20000);
4471 igc_irq_disable(adapter);
4473 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4475 for (i = 0; i < adapter->num_q_vectors; i++) {
4476 if (adapter->q_vector[i]) {
4477 napi_synchronize(&adapter->q_vector[i]->napi);
4478 napi_disable(&adapter->q_vector[i]->napi);
4482 del_timer_sync(&adapter->watchdog_timer);
4483 del_timer_sync(&adapter->phy_info_timer);
4485 /* record the stats before reset*/
4486 spin_lock(&adapter->stats64_lock);
4487 igc_update_stats(adapter);
4488 spin_unlock(&adapter->stats64_lock);
4490 adapter->link_speed = 0;
4491 adapter->link_duplex = 0;
4493 if (!pci_channel_offline(adapter->pdev))
4496 /* clear VLAN promisc flag so VFTA will be updated if necessary */
4497 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
4499 igc_clean_all_tx_rings(adapter);
4500 igc_clean_all_rx_rings(adapter);
4503 void igc_reinit_locked(struct igc_adapter *adapter)
4505 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4506 usleep_range(1000, 2000);
4509 clear_bit(__IGC_RESETTING, &adapter->state);
4512 static void igc_reset_task(struct work_struct *work)
4514 struct igc_adapter *adapter;
4516 adapter = container_of(work, struct igc_adapter, reset_task);
4519 /* If we're already down or resetting, just bail */
4520 if (test_bit(__IGC_DOWN, &adapter->state) ||
4521 test_bit(__IGC_RESETTING, &adapter->state)) {
4526 igc_rings_dump(adapter);
4527 igc_regs_dump(adapter);
4528 netdev_err(adapter->netdev, "Reset adapter\n");
4529 igc_reinit_locked(adapter);
4534 * igc_change_mtu - Change the Maximum Transfer Unit
4535 * @netdev: network interface device structure
4536 * @new_mtu: new value for maximum frame size
4538 * Returns 0 on success, negative on failure
4540 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
4542 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
4543 struct igc_adapter *adapter = netdev_priv(netdev);
4545 if (igc_xdp_is_enabled(adapter) && new_mtu > ETH_DATA_LEN) {
4546 netdev_dbg(netdev, "Jumbo frames not supported with XDP");
4550 /* adjust max frame to be at least the size of a standard frame */
4551 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
4552 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
4554 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
4555 usleep_range(1000, 2000);
4557 /* igc_down has a dependency on max_frame_size */
4558 adapter->max_frame_size = max_frame;
4560 if (netif_running(netdev))
4563 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
4564 netdev->mtu = new_mtu;
4566 if (netif_running(netdev))
4571 clear_bit(__IGC_RESETTING, &adapter->state);
4577 * igc_get_stats64 - Get System Network Statistics
4578 * @netdev: network interface device structure
4579 * @stats: rtnl_link_stats64 pointer
4581 * Returns the address of the device statistics structure.
4582 * The statistics are updated here and also from the timer callback.
4584 static void igc_get_stats64(struct net_device *netdev,
4585 struct rtnl_link_stats64 *stats)
4587 struct igc_adapter *adapter = netdev_priv(netdev);
4589 spin_lock(&adapter->stats64_lock);
4590 if (!test_bit(__IGC_RESETTING, &adapter->state))
4591 igc_update_stats(adapter);
4592 memcpy(stats, &adapter->stats64, sizeof(*stats));
4593 spin_unlock(&adapter->stats64_lock);
4596 static netdev_features_t igc_fix_features(struct net_device *netdev,
4597 netdev_features_t features)
4599 /* Since there is no support for separate Rx/Tx vlan accel
4600 * enable/disable make sure Tx flag is always in same state as Rx.
4602 if (features & NETIF_F_HW_VLAN_CTAG_RX)
4603 features |= NETIF_F_HW_VLAN_CTAG_TX;
4605 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
4610 static int igc_set_features(struct net_device *netdev,
4611 netdev_features_t features)
4613 netdev_features_t changed = netdev->features ^ features;
4614 struct igc_adapter *adapter = netdev_priv(netdev);
4616 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
4617 igc_vlan_mode(netdev, features);
4619 /* Add VLAN support */
4620 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
4623 if (!(features & NETIF_F_NTUPLE))
4624 igc_flush_nfc_rules(adapter);
4626 netdev->features = features;
4628 if (netif_running(netdev))
4629 igc_reinit_locked(adapter);
4636 static netdev_features_t
4637 igc_features_check(struct sk_buff *skb, struct net_device *dev,
4638 netdev_features_t features)
4640 unsigned int network_hdr_len, mac_hdr_len;
4642 /* Make certain the headers can be described by a context descriptor */
4643 mac_hdr_len = skb_network_header(skb) - skb->data;
4644 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
4645 return features & ~(NETIF_F_HW_CSUM |
4647 NETIF_F_HW_VLAN_CTAG_TX |
4651 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
4652 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
4653 return features & ~(NETIF_F_HW_CSUM |
4658 /* We can only support IPv4 TSO in tunnels if we can mangle the
4659 * inner IP ID field, so strip TSO if MANGLEID is not supported.
4661 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
4662 features &= ~NETIF_F_TSO;
4667 static void igc_tsync_interrupt(struct igc_adapter *adapter)
4669 u32 ack, tsauxc, sec, nsec, tsicr;
4670 struct igc_hw *hw = &adapter->hw;
4671 struct ptp_clock_event event;
4672 struct timespec64 ts;
4674 tsicr = rd32(IGC_TSICR);
4677 if (tsicr & IGC_TSICR_SYS_WRAP) {
4678 event.type = PTP_CLOCK_PPS;
4679 if (adapter->ptp_caps.pps)
4680 ptp_clock_event(adapter->ptp_clock, &event);
4681 ack |= IGC_TSICR_SYS_WRAP;
4684 if (tsicr & IGC_TSICR_TXTS) {
4685 /* retrieve hardware timestamp */
4686 schedule_work(&adapter->ptp_tx_work);
4687 ack |= IGC_TSICR_TXTS;
4690 if (tsicr & IGC_TSICR_TT0) {
4691 spin_lock(&adapter->tmreg_lock);
4692 ts = timespec64_add(adapter->perout[0].start,
4693 adapter->perout[0].period);
4694 wr32(IGC_TRGTTIML0, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
4695 wr32(IGC_TRGTTIMH0, (u32)ts.tv_sec);
4696 tsauxc = rd32(IGC_TSAUXC);
4697 tsauxc |= IGC_TSAUXC_EN_TT0;
4698 wr32(IGC_TSAUXC, tsauxc);
4699 adapter->perout[0].start = ts;
4700 spin_unlock(&adapter->tmreg_lock);
4701 ack |= IGC_TSICR_TT0;
4704 if (tsicr & IGC_TSICR_TT1) {
4705 spin_lock(&adapter->tmreg_lock);
4706 ts = timespec64_add(adapter->perout[1].start,
4707 adapter->perout[1].period);
4708 wr32(IGC_TRGTTIML1, ts.tv_nsec | IGC_TT_IO_TIMER_SEL_SYSTIM0);
4709 wr32(IGC_TRGTTIMH1, (u32)ts.tv_sec);
4710 tsauxc = rd32(IGC_TSAUXC);
4711 tsauxc |= IGC_TSAUXC_EN_TT1;
4712 wr32(IGC_TSAUXC, tsauxc);
4713 adapter->perout[1].start = ts;
4714 spin_unlock(&adapter->tmreg_lock);
4715 ack |= IGC_TSICR_TT1;
4718 if (tsicr & IGC_TSICR_AUTT0) {
4719 nsec = rd32(IGC_AUXSTMPL0);
4720 sec = rd32(IGC_AUXSTMPH0);
4721 event.type = PTP_CLOCK_EXTTS;
4723 event.timestamp = sec * NSEC_PER_SEC + nsec;
4724 ptp_clock_event(adapter->ptp_clock, &event);
4725 ack |= IGC_TSICR_AUTT0;
4728 if (tsicr & IGC_TSICR_AUTT1) {
4729 nsec = rd32(IGC_AUXSTMPL1);
4730 sec = rd32(IGC_AUXSTMPH1);
4731 event.type = PTP_CLOCK_EXTTS;
4733 event.timestamp = sec * NSEC_PER_SEC + nsec;
4734 ptp_clock_event(adapter->ptp_clock, &event);
4735 ack |= IGC_TSICR_AUTT1;
4738 /* acknowledge the interrupts */
4739 wr32(IGC_TSICR, ack);
4743 * igc_msix_other - msix other interrupt handler
4744 * @irq: interrupt number
4745 * @data: pointer to a q_vector
4747 static irqreturn_t igc_msix_other(int irq, void *data)
4749 struct igc_adapter *adapter = data;
4750 struct igc_hw *hw = &adapter->hw;
4751 u32 icr = rd32(IGC_ICR);
4753 /* reading ICR causes bit 31 of EICR to be cleared */
4754 if (icr & IGC_ICR_DRSTA)
4755 schedule_work(&adapter->reset_task);
4757 if (icr & IGC_ICR_DOUTSYNC) {
4758 /* HW is reporting DMA is out of sync */
4759 adapter->stats.doosync++;
4762 if (icr & IGC_ICR_LSC) {
4763 hw->mac.get_link_status = true;
4764 /* guard against interrupt when we're going down */
4765 if (!test_bit(__IGC_DOWN, &adapter->state))
4766 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4769 if (icr & IGC_ICR_TS)
4770 igc_tsync_interrupt(adapter);
4772 wr32(IGC_EIMS, adapter->eims_other);
4777 static void igc_write_itr(struct igc_q_vector *q_vector)
4779 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4781 if (!q_vector->set_itr)
4785 itr_val = IGC_ITR_VAL_MASK;
4787 itr_val |= IGC_EITR_CNT_IGNR;
4789 writel(itr_val, q_vector->itr_register);
4790 q_vector->set_itr = 0;
4793 static irqreturn_t igc_msix_ring(int irq, void *data)
4795 struct igc_q_vector *q_vector = data;
4797 /* Write the ITR value calculated from the previous interrupt. */
4798 igc_write_itr(q_vector);
4800 napi_schedule(&q_vector->napi);
4806 * igc_request_msix - Initialize MSI-X interrupts
4807 * @adapter: Pointer to adapter structure
4809 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
4812 static int igc_request_msix(struct igc_adapter *adapter)
4814 int i = 0, err = 0, vector = 0, free_vector = 0;
4815 struct net_device *netdev = adapter->netdev;
4817 err = request_irq(adapter->msix_entries[vector].vector,
4818 &igc_msix_other, 0, netdev->name, adapter);
4822 for (i = 0; i < adapter->num_q_vectors; i++) {
4823 struct igc_q_vector *q_vector = adapter->q_vector[i];
4827 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4829 if (q_vector->rx.ring && q_vector->tx.ring)
4830 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4831 q_vector->rx.ring->queue_index);
4832 else if (q_vector->tx.ring)
4833 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4834 q_vector->tx.ring->queue_index);
4835 else if (q_vector->rx.ring)
4836 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4837 q_vector->rx.ring->queue_index);
4839 sprintf(q_vector->name, "%s-unused", netdev->name);
4841 err = request_irq(adapter->msix_entries[vector].vector,
4842 igc_msix_ring, 0, q_vector->name,
4848 igc_configure_msix(adapter);
4852 /* free already assigned IRQs */
4853 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4856 for (i = 0; i < vector; i++) {
4857 free_irq(adapter->msix_entries[free_vector++].vector,
4858 adapter->q_vector[i]);
4865 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4866 * @adapter: Pointer to adapter structure
4868 * This function resets the device so that it has 0 rx queues, tx queues, and
4869 * MSI-X interrupts allocated.
4871 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4873 igc_free_q_vectors(adapter);
4874 igc_reset_interrupt_capability(adapter);
4877 /* Need to wait a few seconds after link up to get diagnostic information from
4880 static void igc_update_phy_info(struct timer_list *t)
4882 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4884 igc_get_phy_info(&adapter->hw);
4888 * igc_has_link - check shared code for link and determine up/down
4889 * @adapter: pointer to driver private info
4891 bool igc_has_link(struct igc_adapter *adapter)
4893 struct igc_hw *hw = &adapter->hw;
4894 bool link_active = false;
4896 /* get_link_status is set on LSC (link status) interrupt or
4897 * rx sequence error interrupt. get_link_status will stay
4898 * false until the igc_check_for_link establishes link
4899 * for copper adapters ONLY
4901 switch (hw->phy.media_type) {
4902 case igc_media_type_copper:
4903 if (!hw->mac.get_link_status)
4905 hw->mac.ops.check_for_link(hw);
4906 link_active = !hw->mac.get_link_status;
4909 case igc_media_type_unknown:
4913 if (hw->mac.type == igc_i225 &&
4914 hw->phy.id == I225_I_PHY_ID) {
4915 if (!netif_carrier_ok(adapter->netdev)) {
4916 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4917 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4918 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4919 adapter->link_check_timeout = jiffies;
4927 * igc_watchdog - Timer Call-back
4928 * @t: timer for the watchdog
4930 static void igc_watchdog(struct timer_list *t)
4932 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4933 /* Do the rest outside of interrupt context */
4934 schedule_work(&adapter->watchdog_task);
4937 static void igc_watchdog_task(struct work_struct *work)
4939 struct igc_adapter *adapter = container_of(work,
4942 struct net_device *netdev = adapter->netdev;
4943 struct igc_hw *hw = &adapter->hw;
4944 struct igc_phy_info *phy = &hw->phy;
4945 u16 phy_data, retry_count = 20;
4949 link = igc_has_link(adapter);
4951 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4952 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4953 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4959 /* Cancel scheduled suspend requests. */
4960 pm_runtime_resume(netdev->dev.parent);
4962 if (!netif_carrier_ok(netdev)) {
4965 hw->mac.ops.get_speed_and_duplex(hw,
4966 &adapter->link_speed,
4967 &adapter->link_duplex);
4969 ctrl = rd32(IGC_CTRL);
4970 /* Link status message must follow this format */
4972 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4973 adapter->link_speed,
4974 adapter->link_duplex == FULL_DUPLEX ?
4976 (ctrl & IGC_CTRL_TFCE) &&
4977 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4978 (ctrl & IGC_CTRL_RFCE) ? "RX" :
4979 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
4981 /* disable EEE if enabled */
4982 if ((adapter->flags & IGC_FLAG_EEE) &&
4983 adapter->link_duplex == HALF_DUPLEX) {
4985 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4986 adapter->hw.dev_spec._base.eee_enable = false;
4987 adapter->flags &= ~IGC_FLAG_EEE;
4990 /* check if SmartSpeed worked */
4991 igc_check_downshift(hw);
4992 if (phy->speed_downgraded)
4993 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4995 /* adjust timeout factor according to speed/duplex */
4996 adapter->tx_timeout_factor = 1;
4997 switch (adapter->link_speed) {
4999 adapter->tx_timeout_factor = 14;
5002 /* maybe add some timeout factor ? */
5006 if (adapter->link_speed != SPEED_1000)
5009 /* wait for Remote receiver status OK */
5011 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
5013 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
5017 goto retry_read_status;
5018 } else if (!retry_count) {
5019 netdev_err(netdev, "exceed max 2 second\n");
5022 netdev_err(netdev, "read 1000Base-T Status Reg\n");
5025 netif_carrier_on(netdev);
5027 /* link state has changed, schedule phy info update */
5028 if (!test_bit(__IGC_DOWN, &adapter->state))
5029 mod_timer(&adapter->phy_info_timer,
5030 round_jiffies(jiffies + 2 * HZ));
5033 if (netif_carrier_ok(netdev)) {
5034 adapter->link_speed = 0;
5035 adapter->link_duplex = 0;
5037 /* Links status message must follow this format */
5038 netdev_info(netdev, "NIC Link is Down\n");
5039 netif_carrier_off(netdev);
5041 /* link state has changed, schedule phy info update */
5042 if (!test_bit(__IGC_DOWN, &adapter->state))
5043 mod_timer(&adapter->phy_info_timer,
5044 round_jiffies(jiffies + 2 * HZ));
5046 /* link is down, time to check for alternate media */
5047 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
5048 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5049 schedule_work(&adapter->reset_task);
5050 /* return immediately */
5054 pm_schedule_suspend(netdev->dev.parent,
5057 /* also check for alternate media here */
5058 } else if (!netif_carrier_ok(netdev) &&
5059 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
5060 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
5061 schedule_work(&adapter->reset_task);
5062 /* return immediately */
5068 spin_lock(&adapter->stats64_lock);
5069 igc_update_stats(adapter);
5070 spin_unlock(&adapter->stats64_lock);
5072 for (i = 0; i < adapter->num_tx_queues; i++) {
5073 struct igc_ring *tx_ring = adapter->tx_ring[i];
5075 if (!netif_carrier_ok(netdev)) {
5076 /* We've lost link, so the controller stops DMA,
5077 * but we've got queued Tx work that's never going
5078 * to get done, so reset controller to flush Tx.
5079 * (Do the reset outside of interrupt context).
5081 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
5082 adapter->tx_timeout_count++;
5083 schedule_work(&adapter->reset_task);
5084 /* return immediately since reset is imminent */
5089 /* Force detection of hung controller every watchdog period */
5090 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
5093 /* Cause software interrupt to ensure Rx ring is cleaned */
5094 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5097 for (i = 0; i < adapter->num_q_vectors; i++)
5098 eics |= adapter->q_vector[i]->eims_value;
5099 wr32(IGC_EICS, eics);
5101 wr32(IGC_ICS, IGC_ICS_RXDMT0);
5104 igc_ptp_tx_hang(adapter);
5106 /* Reset the timer */
5107 if (!test_bit(__IGC_DOWN, &adapter->state)) {
5108 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
5109 mod_timer(&adapter->watchdog_timer,
5110 round_jiffies(jiffies + HZ));
5112 mod_timer(&adapter->watchdog_timer,
5113 round_jiffies(jiffies + 2 * HZ));
5118 * igc_intr_msi - Interrupt Handler
5119 * @irq: interrupt number
5120 * @data: pointer to a network interface device structure
5122 static irqreturn_t igc_intr_msi(int irq, void *data)
5124 struct igc_adapter *adapter = data;
5125 struct igc_q_vector *q_vector = adapter->q_vector[0];
5126 struct igc_hw *hw = &adapter->hw;
5127 /* read ICR disables interrupts using IAM */
5128 u32 icr = rd32(IGC_ICR);
5130 igc_write_itr(q_vector);
5132 if (icr & IGC_ICR_DRSTA)
5133 schedule_work(&adapter->reset_task);
5135 if (icr & IGC_ICR_DOUTSYNC) {
5136 /* HW is reporting DMA is out of sync */
5137 adapter->stats.doosync++;
5140 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5141 hw->mac.get_link_status = true;
5142 if (!test_bit(__IGC_DOWN, &adapter->state))
5143 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5146 napi_schedule(&q_vector->napi);
5152 * igc_intr - Legacy Interrupt Handler
5153 * @irq: interrupt number
5154 * @data: pointer to a network interface device structure
5156 static irqreturn_t igc_intr(int irq, void *data)
5158 struct igc_adapter *adapter = data;
5159 struct igc_q_vector *q_vector = adapter->q_vector[0];
5160 struct igc_hw *hw = &adapter->hw;
5161 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5162 * need for the IMC write
5164 u32 icr = rd32(IGC_ICR);
5166 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5167 * not set, then the adapter didn't send an interrupt
5169 if (!(icr & IGC_ICR_INT_ASSERTED))
5172 igc_write_itr(q_vector);
5174 if (icr & IGC_ICR_DRSTA)
5175 schedule_work(&adapter->reset_task);
5177 if (icr & IGC_ICR_DOUTSYNC) {
5178 /* HW is reporting DMA is out of sync */
5179 adapter->stats.doosync++;
5182 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
5183 hw->mac.get_link_status = true;
5184 /* guard against interrupt when we're going down */
5185 if (!test_bit(__IGC_DOWN, &adapter->state))
5186 mod_timer(&adapter->watchdog_timer, jiffies + 1);
5189 napi_schedule(&q_vector->napi);
5194 static void igc_free_irq(struct igc_adapter *adapter)
5196 if (adapter->msix_entries) {
5199 free_irq(adapter->msix_entries[vector++].vector, adapter);
5201 for (i = 0; i < adapter->num_q_vectors; i++)
5202 free_irq(adapter->msix_entries[vector++].vector,
5203 adapter->q_vector[i]);
5205 free_irq(adapter->pdev->irq, adapter);
5210 * igc_request_irq - initialize interrupts
5211 * @adapter: Pointer to adapter structure
5213 * Attempts to configure interrupts using the best available
5214 * capabilities of the hardware and kernel.
5216 static int igc_request_irq(struct igc_adapter *adapter)
5218 struct net_device *netdev = adapter->netdev;
5219 struct pci_dev *pdev = adapter->pdev;
5222 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
5223 err = igc_request_msix(adapter);
5226 /* fall back to MSI */
5227 igc_free_all_tx_resources(adapter);
5228 igc_free_all_rx_resources(adapter);
5230 igc_clear_interrupt_scheme(adapter);
5231 err = igc_init_interrupt_scheme(adapter, false);
5234 igc_setup_all_tx_resources(adapter);
5235 igc_setup_all_rx_resources(adapter);
5236 igc_configure(adapter);
5239 igc_assign_vector(adapter->q_vector[0], 0);
5241 if (adapter->flags & IGC_FLAG_HAS_MSI) {
5242 err = request_irq(pdev->irq, &igc_intr_msi, 0,
5243 netdev->name, adapter);
5247 /* fall back to legacy interrupts */
5248 igc_reset_interrupt_capability(adapter);
5249 adapter->flags &= ~IGC_FLAG_HAS_MSI;
5252 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
5253 netdev->name, adapter);
5256 netdev_err(netdev, "Error %d getting interrupt\n", err);
5263 * __igc_open - Called when a network interface is made active
5264 * @netdev: network interface device structure
5265 * @resuming: boolean indicating if the device is resuming
5267 * Returns 0 on success, negative value on failure
5269 * The open entry point is called when a network interface is made
5270 * active by the system (IFF_UP). At this point all resources needed
5271 * for transmit and receive operations are allocated, the interrupt
5272 * handler is registered with the OS, the watchdog timer is started,
5273 * and the stack is notified that the interface is ready.
5275 static int __igc_open(struct net_device *netdev, bool resuming)
5277 struct igc_adapter *adapter = netdev_priv(netdev);
5278 struct pci_dev *pdev = adapter->pdev;
5279 struct igc_hw *hw = &adapter->hw;
5283 /* disallow open during test */
5285 if (test_bit(__IGC_TESTING, &adapter->state)) {
5291 pm_runtime_get_sync(&pdev->dev);
5293 netif_carrier_off(netdev);
5295 /* allocate transmit descriptors */
5296 err = igc_setup_all_tx_resources(adapter);
5300 /* allocate receive descriptors */
5301 err = igc_setup_all_rx_resources(adapter);
5305 igc_power_up_link(adapter);
5307 igc_configure(adapter);
5309 err = igc_request_irq(adapter);
5313 /* Notify the stack of the actual queue counts. */
5314 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
5316 goto err_set_queues;
5318 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
5320 goto err_set_queues;
5322 clear_bit(__IGC_DOWN, &adapter->state);
5324 for (i = 0; i < adapter->num_q_vectors; i++)
5325 napi_enable(&adapter->q_vector[i]->napi);
5327 /* Clear any pending interrupts. */
5329 igc_irq_enable(adapter);
5332 pm_runtime_put(&pdev->dev);
5334 netif_tx_start_all_queues(netdev);
5336 /* start the watchdog. */
5337 hw->mac.get_link_status = true;
5338 schedule_work(&adapter->watchdog_task);
5343 igc_free_irq(adapter);
5345 igc_release_hw_control(adapter);
5346 igc_power_down_phy_copper_base(&adapter->hw);
5347 igc_free_all_rx_resources(adapter);
5349 igc_free_all_tx_resources(adapter);
5353 pm_runtime_put(&pdev->dev);
5358 int igc_open(struct net_device *netdev)
5360 return __igc_open(netdev, false);
5364 * __igc_close - Disables a network interface
5365 * @netdev: network interface device structure
5366 * @suspending: boolean indicating the device is suspending
5368 * Returns 0, this is not allowed to fail
5370 * The close entry point is called when an interface is de-activated
5371 * by the OS. The hardware is still under the driver's control, but
5372 * needs to be disabled. A global MAC reset is issued to stop the
5373 * hardware, and all transmit and receive resources are freed.
5375 static int __igc_close(struct net_device *netdev, bool suspending)
5377 struct igc_adapter *adapter = netdev_priv(netdev);
5378 struct pci_dev *pdev = adapter->pdev;
5380 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));
5383 pm_runtime_get_sync(&pdev->dev);
5387 igc_release_hw_control(adapter);
5389 igc_free_irq(adapter);
5391 igc_free_all_tx_resources(adapter);
5392 igc_free_all_rx_resources(adapter);
5395 pm_runtime_put_sync(&pdev->dev);
5400 int igc_close(struct net_device *netdev)
5402 if (netif_device_present(netdev) || netdev->dismantle)
5403 return __igc_close(netdev, false);
5408 * igc_ioctl - Access the hwtstamp interface
5409 * @netdev: network interface device structure
5410 * @ifr: interface request data
5411 * @cmd: ioctl command
5413 static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5417 return igc_ptp_get_ts_config(netdev, ifr);
5419 return igc_ptp_set_ts_config(netdev, ifr);
5425 static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
5428 struct igc_ring *ring;
5431 if (queue < 0 || queue >= adapter->num_tx_queues)
5434 ring = adapter->tx_ring[queue];
5435 ring->launchtime_enable = enable;
5437 if (adapter->base_time)
5440 adapter->cycle_time = NSEC_PER_SEC;
5442 for (i = 0; i < adapter->num_tx_queues; i++) {
5443 ring = adapter->tx_ring[i];
5444 ring->start_time = 0;
5445 ring->end_time = NSEC_PER_SEC;
5451 static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
5453 struct timespec64 b;
5455 b = ktime_to_timespec64(base_time);
5457 return timespec64_compare(now, &b) > 0;
5460 static bool validate_schedule(struct igc_adapter *adapter,
5461 const struct tc_taprio_qopt_offload *qopt)
5463 int queue_uses[IGC_MAX_TX_QUEUES] = { };
5464 struct timespec64 now;
5467 if (qopt->cycle_time_extension)
5470 igc_ptp_read(adapter, &now);
5472 /* If we program the controller's BASET registers with a time
5473 * in the future, it will hold all the packets until that
5474 * time, causing a lot of TX Hangs, so to avoid that, we
5475 * reject schedules that would start in the future.
5477 if (!is_base_time_past(qopt->base_time, &now))
5480 for (n = 0; n < qopt->num_entries; n++) {
5481 const struct tc_taprio_sched_entry *e;
5484 e = &qopt->entries[n];
5486 /* i225 only supports "global" frame preemption
5489 if (e->command != TC_TAPRIO_CMD_SET_GATES)
5492 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5493 if (e->gate_mask & BIT(i))
5496 if (queue_uses[i] > 1)
5504 static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
5505 struct tc_etf_qopt_offload *qopt)
5507 struct igc_hw *hw = &adapter->hw;
5510 if (hw->mac.type != igc_i225)
5513 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
5517 return igc_tsn_offload_apply(adapter);
5520 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
5521 struct tc_taprio_qopt_offload *qopt)
5523 u32 start_time = 0, end_time = 0;
5526 if (!qopt->enable) {
5527 adapter->base_time = 0;
5531 if (adapter->base_time)
5534 if (!validate_schedule(adapter, qopt))
5537 adapter->cycle_time = qopt->cycle_time;
5538 adapter->base_time = qopt->base_time;
5540 /* FIXME: be a little smarter about cases when the gate for a
5541 * queue stays open for more than one entry.
5543 for (n = 0; n < qopt->num_entries; n++) {
5544 struct tc_taprio_sched_entry *e = &qopt->entries[n];
5547 end_time += e->interval;
5549 for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
5550 struct igc_ring *ring = adapter->tx_ring[i];
5552 if (!(e->gate_mask & BIT(i)))
5555 ring->start_time = start_time;
5556 ring->end_time = end_time;
5559 start_time += e->interval;
5565 static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
5566 struct tc_taprio_qopt_offload *qopt)
5568 struct igc_hw *hw = &adapter->hw;
5571 if (hw->mac.type != igc_i225)
5574 err = igc_save_qbv_schedule(adapter, qopt);
5578 return igc_tsn_offload_apply(adapter);
5581 static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
5584 struct igc_adapter *adapter = netdev_priv(dev);
5587 case TC_SETUP_QDISC_TAPRIO:
5588 return igc_tsn_enable_qbv_scheduling(adapter, type_data);
5590 case TC_SETUP_QDISC_ETF:
5591 return igc_tsn_enable_launchtime(adapter, type_data);
5598 static int igc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
5600 struct igc_adapter *adapter = netdev_priv(dev);
5602 switch (bpf->command) {
5603 case XDP_SETUP_PROG:
5604 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack);
5605 case XDP_SETUP_XSK_POOL:
5606 return igc_xdp_setup_pool(adapter, bpf->xsk.pool,
5613 static int igc_xdp_xmit(struct net_device *dev, int num_frames,
5614 struct xdp_frame **frames, u32 flags)
5616 struct igc_adapter *adapter = netdev_priv(dev);
5617 int cpu = smp_processor_id();
5618 struct netdev_queue *nq;
5619 struct igc_ring *ring;
5622 if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
5625 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
5628 ring = igc_xdp_get_tx_ring(adapter, cpu);
5629 nq = txring_txq(ring);
5631 __netif_tx_lock(nq, cpu);
5634 for (i = 0; i < num_frames; i++) {
5636 struct xdp_frame *xdpf = frames[i];
5638 err = igc_xdp_init_tx_descriptor(ring, xdpf);
5640 xdp_return_frame_rx_napi(xdpf);
5645 if (flags & XDP_XMIT_FLUSH)
5646 igc_flush_tx_descriptors(ring);
5648 __netif_tx_unlock(nq);
5650 return num_frames - drops;
5653 static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
5654 struct igc_q_vector *q_vector)
5656 struct igc_hw *hw = &adapter->hw;
5659 eics |= q_vector->eims_value;
5660 wr32(IGC_EICS, eics);
5663 int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
5665 struct igc_adapter *adapter = netdev_priv(dev);
5666 struct igc_q_vector *q_vector;
5667 struct igc_ring *ring;
5669 if (test_bit(__IGC_DOWN, &adapter->state))
5672 if (!igc_xdp_is_enabled(adapter))
5675 if (queue_id >= adapter->num_rx_queues)
5678 ring = adapter->rx_ring[queue_id];
5680 if (!ring->xsk_pool)
5683 q_vector = adapter->q_vector[queue_id];
5684 if (!napi_if_scheduled_mark_missed(&q_vector->napi))
5685 igc_trigger_rxtxq_interrupt(adapter, q_vector);
5690 static const struct net_device_ops igc_netdev_ops = {
5691 .ndo_open = igc_open,
5692 .ndo_stop = igc_close,
5693 .ndo_start_xmit = igc_xmit_frame,
5694 .ndo_set_rx_mode = igc_set_rx_mode,
5695 .ndo_set_mac_address = igc_set_mac,
5696 .ndo_change_mtu = igc_change_mtu,
5697 .ndo_get_stats64 = igc_get_stats64,
5698 .ndo_fix_features = igc_fix_features,
5699 .ndo_set_features = igc_set_features,
5700 .ndo_features_check = igc_features_check,
5701 .ndo_do_ioctl = igc_ioctl,
5702 .ndo_setup_tc = igc_setup_tc,
5704 .ndo_xdp_xmit = igc_xdp_xmit,
5705 .ndo_xsk_wakeup = igc_xsk_wakeup,
5708 /* PCIe configuration access */
5709 void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5711 struct igc_adapter *adapter = hw->back;
5713 pci_read_config_word(adapter->pdev, reg, value);
5716 void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
5718 struct igc_adapter *adapter = hw->back;
5720 pci_write_config_word(adapter->pdev, reg, *value);
5723 s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5725 struct igc_adapter *adapter = hw->back;
5727 if (!pci_is_pcie(adapter->pdev))
5728 return -IGC_ERR_CONFIG;
5730 pcie_capability_read_word(adapter->pdev, reg, value);
5735 s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
5737 struct igc_adapter *adapter = hw->back;
5739 if (!pci_is_pcie(adapter->pdev))
5740 return -IGC_ERR_CONFIG;
5742 pcie_capability_write_word(adapter->pdev, reg, *value);
5747 u32 igc_rd32(struct igc_hw *hw, u32 reg)
5749 struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
5750 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
5753 value = readl(&hw_addr[reg]);
5755 /* reads should not return all F's */
5756 if (!(~value) && (!reg || !(~readl(hw_addr)))) {
5757 struct net_device *netdev = igc->netdev;
5760 netif_device_detach(netdev);
5761 netdev_err(netdev, "PCIe link lost, device now detached\n");
5762 WARN(pci_device_is_present(igc->pdev),
5763 "igc: Failed to read reg 0x%x!\n", reg);
5769 int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
5771 struct igc_mac_info *mac = &adapter->hw.mac;
5773 mac->autoneg = false;
5775 /* Make sure dplx is at most 1 bit and lsb of speed is not set
5776 * for the switch() below to work
5778 if ((spd & 1) || (dplx & ~1))
5781 switch (spd + dplx) {
5782 case SPEED_10 + DUPLEX_HALF:
5783 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5785 case SPEED_10 + DUPLEX_FULL:
5786 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5788 case SPEED_100 + DUPLEX_HALF:
5789 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5791 case SPEED_100 + DUPLEX_FULL:
5792 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5794 case SPEED_1000 + DUPLEX_FULL:
5795 mac->autoneg = true;
5796 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5798 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5800 case SPEED_2500 + DUPLEX_FULL:
5801 mac->autoneg = true;
5802 adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
5804 case SPEED_2500 + DUPLEX_HALF: /* not supported */
5809 /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5810 adapter->hw.phy.mdix = AUTO_ALL_MODES;
5815 netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
5820 * igc_probe - Device Initialization Routine
5821 * @pdev: PCI device information struct
5822 * @ent: entry in igc_pci_tbl
5824 * Returns 0 on success, negative on failure
5826 * igc_probe initializes an adapter identified by a pci_dev structure.
5827 * The OS initialization, configuring the adapter private structure,
5828 * and a hardware reset occur.
5830 static int igc_probe(struct pci_dev *pdev,
5831 const struct pci_device_id *ent)
5833 struct igc_adapter *adapter;
5834 struct net_device *netdev;
5836 const struct igc_info *ei = igc_info_tbl[ent->driver_data];
5837 int err, pci_using_dac;
5839 err = pci_enable_device_mem(pdev);
5844 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5848 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5851 "No usable DMA configuration, aborting\n");
5856 err = pci_request_mem_regions(pdev, igc_driver_name);
5860 pci_enable_pcie_error_reporting(pdev);
5862 pci_set_master(pdev);
5865 netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
5869 goto err_alloc_etherdev;
5871 SET_NETDEV_DEV(netdev, &pdev->dev);
5873 pci_set_drvdata(pdev, netdev);
5874 adapter = netdev_priv(netdev);
5875 adapter->netdev = netdev;
5876 adapter->pdev = pdev;
5879 adapter->port_num = hw->bus.func;
5880 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
5882 err = pci_save_state(pdev);
5887 adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
5888 pci_resource_len(pdev, 0));
5889 if (!adapter->io_addr)
5892 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
5893 hw->hw_addr = adapter->io_addr;
5895 netdev->netdev_ops = &igc_netdev_ops;
5896 igc_ethtool_set_ops(netdev);
5897 netdev->watchdog_timeo = 5 * HZ;
5899 netdev->mem_start = pci_resource_start(pdev, 0);
5900 netdev->mem_end = pci_resource_end(pdev, 0);
5902 /* PCI config space info */
5903 hw->vendor_id = pdev->vendor;
5904 hw->device_id = pdev->device;
5905 hw->revision_id = pdev->revision;
5906 hw->subsystem_vendor_id = pdev->subsystem_vendor;
5907 hw->subsystem_device_id = pdev->subsystem_device;
5909 /* Copy the default MAC and PHY function pointers */
5910 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
5911 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
5913 /* Initialize skew-specific constants */
5914 err = ei->get_invariants(hw);
5918 /* Add supported features to the features list*/
5919 netdev->features |= NETIF_F_SG;
5920 netdev->features |= NETIF_F_TSO;
5921 netdev->features |= NETIF_F_TSO6;
5922 netdev->features |= NETIF_F_TSO_ECN;
5923 netdev->features |= NETIF_F_RXCSUM;
5924 netdev->features |= NETIF_F_HW_CSUM;
5925 netdev->features |= NETIF_F_SCTP_CRC;
5926 netdev->features |= NETIF_F_HW_TC;
5928 #define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
5929 NETIF_F_GSO_GRE_CSUM | \
5930 NETIF_F_GSO_IPXIP4 | \
5931 NETIF_F_GSO_IPXIP6 | \
5932 NETIF_F_GSO_UDP_TUNNEL | \
5933 NETIF_F_GSO_UDP_TUNNEL_CSUM)
5935 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
5936 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
5938 /* setup the private structure */
5939 err = igc_sw_init(adapter);
5943 /* copy netdev features into list of user selectable features */
5944 netdev->hw_features |= NETIF_F_NTUPLE;
5945 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
5946 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
5947 netdev->hw_features |= netdev->features;
5950 netdev->features |= NETIF_F_HIGHDMA;
5952 netdev->vlan_features |= netdev->features;
5954 /* MTU range: 68 - 9216 */
5955 netdev->min_mtu = ETH_MIN_MTU;
5956 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
5958 /* before reading the NVM, reset the controller to put the device in a
5959 * known good starting state
5961 hw->mac.ops.reset_hw(hw);
5963 if (igc_get_flash_presence_i225(hw)) {
5964 if (hw->nvm.ops.validate(hw) < 0) {
5965 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
5971 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
5972 /* copy the MAC address out of the NVM */
5973 if (hw->mac.ops.read_mac_addr(hw))
5974 dev_err(&pdev->dev, "NVM Read Error\n");
5977 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
5979 if (!is_valid_ether_addr(netdev->dev_addr)) {
5980 dev_err(&pdev->dev, "Invalid MAC Address\n");
5985 /* configure RXPBSIZE and TXPBSIZE */
5986 wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
5987 wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
5989 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
5990 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);
5992 INIT_WORK(&adapter->reset_task, igc_reset_task);
5993 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);
5995 /* Initialize link properties that are user-changeable */
5996 adapter->fc_autoneg = true;
5997 hw->mac.autoneg = true;
5998 hw->phy.autoneg_advertised = 0xaf;
6000 hw->fc.requested_mode = igc_fc_default;
6001 hw->fc.current_mode = igc_fc_default;
6003 /* By default, support wake on port A */
6004 adapter->flags |= IGC_FLAG_WOL_SUPPORTED;
6006 /* initialize the wol settings based on the eeprom settings */
6007 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
6008 adapter->wol |= IGC_WUFC_MAG;
6010 device_set_wakeup_enable(&adapter->pdev->dev,
6011 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
6013 igc_ptp_init(adapter);
6015 /* reset the hardware with the new settings */
6018 /* let the f/w know that the h/w is now under the control of the
6021 igc_get_hw_control(adapter);
6023 strncpy(netdev->name, "eth%d", IFNAMSIZ);
6024 err = register_netdev(netdev);
6028 /* carrier off reporting is important to ethtool even BEFORE open */
6029 netif_carrier_off(netdev);
6031 /* Check if Media Autosense is enabled */
6034 /* print pcie link status and MAC address */
6035 pcie_print_link_status(pdev);
6036 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);
6038 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
6039 /* Disable EEE for internal PHY devices */
6040 hw->dev_spec._base.eee_enable = false;
6041 adapter->flags &= ~IGC_FLAG_EEE;
6042 igc_set_eee_i225(hw, false, false, false);
6044 pm_runtime_put_noidle(&pdev->dev);
6049 igc_release_hw_control(adapter);
6051 if (!igc_check_reset_block(hw))
6054 igc_clear_interrupt_scheme(adapter);
6055 iounmap(adapter->io_addr);
6057 free_netdev(netdev);
6059 pci_disable_pcie_error_reporting(pdev);
6060 pci_release_mem_regions(pdev);
6063 pci_disable_device(pdev);
6068 * igc_remove - Device Removal Routine
6069 * @pdev: PCI device information struct
6071 * igc_remove is called by the PCI subsystem to alert the driver
6072 * that it should release a PCI device. This could be caused by a
6073 * Hot-Plug event, or because the driver is going to be removed from
6076 static void igc_remove(struct pci_dev *pdev)
6078 struct net_device *netdev = pci_get_drvdata(pdev);
6079 struct igc_adapter *adapter = netdev_priv(netdev);
6081 pm_runtime_get_noresume(&pdev->dev);
6083 igc_flush_nfc_rules(adapter);
6085 igc_ptp_stop(adapter);
6087 set_bit(__IGC_DOWN, &adapter->state);
6089 del_timer_sync(&adapter->watchdog_timer);
6090 del_timer_sync(&adapter->phy_info_timer);
6092 cancel_work_sync(&adapter->reset_task);
6093 cancel_work_sync(&adapter->watchdog_task);
6095 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6096 * would have already happened in close and is redundant.
6098 igc_release_hw_control(adapter);
6099 unregister_netdev(netdev);
6101 igc_clear_interrupt_scheme(adapter);
6102 pci_iounmap(pdev, adapter->io_addr);
6103 pci_release_mem_regions(pdev);
6105 free_netdev(netdev);
6107 pci_disable_pcie_error_reporting(pdev);
6109 pci_disable_device(pdev);
6112 static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
6115 struct net_device *netdev = pci_get_drvdata(pdev);
6116 struct igc_adapter *adapter = netdev_priv(netdev);
6117 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
6118 struct igc_hw *hw = &adapter->hw;
6119 u32 ctrl, rctl, status;
6123 netif_device_detach(netdev);
6125 if (netif_running(netdev))
6126 __igc_close(netdev, true);
6128 igc_ptp_suspend(adapter);
6130 igc_clear_interrupt_scheme(adapter);
6133 status = rd32(IGC_STATUS);
6134 if (status & IGC_STATUS_LU)
6135 wufc &= ~IGC_WUFC_LNKC;
6138 igc_setup_rctl(adapter);
6139 igc_set_rx_mode(netdev);
6141 /* turn on all-multi mode if wake on multicast is enabled */
6142 if (wufc & IGC_WUFC_MC) {
6143 rctl = rd32(IGC_RCTL);
6144 rctl |= IGC_RCTL_MPE;
6145 wr32(IGC_RCTL, rctl);
6148 ctrl = rd32(IGC_CTRL);
6149 ctrl |= IGC_CTRL_ADVD3WUC;
6150 wr32(IGC_CTRL, ctrl);
6152 /* Allow time for pending master requests to run */
6153 igc_disable_pcie_master(hw);
6155 wr32(IGC_WUC, IGC_WUC_PME_EN);
6156 wr32(IGC_WUFC, wufc);
6162 wake = wufc || adapter->en_mng_pt;
6164 igc_power_down_phy_copper_base(&adapter->hw);
6166 igc_power_up_link(adapter);
6169 *enable_wake = wake;
6171 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6172 * would have already happened in close and is redundant.
6174 igc_release_hw_control(adapter);
6176 pci_disable_device(pdev);
6182 static int __maybe_unused igc_runtime_suspend(struct device *dev)
6184 return __igc_shutdown(to_pci_dev(dev), NULL, 1);
6187 static void igc_deliver_wake_packet(struct net_device *netdev)
6189 struct igc_adapter *adapter = netdev_priv(netdev);
6190 struct igc_hw *hw = &adapter->hw;
6191 struct sk_buff *skb;
6194 wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;
6196 /* WUPM stores only the first 128 bytes of the wake packet.
6197 * Read the packet only if we have the whole thing.
6199 if (wupl == 0 || wupl > IGC_WUPM_BYTES)
6202 skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
6208 /* Ensure reads are 32-bit aligned */
6209 wupl = roundup(wupl, 4);
6211 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);
6213 skb->protocol = eth_type_trans(skb, netdev);
6217 static int __maybe_unused igc_resume(struct device *dev)
6219 struct pci_dev *pdev = to_pci_dev(dev);
6220 struct net_device *netdev = pci_get_drvdata(pdev);
6221 struct igc_adapter *adapter = netdev_priv(netdev);
6222 struct igc_hw *hw = &adapter->hw;
6225 pci_set_power_state(pdev, PCI_D0);
6226 pci_restore_state(pdev);
6227 pci_save_state(pdev);
6229 if (!pci_device_is_present(pdev))
6231 err = pci_enable_device_mem(pdev);
6233 netdev_err(netdev, "Cannot enable PCI device from suspend\n");
6236 pci_set_master(pdev);
6238 pci_enable_wake(pdev, PCI_D3hot, 0);
6239 pci_enable_wake(pdev, PCI_D3cold, 0);
6241 if (igc_init_interrupt_scheme(adapter, true)) {
6242 netdev_err(netdev, "Unable to allocate memory for queues\n");
6248 /* let the f/w know that the h/w is now under the control of the
6251 igc_get_hw_control(adapter);
6253 val = rd32(IGC_WUS);
6254 if (val & WAKE_PKT_WUS)
6255 igc_deliver_wake_packet(netdev);
6260 if (!err && netif_running(netdev))
6261 err = __igc_open(netdev, true);
6264 netif_device_attach(netdev);
6270 static int __maybe_unused igc_runtime_resume(struct device *dev)
6272 return igc_resume(dev);
6275 static int __maybe_unused igc_suspend(struct device *dev)
6277 return __igc_shutdown(to_pci_dev(dev), NULL, 0);
6280 static int __maybe_unused igc_runtime_idle(struct device *dev)
6282 struct net_device *netdev = dev_get_drvdata(dev);
6283 struct igc_adapter *adapter = netdev_priv(netdev);
6285 if (!igc_has_link(adapter))
6286 pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
6290 #endif /* CONFIG_PM */
6292 static void igc_shutdown(struct pci_dev *pdev)
6296 __igc_shutdown(pdev, &wake, 0);
6298 if (system_state == SYSTEM_POWER_OFF) {
6299 pci_wake_from_d3(pdev, wake);
6300 pci_set_power_state(pdev, PCI_D3hot);
6305 * igc_io_error_detected - called when PCI error is detected
6306 * @pdev: Pointer to PCI device
6307 * @state: The current PCI connection state
6309 * This function is called after a PCI bus error affecting
6310 * this device has been detected.
6312 static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
6313 pci_channel_state_t state)
6315 struct net_device *netdev = pci_get_drvdata(pdev);
6316 struct igc_adapter *adapter = netdev_priv(netdev);
6318 netif_device_detach(netdev);
6320 if (state == pci_channel_io_perm_failure)
6321 return PCI_ERS_RESULT_DISCONNECT;
6323 if (netif_running(netdev))
6325 pci_disable_device(pdev);
6327 /* Request a slot reset. */
6328 return PCI_ERS_RESULT_NEED_RESET;
6332 * igc_io_slot_reset - called after the PCI bus has been reset.
6333 * @pdev: Pointer to PCI device
6335 * Restart the card from scratch, as if from a cold-boot. Implementation
6336 * resembles the first-half of the igc_resume routine.
6338 static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
6340 struct net_device *netdev = pci_get_drvdata(pdev);
6341 struct igc_adapter *adapter = netdev_priv(netdev);
6342 struct igc_hw *hw = &adapter->hw;
6343 pci_ers_result_t result;
6345 if (pci_enable_device_mem(pdev)) {
6346 netdev_err(netdev, "Could not re-enable PCI device after reset\n");
6347 result = PCI_ERS_RESULT_DISCONNECT;
6349 pci_set_master(pdev);
6350 pci_restore_state(pdev);
6351 pci_save_state(pdev);
6353 pci_enable_wake(pdev, PCI_D3hot, 0);
6354 pci_enable_wake(pdev, PCI_D3cold, 0);
6356 /* In case of PCI error, adapter loses its HW address
6357 * so we should re-assign it here.
6359 hw->hw_addr = adapter->io_addr;
6363 result = PCI_ERS_RESULT_RECOVERED;
6370 * igc_io_resume - called when traffic can start to flow again.
6371 * @pdev: Pointer to PCI device
6373 * This callback is called when the error recovery driver tells us that
6374 * its OK to resume normal operation. Implementation resembles the
6375 * second-half of the igc_resume routine.
6377 static void igc_io_resume(struct pci_dev *pdev)
6379 struct net_device *netdev = pci_get_drvdata(pdev);
6380 struct igc_adapter *adapter = netdev_priv(netdev);
6383 if (netif_running(netdev)) {
6384 if (igc_open(netdev)) {
6385 netdev_err(netdev, "igc_open failed after reset\n");
6390 netif_device_attach(netdev);
6392 /* let the f/w know that the h/w is now under the control of the
6395 igc_get_hw_control(adapter);
6399 static const struct pci_error_handlers igc_err_handler = {
6400 .error_detected = igc_io_error_detected,
6401 .slot_reset = igc_io_slot_reset,
6402 .resume = igc_io_resume,
6406 static const struct dev_pm_ops igc_pm_ops = {
6407 SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
6408 SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
6413 static struct pci_driver igc_driver = {
6414 .name = igc_driver_name,
6415 .id_table = igc_pci_tbl,
6417 .remove = igc_remove,
6419 .driver.pm = &igc_pm_ops,
6421 .shutdown = igc_shutdown,
6422 .err_handler = &igc_err_handler,
6426 * igc_reinit_queues - return error
6427 * @adapter: pointer to adapter structure
6429 int igc_reinit_queues(struct igc_adapter *adapter)
6431 struct net_device *netdev = adapter->netdev;
6434 if (netif_running(netdev))
6437 igc_reset_interrupt_capability(adapter);
6439 if (igc_init_interrupt_scheme(adapter, true)) {
6440 netdev_err(netdev, "Unable to allocate memory for queues\n");
6444 if (netif_running(netdev))
6445 err = igc_open(netdev);
6451 * igc_get_hw_dev - return device
6452 * @hw: pointer to hardware structure
6454 * used by hardware layer to print debugging information
6456 struct net_device *igc_get_hw_dev(struct igc_hw *hw)
6458 struct igc_adapter *adapter = hw->back;
6460 return adapter->netdev;
6463 static void igc_disable_rx_ring_hw(struct igc_ring *ring)
6465 struct igc_hw *hw = &ring->q_vector->adapter->hw;
6466 u8 idx = ring->reg_idx;
6469 rxdctl = rd32(IGC_RXDCTL(idx));
6470 rxdctl &= ~IGC_RXDCTL_QUEUE_ENABLE;
6471 rxdctl |= IGC_RXDCTL_SWFLUSH;
6472 wr32(IGC_RXDCTL(idx), rxdctl);
6475 void igc_disable_rx_ring(struct igc_ring *ring)
6477 igc_disable_rx_ring_hw(ring);
6478 igc_clean_rx_ring(ring);
6481 void igc_enable_rx_ring(struct igc_ring *ring)
6483 struct igc_adapter *adapter = ring->q_vector->adapter;
6485 igc_configure_rx_ring(adapter, ring);
6488 igc_alloc_rx_buffers_zc(ring, igc_desc_unused(ring));
6490 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
6493 static void igc_disable_tx_ring_hw(struct igc_ring *ring)
6495 struct igc_hw *hw = &ring->q_vector->adapter->hw;
6496 u8 idx = ring->reg_idx;
6499 txdctl = rd32(IGC_TXDCTL(idx));
6500 txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE;
6501 txdctl |= IGC_TXDCTL_SWFLUSH;
6502 wr32(IGC_TXDCTL(idx), txdctl);
6505 void igc_disable_tx_ring(struct igc_ring *ring)
6507 igc_disable_tx_ring_hw(ring);
6508 igc_clean_tx_ring(ring);
6511 void igc_enable_tx_ring(struct igc_ring *ring)
6513 struct igc_adapter *adapter = ring->q_vector->adapter;
6515 igc_configure_tx_ring(adapter, ring);
6519 * igc_init_module - Driver Registration Routine
6521 * igc_init_module is the first routine called when the driver is
6522 * loaded. All it does is register with the PCI subsystem.
6524 static int __init igc_init_module(void)
6528 pr_info("%s\n", igc_driver_string);
6529 pr_info("%s\n", igc_copyright);
6531 ret = pci_register_driver(&igc_driver);
6535 module_init(igc_init_module);
6538 * igc_exit_module - Driver Exit Cleanup Routine
6540 * igc_exit_module is called just before the driver is removed
6543 static void __exit igc_exit_module(void)
6545 pci_unregister_driver(&igc_driver);
6548 module_exit(igc_exit_module);