// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>
#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"

#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
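/* Usage note (illustrative): "modprobe igc debug=16" requests the most
 * verbose message level per the description above, while the default of -1
 * leaves the driver on DEFAULT_MSG_ENABLE.
 */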
char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
        "Copyright(c) 2018 Intel Corporation.";

static const struct igc_info *igc_info_tbl[] = {
        [board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
        { PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
void igc_reset(struct igc_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        struct igc_hw *hw = &adapter->hw;
        struct igc_fc_info *fc = &hw->fc;
        u32 pba, hwm;

        /* Repartition PBA for greater than 9k MTU if required */
        pba = IGC_PBA_34K;

        /* flow control settings
         * The high water mark must be low enough to fit one full frame
         * after transmitting the pause frame. As such we must have enough
         * space to allow for us to complete our current transmit and then
         * receive the frame that is in progress from the link partner.
         * Set it to:
         * - the full Rx FIFO size minus one full Tx plus one full Rx frame
         */
        hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

        fc->high_water = hwm & 0xFFFFFFF0;      /* 16-byte granularity */
        fc->low_water = fc->high_water - 16;
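        /* Worked example (illustrative): pba is in KB, so (pba << 10)
         * converts it to bytes. With a 34 KB packet buffer and a 1522-byte
         * max frame, hwm = 34816 - (1522 + MAX_JUMBO_FRAME_SIZE); masking
         * with 0xFFFFFFF0 then rounds high_water down to a 16-byte boundary,
         * and low_water sits one 16-byte granule below it.
         */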
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->current_mode = fc->requested_mode;

        hw->mac.ops.reset_hw(hw);

        if (hw->mac.ops.init_hw(hw))
                netdev_err(dev, "Error on hardware initialization\n");

        /* Re-establish EEE setting */
        igc_set_eee_i225(hw, true, true, true);

        if (!netif_running(adapter->netdev))
                igc_power_down_phy_copper_base(&adapter->hw);

        /* Re-enable PTP, where applicable. */
        igc_ptp_reset(adapter);

        /* Re-enable TSN offloading, where applicable. */
        igc_tsn_offload_apply(adapter);

        igc_get_phy_info(hw);
}
/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
        igc_reset_phy(&adapter->hw);

        igc_power_up_phy_copper(&adapter->hw);

        igc_setup_link(&adapter->hw);
}
/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware take over control of h/w */
        ctrl_ext = rd32(IGC_CTRL_EXT);
        wr32(IGC_CTRL_EXT,
             ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        u32 ctrl_ext;

        /* Let firmware know the driver has taken over */
        ctrl_ext = rd32(IGC_CTRL_EXT);
        wr32(IGC_CTRL_EXT,
             ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
        u16 i = tx_ring->next_to_clean;
        struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

        while (i != tx_ring->next_to_use) {
                union igc_adv_tx_desc *eop_desc, *tx_desc;

                /* Free all the Tx ring sk_buffs */
                dev_kfree_skb_any(tx_buffer->skb);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* check for eop_desc to determine the end of the packet */
                eop_desc = tx_buffer->next_to_watch;
                tx_desc = IGC_TX_DESC(tx_ring, i);

                /* unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(i == tx_ring->count)) {
                                i = 0;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IGC_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len))
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                i++;
                if (unlikely(i == tx_ring->count)) {
                        i = 0;
                        tx_buffer = tx_ring->tx_buffer_info;
                }
        }

        /* reset BQL for queue */
        netdev_tx_reset_queue(txring_txq(tx_ring));

        /* reset next_to_use and next_to_clean */
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
}
/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
        igc_clean_tx_ring(tx_ring);

        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;

        /* if not set, then don't free */
        if (!tx_ring->desc)
                return;

        dma_free_coherent(tx_ring->dev, tx_ring->size,
                          tx_ring->desc, tx_ring->dma);

        tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i])
                        igc_clean_tx_ring(adapter->tx_ring[i]);
}
/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
        struct net_device *ndev = tx_ring->netdev;
        struct device *dev = tx_ring->dev;
        int size = 0;

        size = sizeof(struct igc_tx_buffer) * tx_ring->count;
        tx_ring->tx_buffer_info = vzalloc(size);
        if (!tx_ring->tx_buffer_info)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
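        /* Sizing example (illustrative): each advanced Tx descriptor is
         * 16 bytes, so a 256-entry ring needs exactly 4096 bytes and the
         * ALIGN() above is a no-op; a 1024-entry ring needs 16384 bytes.
         */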
        tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        return 0;

err:
        vfree(tx_ring->tx_buffer_info);
        netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
        return -ENOMEM;
}
/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        int i, err = 0;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                err = igc_setup_tx_resources(adapter->tx_ring[i]);
                if (err) {
                        netdev_err(dev, "Error on Tx queue %u setup\n", i);
                        for (i--; i >= 0; i--)
                                igc_free_tx_resources(adapter->tx_ring[i]);
                        break;
                }
        }

        return err;
}
/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
        u16 i = rx_ring->next_to_clean;

        dev_kfree_skb(rx_ring->skb);
        rx_ring->skb = NULL;

        /* Free all the Rx ring sk_buffs */
        while (i != rx_ring->next_to_alloc) {
                struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

                /* Invalidate cache lines that may have been written to by
                 * device so that we avoid corrupting memory.
                 */
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              buffer_info->dma,
                                              buffer_info->page_offset,
                                              igc_rx_bufsz(rx_ring),
                                              DMA_FROM_DEVICE);

                /* free resources associated with mapping */
                dma_unmap_page_attrs(rx_ring->dev,
                                     buffer_info->dma,
                                     igc_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE,
                                     IGC_RX_DMA_ATTR);
                __page_frag_cache_drain(buffer_info->page,
                                        buffer_info->pagecnt_bias);

                i++;
                if (i == rx_ring->count)
                        i = 0;
        }

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
}
/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i])
                        igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
        igc_clean_rx_ring(rx_ring);

        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;

        /* if not set, then don't free */
        if (!rx_ring->desc)
                return;

        dma_free_coherent(rx_ring->dev, rx_ring->size,
                          rx_ring->desc, rx_ring->dma);

        rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_rx_queues; i++)
                igc_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
        struct net_device *ndev = rx_ring->netdev;
        struct device *dev = rx_ring->dev;
        int size, desc_len;

        size = sizeof(struct igc_rx_buffer) * rx_ring->count;
        rx_ring->rx_buffer_info = vzalloc(size);
        if (!rx_ring->rx_buffer_info)
                goto err;

        desc_len = sizeof(union igc_adv_rx_desc);

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * desc_len;
        rx_ring->size = ALIGN(rx_ring->size, 4096);

        rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
        if (!rx_ring->desc)
                goto err;

        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        return 0;

err:
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
        netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
        return -ENOMEM;
}
/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        int i, err = 0;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                err = igc_setup_rx_resources(adapter->rx_ring[i]);
                if (err) {
                        netdev_err(dev, "Error on Rx queue %u setup\n", i);
                        for (i--; i >= 0; i--)
                                igc_free_rx_resources(adapter->rx_ring[i]);
                        break;
                }
        }

        return err;
}
/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
                                  struct igc_ring *ring)
{
        struct igc_hw *hw = &adapter->hw;
        union igc_adv_rx_desc *rx_desc;
        int reg_idx = ring->reg_idx;
        u32 srrctl = 0, rxdctl = 0;
        u64 rdba = ring->dma;

        /* disable the queue */
        wr32(IGC_RXDCTL(reg_idx), 0);

        /* Set DMA base address registers */
        wr32(IGC_RDBAL(reg_idx),
             rdba & 0x00000000ffffffffULL);
        wr32(IGC_RDBAH(reg_idx), rdba >> 32);
        wr32(IGC_RDLEN(reg_idx),
             ring->count * sizeof(union igc_adv_rx_desc));

        /* initialize head and tail */
        ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
        wr32(IGC_RDH(reg_idx), 0);
        writel(0, ring->tail);

        /* reset next-to-use/clean to place SW in sync with hardware */
        ring->next_to_clean = 0;
        ring->next_to_use = 0;

        /* set descriptor configuration */
        srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
        if (ring_uses_large_buffer(ring))
                srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
        else
                srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
        srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
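        /* Illustrative: BSIZEPKT is programmed in 1 KB units (assuming the
         * usual 10-bit shift), so IGC_RXBUFFER_2048 encodes as 2 and
         * IGC_RXBUFFER_3072 as 3.
         */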
        wr32(IGC_SRRCTL(reg_idx), srrctl);

        rxdctl |= IGC_RX_PTHRESH;
        rxdctl |= IGC_RX_HTHRESH << 8;
        rxdctl |= IGC_RX_WTHRESH << 16;

        /* initialize rx_buffer_info */
        memset(ring->rx_buffer_info, 0,
               sizeof(struct igc_rx_buffer) * ring->count);

        /* initialize Rx descriptor 0 */
        rx_desc = IGC_RX_DESC(ring, 0);
        rx_desc->wb.upper.length = 0;

        /* enable receive descriptor fetching */
        rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

        wr32(IGC_RXDCTL(reg_idx), rxdctl);
}
/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
        int i;

        /* Setup the HW Rx Head and Tail Descriptor Pointers and
         * the Base and Length of the Rx Descriptor Ring
         */
        for (i = 0; i < adapter->num_rx_queues; i++)
                igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
                                  struct igc_ring *ring)
{
        struct igc_hw *hw = &adapter->hw;
        int reg_idx = ring->reg_idx;
        u64 tdba = ring->dma;
        u32 txdctl = 0;

        /* disable the queue */
        wr32(IGC_TXDCTL(reg_idx), 0);
        wrfl();
        mdelay(10);

        wr32(IGC_TDLEN(reg_idx),
             ring->count * sizeof(union igc_adv_tx_desc));
        wr32(IGC_TDBAL(reg_idx),
             tdba & 0x00000000ffffffffULL);
        wr32(IGC_TDBAH(reg_idx), tdba >> 32);

        ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
        wr32(IGC_TDH(reg_idx), 0);
        writel(0, ring->tail);

        txdctl |= IGC_TX_PTHRESH;
        txdctl |= IGC_TX_HTHRESH << 8;
        txdctl |= IGC_TX_WTHRESH << 16;

        txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
        wr32(IGC_TXDCTL(reg_idx), txdctl);
}
/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        u32 j, num_rx_queues;
        u32 mrqc, rxcsum;
        u32 rss_key[10];

        netdev_rss_key_fill(rss_key, sizeof(rss_key));
        for (j = 0; j < 10; j++)
                wr32(IGC_RSSRK(j), rss_key[j]);

        num_rx_queues = adapter->rss_queues;

        if (adapter->rss_indir_tbl_init != num_rx_queues) {
                for (j = 0; j < IGC_RETA_SIZE; j++)
                        adapter->rss_indir_tbl[j] =
                        (j * num_rx_queues) / IGC_RETA_SIZE;
                adapter->rss_indir_tbl_init = num_rx_queues;
        }
        igc_write_rss_indir_tbl(adapter);
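        /* Illustrative mapping (assuming IGC_RETA_SIZE == 128): with
         * rss_queues == 4, indirection entries 0..31 point at queue 0,
         * 32..63 at queue 1, and so on, spreading flows evenly.
         */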
        /* Disable raw packet checksumming so that RSS hash is placed in
         * descriptor on writeback. No need to enable TCP/UDP/IP checksum
         * offloads as they are enabled by default
         */
        rxcsum = rd32(IGC_RXCSUM);
        rxcsum |= IGC_RXCSUM_PCSD;

        /* Enable Receive Checksum Offload for SCTP */
        rxcsum |= IGC_RXCSUM_CRCOFL;

        /* Don't need to set TUOFL or IPOFL, they default to 1 */
        wr32(IGC_RXCSUM, rxcsum);

        /* Generate RSS hash based on packet types, TCP/UDP
         * port numbers and/or IPv4/v6 src and dst addresses
         */
        mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
               IGC_MRQC_RSS_FIELD_IPV4_TCP |
               IGC_MRQC_RSS_FIELD_IPV6 |
               IGC_MRQC_RSS_FIELD_IPV6_TCP |
               IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

        if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
                mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
        if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
                mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

        mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

        wr32(IGC_MRQC, mrqc);
}
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        u32 rctl;

        rctl = rd32(IGC_RCTL);

        rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
        rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

        rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
                (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

        /* enable stripping of CRC. Newer features require
         * that the HW strips the CRC.
         */
        rctl |= IGC_RCTL_SECRC;

        /* disable store bad packets and clear size bits. */
        rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

        /* enable LPE to allow for reception of jumbo frames */
        rctl |= IGC_RCTL_LPE;

        /* disable queue 0 to prevent tail write w/o re-config */
        wr32(IGC_RXDCTL(0), 0);

        /* This is useful for sniffing bad packets. */
        if (adapter->netdev->features & NETIF_F_RXALL) {
                /* UPE and MPE will be handled by normal PROMISC logic
                 * in set_rx_mode
                 */
                rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
                         IGC_RCTL_BAM | /* RX All Bcast Pkts */
                         IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

                rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
                          IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
        }

        wr32(IGC_RCTL, rctl);
}
/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        u32 tctl;

        /* disable queue 0 which could be enabled by default */
        wr32(IGC_TXDCTL(0), 0);

        /* Program the Transmit Control Register */
        tctl = rd32(IGC_TCTL);
        tctl &= ~IGC_TCTL_CT;
        tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
                (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

        /* Enable transmits */
        tctl |= IGC_TCTL_EN;

        wr32(IGC_TCTL, tctl);
}
/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
                                  enum igc_mac_filter_type type,
                                  const u8 *addr, int queue)
{
        struct net_device *dev = adapter->netdev;
        struct igc_hw *hw = &adapter->hw;
        u32 ral, rah;

        if (WARN_ON(index >= hw->mac.rar_entry_count))
                return;

        ral = le32_to_cpup((__le32 *)(addr));
        rah = le16_to_cpup((__le16 *)(addr + 4));

        if (type == IGC_MAC_FILTER_TYPE_SRC) {
                rah &= ~IGC_RAH_ASEL_MASK;
                rah |= IGC_RAH_ASEL_SRC_ADDR;
        }

        if (queue >= 0) {
                rah &= ~IGC_RAH_QSEL_MASK;
                rah |= (queue << IGC_RAH_QSEL_SHIFT);
                rah |= IGC_RAH_QSEL_ENABLE;
        }

        rah |= IGC_RAH_AV;

        wr32(IGC_RAL(index), ral);
        wr32(IGC_RAH(index), rah);

        netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}
/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
        struct net_device *dev = adapter->netdev;
        struct igc_hw *hw = &adapter->hw;

        if (WARN_ON(index >= hw->mac.rar_entry_count))
                return;

        wr32(IGC_RAL(index), 0);
        wr32(IGC_RAH(index), 0);

        netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
        struct net_device *dev = adapter->netdev;
        u8 *addr = adapter->hw.mac.addr;

        netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

        igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct igc_hw *hw = &adapter->hw;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

        /* set the correct pool for the new PF MAC address in entry 0 */
        igc_set_default_mac_filter(adapter);

        return 0;
}
/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 */
static int igc_write_mc_addr_list(struct net_device *netdev)
{
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct igc_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        u8 *mta_list;
        int i;

        if (netdev_mc_empty(netdev)) {
                /* nothing to program, so clear mc list */
                igc_update_mc_addr_list(hw, NULL, 0);
                return 0;
        }

        mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
        if (!mta_list)
                return -ENOMEM;

        /* The shared function expects a packed array of only addresses. */
        i = 0;
        netdev_for_each_mc_addr(ha, netdev)
                memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

        igc_update_mc_addr_list(hw, mta_list, i);
        kfree(mta_list);

        return netdev_mc_count(netdev);
}
static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
        ktime_t cycle_time = adapter->cycle_time;
        ktime_t base_time = adapter->base_time;
        s32 launchtime;

        /* FIXME: when using ETF together with taprio, we may have a
         * case where 'delta' is larger than the cycle_time, this may
         * cause problems if we don't read the current value of
         * IGC_BASET, as the value written into the launchtime
         * descriptor field may be misinterpreted.
         */
        div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

        return cpu_to_le32(launchtime);
}
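/* Worked example (illustrative): with base_time = 0 and a 1 ms cycle_time,
 * a txtime of 2.5 ms leaves a remainder of 500000 ns in 'launchtime', i.e.
 * the launch offset within the current cycle.
 */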
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
                            struct igc_tx_buffer *first,
                            u32 vlan_macip_lens, u32 type_tucmd,
                            u32 mss_l4len_idx)
{
        struct igc_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;

        context_desc = IGC_TX_CTXTDESC(tx_ring, i);

        i++;
        tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

        /* set bits to identify this as an advanced context descriptor */
        type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

        /* For i225, context index must be unique per ring. */
        if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
                mss_l4len_idx |= tx_ring->reg_idx << 4;

        context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
        context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
        context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

        /* We assume there is always a valid Tx time available. Invalid times
         * should have been handled by the upper layers.
         */
        if (tx_ring->launchtime_enable) {
                struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
                ktime_t txtime = first->skb->tstamp;

                first->skb->tstamp = ktime_set(0, 0);
                context_desc->launch_time = igc_tx_launchtime(adapter,
                                                              txtime);
        } else {
                context_desc->launch_time = 0;
        }
}
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
        struct sk_buff *skb = first->skb;
        u32 vlan_macip_lens = 0;
        u32 type_tucmd = 0;

        if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
                if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
                    !tx_ring->launchtime_enable)
                        return;
                goto no_csum;
        }

        switch (skb->csum_offset) {
        case offsetof(struct tcphdr, check):
                type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
                fallthrough;
        case offsetof(struct udphdr, check):
                break;
        case offsetof(struct sctphdr, checksum):
                /* validate that this is actually an SCTP request */
                if (skb_csum_is_sctp(skb)) {
                        type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
                        break;
                }
                fallthrough;
        default:
                skb_checksum_help(skb);
                goto csum_failed;
        }

        /* update TX checksum flag */
        first->tx_flags |= IGC_TX_FLAGS_CSUM;
        vlan_macip_lens = skb_checksum_start_offset(skb) -
                          skb_network_offset(skb);
no_csum:
        vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
        struct net_device *netdev = tx_ring->netdev;

        netif_stop_subqueue(netdev, tx_ring->queue_index);

        /* Memory barrier: make sure the re-check below observes ring state
         * published by a CPU that is concurrently freeing descriptors.
         */
        smp_rmb();

        /* We need to check again in a case another CPU has just
         * made room available.
         */
        if (igc_desc_unused(tx_ring) < size)
                return -EBUSY;

        /* A reprieve! */
        netif_wake_subqueue(netdev, tx_ring->queue_index);

        u64_stats_update_begin(&tx_ring->tx_syncp2);
        tx_ring->tx_stats.restart_queue2++;
        u64_stats_update_end(&tx_ring->tx_syncp2);

        return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
        if (igc_desc_unused(tx_ring) >= size)
                return 0;

        return __igc_maybe_stop_tx(tx_ring, size);
}
#define IGC_SET_FLAG(_input, _flag, _result) \
        (((_flag) <= (_result)) ? \
         ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
         ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
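/* Expansion example (illustrative): IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
 * IGC_ADVTXD_DCMD_TSE) isolates the IGC_TX_FLAGS_TSO bit and scales it by
 * the ratio of the two single-bit masks, translating the software flag to
 * the descriptor bit position without a branch.
 */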
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
        /* set type for advanced descriptor with frame checksum insertion */
        u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
                       IGC_ADVTXD_DCMD_DEXT |
                       IGC_ADVTXD_DCMD_IFCS;

        /* set segmentation bits for TSO */
        cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
                                 (IGC_ADVTXD_DCMD_TSE));

        /* set timestamp bit if present */
        cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
                                 (IGC_ADVTXD_MAC_TSTAMP));

        return cmd_type;
}
static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
                                 union igc_adv_tx_desc *tx_desc,
                                 u32 tx_flags, unsigned int paylen)
{
        u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

        /* insert L4 checksum */
        olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
                          ((IGC_TXD_POPTS_TXSM << 8) /
                          IGC_TX_FLAGS_CSUM);

        /* insert IPv4 checksum */
        olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
                          (((IGC_TXD_POPTS_IXSM << 8)) /
                          IGC_TX_FLAGS_IPV4);

        tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
static int igc_tx_map(struct igc_ring *tx_ring,
                      struct igc_tx_buffer *first,
                      const u8 hdr_len)
{
        struct sk_buff *skb = first->skb;
        struct igc_tx_buffer *tx_buffer;
        union igc_adv_tx_desc *tx_desc;
        u32 tx_flags = first->tx_flags;
        skb_frag_t *frag;
        u16 i = tx_ring->next_to_use;
        unsigned int data_len, size;
        dma_addr_t dma;
        u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

        tx_desc = IGC_TX_DESC(tx_ring, i);

        igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

        size = skb_headlen(skb);
        data_len = skb->data_len;

        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

        tx_buffer = first;

        for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
                if (dma_mapping_error(tx_ring->dev, dma))
                        goto dma_error;

                /* record length, and DMA address */
                dma_unmap_len_set(tx_buffer, len, size);
                dma_unmap_addr_set(tx_buffer, dma, dma);

                tx_desc->read.buffer_addr = cpu_to_le64(dma);

                while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
                        tx_desc->read.cmd_type_len =
                                cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

                        i++;
                        tx_desc++;
                        if (i == tx_ring->count) {
                                tx_desc = IGC_TX_DESC(tx_ring, 0);
                                i = 0;
                        }
                        tx_desc->read.olinfo_status = 0;

                        dma += IGC_MAX_DATA_PER_TXD;
                        size -= IGC_MAX_DATA_PER_TXD;

                        tx_desc->read.buffer_addr = cpu_to_le64(dma);
                }

                if (likely(!data_len))
                        break;

                tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

                i++;
                tx_desc++;
                if (i == tx_ring->count) {
                        tx_desc = IGC_TX_DESC(tx_ring, 0);
                        i = 0;
                }
                tx_desc->read.olinfo_status = 0;

                size = skb_frag_size(frag);
                data_len -= size;

                dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
                                       size, DMA_TO_DEVICE);

                tx_buffer = &tx_ring->tx_buffer_info[i];
        }

        /* write last descriptor with RS and EOP bits */
        cmd_type |= size | IGC_TXD_DCMD;
        tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

        netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

        /* set the timestamp */
        first->time_stamp = jiffies;

        skb_tx_timestamp(skb);

        /* Force memory writes to complete before letting h/w know there
         * are new descriptors to fetch. (Only applicable for weak-ordered
         * memory model archs, such as IA-64).
         *
         * We also need this memory barrier to make certain all of the
         * status bits have been updated before next_to_watch is written.
         */
        wmb();

        /* set next_to_watch value indicating a packet is present */
        first->next_to_watch = tx_desc;

        i++;
        if (i == tx_ring->count)
                i = 0;

        tx_ring->next_to_use = i;

        /* Make sure there is space in the ring for the next send. */
        igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

        if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                writel(i, tx_ring->tail);
        }

        return 0;

dma_error:
        netdev_err(tx_ring->netdev, "TX DMA map failed\n");
        tx_buffer = &tx_ring->tx_buffer_info[i];

        /* clear dma mappings for failed tx_buffer_info map */
        while (tx_buffer != first) {
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_page(tx_ring->dev,
                                       dma_unmap_addr(tx_buffer, dma),
                                       dma_unmap_len(tx_buffer, len),
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);

                if (i-- == 0)
                        i += tx_ring->count;
                tx_buffer = &tx_ring->tx_buffer_info[i];
        }

        if (dma_unmap_len(tx_buffer, len))
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);
        dma_unmap_len_set(tx_buffer, len, 0);

        dev_kfree_skb_any(tx_buffer->skb);
        tx_buffer->skb = NULL;

        tx_ring->next_to_use = i;

        return -1;
}
static int igc_tso(struct igc_ring *tx_ring,
                   struct igc_tx_buffer *first,
                   u8 *hdr_len)
{
        u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
        struct sk_buff *skb = first->skb;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
                unsigned char *hdr;
        } ip;
        union {
                struct tcphdr *tcp;
                struct udphdr *udp;
                unsigned char *hdr;
        } l4;
        u32 paylen, l4_offset;
        int err;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        if (!skb_is_gso(skb))
                return 0;

        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;

        ip.hdr = skb_network_header(skb);
        l4.hdr = skb_checksum_start(skb);

        /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
        type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

        /* initialize outer IP header fields */
        if (ip.v4->version == 4) {
                unsigned char *csum_start = skb_checksum_start(skb);
                unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

                /* IP header will have to cancel out any data that
                 * is not a part of the outer IP header
                 */
                ip.v4->check = csum_fold(csum_partial(trans_start,
                                                      csum_start - trans_start,
                                                      0));
                type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

                ip.v4->tot_len = 0;
                first->tx_flags |= IGC_TX_FLAGS_TSO |
                                   IGC_TX_FLAGS_CSUM |
                                   IGC_TX_FLAGS_IPV4;
        } else {
                ip.v6->payload_len = 0;
                first->tx_flags |= IGC_TX_FLAGS_TSO |
                                   IGC_TX_FLAGS_CSUM;
        }

        /* determine offset of inner transport header */
        l4_offset = l4.hdr - skb->data;

        /* remove payload length from inner checksum */
        paylen = skb->len - l4_offset;
        if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
                /* compute length of segmentation header */
                *hdr_len = (l4.tcp->doff * 4) + l4_offset;
                csum_replace_by_diff(&l4.tcp->check,
                                     (__force __wsum)htonl(paylen));
        } else {
                /* compute length of segmentation header */
                *hdr_len = sizeof(*l4.udp) + l4_offset;
                csum_replace_by_diff(&l4.udp->check,
                                     (__force __wsum)htonl(paylen));
        }

        /* update gso size and bytecount with header size */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
        first->bytecount += (first->gso_segs - 1) * *hdr_len;

        /* MSS L4LEN IDX */
        mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
        mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

        /* VLAN MACLEN IPLEN */
        vlan_macip_lens = l4.hdr - ip.hdr;
        vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

        igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
                        type_tucmd, mss_l4len_idx);

        return 1;
}
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
                                       struct igc_ring *tx_ring)
{
        u16 count = TXD_USE_COUNT(skb_headlen(skb));
        __be16 protocol = vlan_get_protocol(skb);
        struct igc_tx_buffer *first;
        u32 tx_flags = 0;
        unsigned short f;
        u8 hdr_len = 0;
        int tso = 0;

        /* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
         *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
         *	+ 2 desc gap to keep tail from touching head,
         *	+ 1 desc for context descriptor,
         * otherwise try next time
         */
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_frag_size(
                                                &skb_shinfo(skb)->frags[f]));
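        /* Counting example (illustrative): a linear 1500-byte skb needs
         * TXD_USE_COUNT(1500) == 1 descriptor, so the check below reserves
         * 1 + 3 slots: one data descriptor, one context descriptor and the
         * two-descriptor gap noted above.
         */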
        if (igc_maybe_stop_tx(tx_ring, count + 3)) {
                /* this is a hard error */
                return NETDEV_TX_BUSY;
        }

        /* record the location of the first descriptor for this packet */
        first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
        first->skb = skb;
        first->bytecount = skb->len;
        first->gso_segs = 1;

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

                /* FIXME: add support for retrieving timestamps from
                 * the other timer registers before skipping the
                 * timestamping request.
                 */
                if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
                    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
                                           &adapter->state)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                        tx_flags |= IGC_TX_FLAGS_TSTAMP;

                        adapter->ptp_tx_skb = skb_get(skb);
                        adapter->ptp_tx_start = jiffies;
                } else {
                        adapter->tx_hwtstamp_skipped++;
                }
        }

        /* record initial flags and protocol */
        first->tx_flags = tx_flags;
        first->protocol = protocol;

        tso = igc_tso(tx_ring, first, &hdr_len);
        if (tso < 0)
                goto out_drop;
        else if (!tso)
                igc_tx_csum(tx_ring, first);

        igc_tx_map(tx_ring, first, hdr_len);

        return NETDEV_TX_OK;

out_drop:
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;

        return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
                                                    struct sk_buff *skb)
{
        unsigned int r_idx = skb->queue_mapping;

        if (r_idx >= adapter->num_tx_queues)
                r_idx = r_idx % adapter->num_tx_queues;

        return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
                                  struct net_device *netdev)
{
        struct igc_adapter *adapter = netdev_priv(netdev);

        /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
         * in order to meet this minimum size requirement.
         */
        if (skb->len < 17) {
                if (skb_padto(skb, 17))
                        return NETDEV_TX_OK;
                skb->len = 17;
        }

        return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
static void igc_rx_checksum(struct igc_ring *ring,
                            union igc_adv_rx_desc *rx_desc,
                            struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Ignore Checksum bit is set */
        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
                return;

        /* Rx checksum disabled via ethtool */
        if (!(ring->netdev->features & NETIF_F_RXCSUM))
                return;

        /* TCP/UDP checksum error bit is set */
        if (igc_test_staterr(rx_desc,
                             IGC_RXDEXT_STATERR_L4E |
                             IGC_RXDEXT_STATERR_IPE)) {
                /* work around errata with sctp packets where the TCPE aka
                 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
                 * packets (aka let the stack check the crc32c)
                 */
                if (!(skb->len == 60 &&
                      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
                        u64_stats_update_begin(&ring->rx_syncp);
                        ring->rx_stats.csum_err++;
                        u64_stats_update_end(&ring->rx_syncp);
                }
                /* let the stack verify checksum errors */
                return;
        }
        /* It must be a TCP or UDP packet with a valid checksum */
        if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
                                      IGC_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
                   le32_to_cpu(rx_desc->wb.upper.status_error));
}
static inline void igc_rx_hash(struct igc_ring *ring,
                               union igc_adv_rx_desc *rx_desc,
                               struct sk_buff *skb)
{
        if (ring->netdev->features & NETIF_F_RXHASH)
                skb_set_hash(skb,
                             le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
                             PKT_HASH_TYPE_L3);
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
                                   union igc_adv_rx_desc *rx_desc,
                                   struct sk_buff *skb)
{
        igc_rx_hash(rx_ring, rx_desc, skb);

        igc_rx_checksum(rx_ring, rx_desc, skb);

        skb_record_rx_queue(skb, rx_ring->queue_index);

        skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
                                               const unsigned int size)
{
        struct igc_rx_buffer *rx_buffer;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
        prefetchw(rx_buffer->page);

        /* we are reusing so sync this buffer for CPU use */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      size,
                                      DMA_FROM_DEVICE);

        rx_buffer->pagecnt_bias--;

        return rx_buffer;
}
/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
                            struct igc_rx_buffer *rx_buffer,
                            struct sk_buff *skb,
                            unsigned int size)
{
#if (PAGE_SIZE < 8192)
        unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                        rx_buffer->page_offset, size, truesize);
        rx_buffer->page_offset ^= truesize;
#else
        unsigned int truesize = ring_uses_build_skb(rx_ring) ?
                                SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
                                SKB_DATA_ALIGN(size);

        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                        rx_buffer->page_offset, size, truesize);
        rx_buffer->page_offset += truesize;
#endif
}
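/* Buffer-reuse example (illustrative): on 4 KB pages the page is split into
 * two 2048-byte halves, so "page_offset ^= truesize" simply flips between
 * offset 0 and offset 2048, letting the other half be handed back to the
 * ring while the stack still holds the first half.
 */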
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
                                     struct igc_rx_buffer *rx_buffer,
                                     union igc_adv_rx_desc *rx_desc,
                                     unsigned int size)
{
        void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
        unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
        unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                                SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
        struct sk_buff *skb;

        /* prefetch first cache line of first page */
        net_prefetch(va);

        /* build an skb around the page buffer */
        skb = build_skb(va - IGC_SKB_PAD, truesize);
        if (unlikely(!skb))
                return NULL;

        /* update pointers within the skb to store the data */
        skb_reserve(skb, IGC_SKB_PAD);
        __skb_put(skb, size);

        /* update buffer offset */
#if (PAGE_SIZE < 8192)
        rx_buffer->page_offset ^= truesize;
#else
        rx_buffer->page_offset += truesize;
#endif

        return skb;
}
static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
                                         struct igc_rx_buffer *rx_buffer,
                                         union igc_adv_rx_desc *rx_desc,
                                         unsigned int size)
{
        void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
        unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
        unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
        unsigned int headlen;
        struct sk_buff *skb;

        /* prefetch first cache line of first page */
        net_prefetch(va);

        /* allocate a skb to store the frags */
        skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
        if (unlikely(!skb))
                return NULL;

        if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
                igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
                va += IGC_TS_HDR_LEN;
                size -= IGC_TS_HDR_LEN;
        }

        /* Determine available headroom for copy */
        headlen = size;
        if (headlen > IGC_RX_HDR_LEN)
                headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

        /* align pull length to size of long to optimize memcpy performance */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

        /* update all of the pointers */
        size -= headlen;
        if (size) {
                skb_add_rx_frag(skb, 0, rx_buffer->page,
                                (va + headlen) - page_address(rx_buffer->page),
                                size, truesize);
#if (PAGE_SIZE < 8192)
                rx_buffer->page_offset ^= truesize;
#else
                rx_buffer->page_offset += truesize;
#endif
        } else {
                rx_buffer->pagecnt_bias++;
        }

        return skb;
}
/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
                              struct igc_rx_buffer *old_buff)
{
        u16 nta = rx_ring->next_to_alloc;
        struct igc_rx_buffer *new_buff;

        new_buff = &rx_ring->rx_buffer_info[nta];

        /* update, and store next to alloc */
        nta++;
        rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

        /* Transfer page from old buffer to new buffer.
         * Move each member individually to avoid possible store
         * forwarding stalls.
         */
        new_buff->dma = old_buff->dma;
        new_buff->page = old_buff->page;
        new_buff->page_offset = old_buff->page_offset;
        new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool igc_page_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;

        /* avoid re-using remote pages */
        if (unlikely(igc_page_is_reserved(page)))
                return false;

#if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
        if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
                return false;
#else
#define IGC_LAST_OFFSET \
        (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)
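        /* Illustrative: IGC_LAST_OFFSET marks the highest offset at which
         * another 2048-byte buffer still fits once skb overhead is taken
         * into account; past it the page cannot be flipped again.
         */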
        if (rx_buffer->page_offset > IGC_LAST_OFFSET)
                return false;
#endif

        /* If we have drained the page fragment pool we need to update
         * the pagecnt_bias and page count so that we fully restock the
         * number of references the driver holds.
         */
        if (unlikely(!pagecnt_bias)) {
                page_ref_add(page, USHRT_MAX);
                rx_buffer->pagecnt_bias = USHRT_MAX;
        }

        return true;
}
/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
                           union igc_adv_rx_desc *rx_desc)
{
        u32 ntc = rx_ring->next_to_clean + 1;

        /* fetch, update, and store next to clean */
        ntc = (ntc < rx_ring->count) ? ntc : 0;
        rx_ring->next_to_clean = ntc;

        prefetch(IGC_RX_DESC(rx_ring, ntc));

        if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
                return false;

        return true;
}
/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
                                union igc_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
        if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
                struct net_device *netdev = rx_ring->netdev;

                if (!(netdev->features & NETIF_F_RXALL)) {
                        dev_kfree_skb_any(skb);
                        return true;
                }
        }

        /* if eth_skb_pad returns an error the skb was freed */
        if (eth_skb_pad(skb))
                return true;

        return false;
}
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
                              struct igc_rx_buffer *rx_buffer)
{
        if (igc_can_reuse_rx_page(rx_buffer)) {
                /* hand second half of page back to the ring */
                igc_reuse_rx_page(rx_ring, rx_buffer);
        } else {
                /* We are not reusing the buffer so unmap it and free
                 * any references we are holding to it
                 */
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
                                     IGC_RX_DMA_ATTR);
                __page_frag_cache_drain(rx_buffer->page,
                                        rx_buffer->pagecnt_bias);
        }

        /* clear contents of rx_buffer */
        rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
        return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}
static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
                                  struct igc_rx_buffer *bi)
{
        struct page *page = bi->page;
        dma_addr_t dma;

        /* since we are recycling buffers we should seldom need to alloc */
        if (likely(page))
                return true;

        /* alloc new page for storage */
        page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
        if (unlikely(!page)) {
                rx_ring->rx_stats.alloc_failed++;
                return false;
        }

        /* map page for use */
        dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                                 igc_rx_pg_size(rx_ring),
                                 DMA_FROM_DEVICE,
                                 IGC_RX_DMA_ATTR);

        /* if mapping failed free memory back to system since
         * there isn't much point in holding memory we can't use
         */
        if (dma_mapping_error(rx_ring->dev, dma)) {
                __free_pages(page, igc_rx_pg_order(rx_ring));

                rx_ring->rx_stats.alloc_failed++;
                return false;
        }

        bi->dma = dma;
        bi->page = page;
        bi->page_offset = igc_rx_offset(rx_ring);
        bi->pagecnt_bias = 1;

        return true;
}
/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
        union igc_adv_rx_desc *rx_desc;
        u16 i = rx_ring->next_to_use;
        struct igc_rx_buffer *bi;
        u16 bufsz;

        /* nothing to do */
        if (!cleaned_count)
                return;

        rx_desc = IGC_RX_DESC(rx_ring, i);
        bi = &rx_ring->rx_buffer_info[i];
        i -= rx_ring->count;

        bufsz = igc_rx_bufsz(rx_ring);

        do {
                if (!igc_alloc_mapped_page(rx_ring, bi))
                        break;

                /* sync the buffer for use by the device */
                dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                 bi->page_offset, bufsz,
                                                 DMA_FROM_DEVICE);

                /* Refresh the desc even if buffer_addrs didn't change
                 * because each write-back erases this info.
                 */
                rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

                rx_desc++;
                bi++;
                i++;
                if (unlikely(!i)) {
                        rx_desc = IGC_RX_DESC(rx_ring, 0);
                        bi = rx_ring->rx_buffer_info;
                        i -= rx_ring->count;
                }

                /* clear the length for the next_to_use descriptor */
                rx_desc->wb.upper.length = 0;

                cleaned_count--;
        } while (cleaned_count);

        i += rx_ring->count;

        if (rx_ring->next_to_use != i) {
                /* record the next descriptor to use */
                rx_ring->next_to_use = i;

                /* update next to alloc since we have filled the ring */
                rx_ring->next_to_alloc = i;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64).
                 */
                wmb();
                writel(i, rx_ring->tail);
        }
}
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
        unsigned int total_bytes = 0, total_packets = 0;
        struct igc_ring *rx_ring = q_vector->rx.ring;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = igc_desc_unused(rx_ring);

        while (likely(total_packets < budget)) {
                union igc_adv_rx_desc *rx_desc;
                struct igc_rx_buffer *rx_buffer;
                unsigned int size;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
                        igc_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
                size = le16_to_cpu(rx_desc->wb.upper.length);
                if (!size)
                        break;

                /* This memory barrier is needed to keep us from reading
                 * any other fields out of the rx_desc until we know the
                 * descriptor has been written back
                 */
                dma_rmb();

                rx_buffer = igc_get_rx_buffer(rx_ring, size);

                /* retrieve a buffer from the ring */
                if (skb)
                        igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
                else if (ring_uses_build_skb(rx_ring))
                        skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
                else
                        skb = igc_construct_skb(rx_ring, rx_buffer,
                                                rx_desc, size);

                /* exit if we failed to retrieve a buffer */
                if (!skb) {
                        rx_ring->rx_stats.alloc_failed++;
                        rx_buffer->pagecnt_bias++;
                        break;
                }

                igc_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;

                /* fetch next buffer in frame if non-eop */
                if (igc_is_non_eop(rx_ring, rx_desc))
                        continue;

                /* verify the packet layout is correct */
                if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
                        skb = NULL;
                        continue;
                }

                /* probably a little skewed due to removing CRC */
                total_bytes += skb->len;

                /* populate checksum, VLAN, and protocol */
                igc_process_skb_fields(rx_ring, rx_desc, skb);

                napi_gro_receive(&q_vector->napi, skb);

                /* reset skb pointer */
                skb = NULL;

                /* update budget accounting */
                total_packets++;
        }

        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;

        u64_stats_update_begin(&rx_ring->rx_syncp);
        rx_ring->rx_stats.packets += total_packets;
        rx_ring->rx_stats.bytes += total_bytes;
        u64_stats_update_end(&rx_ring->rx_syncp);
        q_vector->rx.total_packets += total_packets;
        q_vector->rx.total_bytes += total_bytes;

        if (cleaned_count)
                igc_alloc_rx_buffers(rx_ring, cleaned_count);

        return total_packets;
}
/**
 * igc_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 */
static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
{
        struct igc_adapter *adapter = q_vector->adapter;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int budget = q_vector->tx.work_limit;
        struct igc_ring *tx_ring = q_vector->tx.ring;
        unsigned int i = tx_ring->next_to_clean;
        struct igc_tx_buffer *tx_buffer;
        union igc_adv_tx_desc *tx_desc;

        if (test_bit(__IGC_DOWN, &adapter->state))
                return true;

        tx_buffer = &tx_ring->tx_buffer_info[i];
        tx_desc = IGC_TX_DESC(tx_ring, i);
        i -= tx_ring->count;

        do {
                union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

                /* if next_to_watch is not set then there is no work pending */
                if (!eop_desc)
                        break;

                /* prevent any other reads prior to eop_desc */
                smp_rmb();

                /* if DD is not set pending work has not been completed */
                if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
                        break;

                /* clear next_to_watch to prevent false hangs */
                tx_buffer->next_to_watch = NULL;

                /* update the statistics for this packet */
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                /* free the skb */
                napi_consume_skb(tx_buffer->skb, napi_budget);

                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buffer, dma),
                                 dma_unmap_len(tx_buffer, len),
                                 DMA_TO_DEVICE);

                /* clear tx_buffer data */
                dma_unmap_len_set(tx_buffer, len, 0);

                /* clear last DMA location and unmap remaining buffers */
                while (tx_desc != eop_desc) {
                        tx_buffer++;
                        tx_desc++;
                        i++;
                        if (unlikely(!i)) {
                                i -= tx_ring->count;
                                tx_buffer = tx_ring->tx_buffer_info;
                                tx_desc = IGC_TX_DESC(tx_ring, 0);
                        }

                        /* unmap any remaining paged data */
                        if (dma_unmap_len(tx_buffer, len)) {
                                dma_unmap_page(tx_ring->dev,
                                               dma_unmap_addr(tx_buffer, dma),
                                               dma_unmap_len(tx_buffer, len),
                                               DMA_TO_DEVICE);
                                dma_unmap_len_set(tx_buffer, len, 0);
                        }
                }

                /* move us one more past the eop_desc for start of next pkt */
                tx_buffer++;
                tx_desc++;
                i++;
                if (unlikely(!i)) {
                        i -= tx_ring->count;
                        tx_buffer = tx_ring->tx_buffer_info;
                        tx_desc = IGC_TX_DESC(tx_ring, 0);
                }

                /* issue prefetch for next Tx descriptor */
                prefetch(tx_desc);

                /* update budget accounting */
                budget--;
        } while (likely(budget));

        netdev_tx_completed_queue(txring_txq(tx_ring),
                                  total_packets, total_bytes);

        i += tx_ring->count;
        tx_ring->next_to_clean = i;

        u64_stats_update_begin(&tx_ring->tx_syncp);
        tx_ring->tx_stats.bytes += total_bytes;
        tx_ring->tx_stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->tx_syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;

        if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
                struct igc_hw *hw = &adapter->hw;

                /* Detect a transmit hang in hardware, this serializes the
                 * check with the clearing of time_stamp and movement of i
                 */
                clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
                if (tx_buffer->next_to_watch &&
                    time_after(jiffies, tx_buffer->time_stamp +
                    (adapter->tx_timeout_factor * HZ)) &&
                    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
                        /* detected Tx unit hang */
                        netdev_err(tx_ring->netdev,
                                   "Detected Tx Unit Hang\n"
                                   "  Tx Queue             <%d>\n"
                                   "  TDH                  <%x>\n"
                                   "  TDT                  <%x>\n"
                                   "  next_to_use          <%x>\n"
                                   "  next_to_clean        <%x>\n"
                                   "buffer_info[next_to_clean]\n"
                                   "  time_stamp           <%lx>\n"
                                   "  next_to_watch        <%p>\n"
                                   "  jiffies              <%lx>\n"
                                   "  desc.status          <%x>\n",
                                   tx_ring->queue_index,
                                   rd32(IGC_TDH(tx_ring->reg_idx)),
                                   readl(tx_ring->tail),
                                   tx_ring->next_to_use,
                                   tx_ring->next_to_clean,
                                   tx_buffer->time_stamp,
                                   tx_buffer->next_to_watch,
                                   jiffies,
                                   tx_buffer->next_to_watch->wb.status);
                        netif_stop_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);

                        /* we are about to reset, no point in enabling stuff */
                        return true;
                }
        }

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
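        /* Illustrative: DESC_NEEDED is sized for a worst-case packet
         * (roughly MAX_SKB_FRAGS plus a few spare slots), so waking only
         * once twice that many descriptors are free keeps the next
         * transmit from immediately stopping the queue again.
         */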
        if (unlikely(total_packets &&
                     netif_carrier_ok(tx_ring->netdev) &&
                     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (__netif_subqueue_stopped(tx_ring->netdev,
                                             tx_ring->queue_index) &&
                    !(test_bit(__IGC_DOWN, &adapter->state))) {
                        netif_wake_subqueue(tx_ring->netdev,
                                            tx_ring->queue_index);

                        u64_stats_update_begin(&tx_ring->tx_syncp);
                        tx_ring->tx_stats.restart_queue++;
                        u64_stats_update_end(&tx_ring->tx_syncp);
                }
        }

        return !!budget;
}
static int igc_find_mac_filter(struct igc_adapter *adapter,
                               enum igc_mac_filter_type type, const u8 *addr)
{
        struct igc_hw *hw = &adapter->hw;
        int max_entries = hw->mac.rar_entry_count;
        u32 ral, rah;
        int i;

        for (i = 0; i < max_entries; i++) {
                ral = rd32(IGC_RAL(i));
                rah = rd32(IGC_RAH(i));

                if (!(rah & IGC_RAH_AV))
                        continue;
                if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
                        continue;
                if ((rah & IGC_RAH_RAH_MASK) !=
                    le16_to_cpup((__le16 *)(addr + 4)))
                        continue;
                if (ral != le32_to_cpup((__le32 *)(addr)))
                        continue;

                return i;
        }

        return -1;
}

static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
        struct igc_hw *hw = &adapter->hw;
        int max_entries = hw->mac.rar_entry_count;
        u32 rah;
        int i;

        for (i = 0; i < max_entries; i++) {
                rah = rd32(IGC_RAH(i));

                if (!(rah & IGC_RAH_AV))
                        return i;
        }

        return -1;
}
/**
 * igc_add_mac_filter() - Add MAC address filter
 * @adapter: Pointer to adapter where the filter should be added
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *	   matching the filter are enqueued onto 'queue'. Otherwise, queue
 *	   assignment is disabled.
 *
 * Return: 0 in case of success, negative errno code otherwise.
 */
static int igc_add_mac_filter(struct igc_adapter *adapter,
                              enum igc_mac_filter_type type, const u8 *addr,
                              int queue)
{
        struct net_device *dev = adapter->netdev;
        int index;

        index = igc_find_mac_filter(adapter, type, addr);
        if (index >= 0)
                goto update_filter;

        index = igc_get_avail_mac_filter_slot(adapter);
        if (index < 0)
                return -ENOSPC;

        netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
                   index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
                   addr, queue);

update_filter:
        igc_set_mac_filter_hw(adapter, index, type, addr, queue);
        return 0;
}
/**
 * igc_del_mac_filter() - Delete MAC address filter
 * @adapter: Pointer to adapter where the filter should be deleted from
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 */
static void igc_del_mac_filter(struct igc_adapter *adapter,
                               enum igc_mac_filter_type type, const u8 *addr)
{
        struct net_device *dev = adapter->netdev;
        int index;

        index = igc_find_mac_filter(adapter, type, addr);
        if (index < 0)
                return;

        if (index == 0) {
                /* If this is the default filter, we don't actually delete it.
                 * We just reset to its default value i.e. disable queue
                 * assignment.
                 */
                netdev_dbg(dev, "Disable default MAC filter queue assignment");

                igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
        } else {
                netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
                           index,
                           type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
                           addr);

                igc_clear_mac_filter_hw(adapter, index);
        }
}
2264 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2265 * @adapter: Pointer to adapter where the filter should be added
2266 * @prio: VLAN priority value
2267 * @queue: Queue number which matching frames are assigned to
2269 * Return: 0 in case of success, negative errno code otherwise.
2271 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2274 struct net_device *dev = adapter->netdev;
2275 struct igc_hw *hw = &adapter->hw;
2278 vlanpqf = rd32(IGC_VLANPQF);
2280 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2281 netdev_dbg(dev, "VLAN priority filter already in use\n");
2285 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2286 vlanpqf |= IGC_VLANPQF_VALID(prio);
2288 wr32(IGC_VLANPQF, vlanpqf);
2290 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2296 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2297 * @adapter: Pointer to adapter where the filter should be deleted from
2298 * @prio: VLAN priority value
2300 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2302 struct igc_hw *hw = &adapter->hw;
2305 vlanpqf = rd32(IGC_VLANPQF);
2307 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2308 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2310 wr32(IGC_VLANPQF, vlanpqf);
2312 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
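/* Sketch for reference: VLANPQF packs a queue-select field plus a valid
 * bit per VLAN priority, as the add/del helpers above show. A
 * hypothetical query for one priority, using only existing macros:
 */
static inline bool igc_example_vlan_prio_in_use(struct igc_hw *hw, int prio)
{
	return !!(rd32(IGC_VLANPQF) & IGC_VLANPQF_VALID(prio));
}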
2316 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2318 struct igc_hw *hw = &adapter->hw;
2321 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2322 u32 etqf = rd32(IGC_ETQF(i));
2324 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2332 * igc_add_etype_filter() - Add ethertype filter
2333 * @adapter: Pointer to adapter where the filter should be added
2334 * @etype: Ethertype value
2335 * @queue: If non-negative, queue assignment feature is enabled and frames
2336 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2337 * assignment is disabled.
2339 * Return: 0 in case of success, negative errno code otherwise.
2341 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
2344 struct igc_hw *hw = &adapter->hw;
2348 index = igc_get_avail_etype_filter_slot(adapter);
2352 etqf = rd32(IGC_ETQF(index));
2354 etqf &= ~IGC_ETQF_ETYPE_MASK;
2358 etqf &= ~IGC_ETQF_QUEUE_MASK;
2359 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
2360 etqf |= IGC_ETQF_QUEUE_ENABLE;
2363 etqf |= IGC_ETQF_FILTER_ENABLE;
2365 wr32(IGC_ETQF(index), etqf);
2367 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
2372 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
2374 struct igc_hw *hw = &adapter->hw;
2377 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2378 u32 etqf = rd32(IGC_ETQF(i));
2380 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
2388 * igc_del_etype_filter() - Delete ethertype filter
2389 * @adapter: Pointer to adapter where the filter should be deleted from
2390 * @etype: Ethertype value
2392 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
2394 struct igc_hw *hw = &adapter->hw;
2397 index = igc_find_etype_filter(adapter, etype);
2401 wr32(IGC_ETQF(index), 0);
2403 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
2407 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2408 const struct igc_nfc_rule *rule)
2412 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
2413 err = igc_add_etype_filter(adapter, rule->filter.etype,
2419 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
2420 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2421 rule->filter.src_addr, rule->action);
2426 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
2427 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2428 rule->filter.dst_addr, rule->action);
2433 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2434 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2437 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
2445 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
2446 const struct igc_nfc_rule *rule)
2448 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
2449 igc_del_etype_filter(adapter, rule->filter.etype);
2451 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2452 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2455 igc_del_vlan_prio_filter(adapter, prio);
2458 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
2459 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2460 rule->filter.src_addr);
2462 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
2463 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2464 rule->filter.dst_addr);
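/* For reference: both the enable and disable paths above recover the
 * 3-bit PCP from the TCI with the standard if_vlan.h mask and shift:
 *
 *	int prio = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
 *
 * so a TCI of 0xa005 (PCP 5, VID 5) yields prio == 5.
 */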
2468 * igc_get_nfc_rule() - Get NFC rule
2469 * @adapter: Pointer to adapter
2470 * @location: Rule location
2472 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2474 * Return: Pointer to NFC rule at @location. If not found, NULL.
2476 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
2479 struct igc_nfc_rule *rule;
2481 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
2482 if (rule->location == location)
2484 if (rule->location > location)
2492 * igc_del_nfc_rule() - Delete NFC rule
2493 * @adapter: Pointer to adapter
2494 * @rule: Pointer to rule to be deleted
2496 * Disable NFC rule in hardware and delete it from adapter.
2498 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2500 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2502 igc_disable_nfc_rule(adapter, rule);
2504 list_del(&rule->list);
2505 adapter->nfc_rule_count--;
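/* Usage sketch (hypothetical caller): lookup and delete are expected to
 * happen under the same lock, e.g.:
 *
 *	mutex_lock(&adapter->nfc_rule_lock);
 *	rule = igc_get_nfc_rule(adapter, location);
 *	if (rule)
 *		igc_del_nfc_rule(adapter, rule);
 *	mutex_unlock(&adapter->nfc_rule_lock);
 */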
2510 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
2512 struct igc_nfc_rule *rule, *tmp;
2514 mutex_lock(&adapter->nfc_rule_lock);
2516 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
2517 igc_del_nfc_rule(adapter, rule);
2519 mutex_unlock(&adapter->nfc_rule_lock);
2523 * igc_add_nfc_rule() - Add NFC rule
2524 * @adapter: Pointer to adapter
2525 * @rule: Pointer to rule to be added
2527 * Enable NFC rule in hardware and add it to adapter.
2529 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2531 * Return: 0 on success, negative errno on failure.
2533 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2535 struct igc_nfc_rule *pred, *cur;
2538 err = igc_enable_nfc_rule(adapter, rule);
2543 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
2544 if (cur->location >= rule->location)
2549 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
2550 adapter->nfc_rule_count++;
2554 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
2556 struct igc_nfc_rule *rule;
2558 mutex_lock(&adapter->nfc_rule_lock);
2560 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
2561 igc_enable_nfc_rule(adapter, rule);
2563 mutex_unlock(&adapter->nfc_rule_lock);
2566 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
2568 struct igc_adapter *adapter = netdev_priv(netdev);
2570 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
2573 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
2575 struct igc_adapter *adapter = netdev_priv(netdev);
2577 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
2582 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2583 * @netdev: network interface device structure
2585 * The set_rx_mode entry point is called whenever the unicast or multicast
2586 * address lists or the network interface flags are updated. This routine is
2587 * responsible for configuring the hardware for proper unicast, multicast,
2588 * promiscuous mode, and all-multi behavior.
2590 static void igc_set_rx_mode(struct net_device *netdev)
2592 struct igc_adapter *adapter = netdev_priv(netdev);
2593 struct igc_hw *hw = &adapter->hw;
2594 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
2597 /* Check for Promiscuous and All Multicast modes */
2598 if (netdev->flags & IFF_PROMISC) {
2599 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
2601 if (netdev->flags & IFF_ALLMULTI) {
2602 rctl |= IGC_RCTL_MPE;
2604 /* Write addresses to the MTA, if the attempt fails
2605 * then we should just turn on promiscuous mode so
2606 * that we can at least receive multicast traffic
2608 count = igc_write_mc_addr_list(netdev);
2610 rctl |= IGC_RCTL_MPE;
2614 /* Write addresses to available RAR registers, if there is not
2615 * sufficient space to store all the addresses then enable
2616 * unicast promiscuous mode
2618 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
2619 rctl |= IGC_RCTL_UPE;
2621 /* update state of unicast and multicast */
2622 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
2623 wr32(IGC_RCTL, rctl);
2625 #if (PAGE_SIZE < 8192)
2626 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
2627 rlpml = IGC_MAX_FRAME_BUILD_SKB;
2629 wr32(IGC_RLPML, rlpml);
2633 * igc_configure - configure the hardware for RX and TX
2634 * @adapter: private board structure
2636 static void igc_configure(struct igc_adapter *adapter)
2638 struct net_device *netdev = adapter->netdev;
2641 igc_get_hw_control(adapter);
2642 igc_set_rx_mode(netdev);
2644 igc_setup_tctl(adapter);
2645 igc_setup_mrqc(adapter);
2646 igc_setup_rctl(adapter);
2648 igc_set_default_mac_filter(adapter);
2649 igc_restore_nfc_rules(adapter);
2651 igc_configure_tx(adapter);
2652 igc_configure_rx(adapter);
2654 igc_rx_fifo_flush_base(&adapter->hw);
2656 /* call igc_desc_unused which always leaves
2657 * at least 1 descriptor unused to make sure
2658 * next_to_use != next_to_clean
2660 for (i = 0; i < adapter->num_rx_queues; i++) {
2661 struct igc_ring *ring = adapter->rx_ring[i];
2663 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
2668 * igc_write_ivar - configure ivar for given MSI-X vector
2669 * @hw: pointer to the HW structure
2670 * @msix_vector: vector number we are allocating to a given ring
2671 * @index: row index of IVAR register to write within IVAR table
2672 * @offset: column offset in IVAR, should be a multiple of 8
2674 * The IVAR table consists of 2 columns,
2675 * each containing a cause allocation for an Rx and Tx ring, and a
2676 * variable number of rows depending on the number of queues supported.
2678 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2679 int index, int offset)
2681 u32 ivar = array_rd32(IGC_IVAR0, index);
2683 /* clear any bits that are currently set */
2684 ivar &= ~((u32)0xFF << offset);
2686 /* write vector and valid bit */
2687 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2689 array_wr32(IGC_IVAR0, index, ivar);
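/* Editor's sketch of the mapping used by igc_assign_vector() below:
 * each IVAR row is assumed to carry two queue pairs, so ring N lands in
 * row N >> 1, with Rx causes at byte offsets 0/16 and Tx causes at 8/24
 * depending on N & 1. The helper below is hypothetical.
 */
static inline void igc_example_map_ring_to_ivar(struct igc_hw *hw,
						int msix_vector, int ring,
						bool is_tx)
{
	int index = ring >> 1;
	int offset = ((ring & 0x1) << 4) + (is_tx ? 8 : 0);

	igc_write_ivar(hw, msix_vector, index, offset);
}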
2692 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2694 struct igc_adapter *adapter = q_vector->adapter;
2695 struct igc_hw *hw = &adapter->hw;
2696 int rx_queue = IGC_N0_QUEUE;
2697 int tx_queue = IGC_N0_QUEUE;
2699 if (q_vector->rx.ring)
2700 rx_queue = q_vector->rx.ring->reg_idx;
2701 if (q_vector->tx.ring)
2702 tx_queue = q_vector->tx.ring->reg_idx;
2704 switch (hw->mac.type) {
2706 if (rx_queue > IGC_N0_QUEUE)
2707 igc_write_ivar(hw, msix_vector,
2709 (rx_queue & 0x1) << 4);
2710 if (tx_queue > IGC_N0_QUEUE)
2711 igc_write_ivar(hw, msix_vector,
2713 ((tx_queue & 0x1) << 4) + 8);
2714 q_vector->eims_value = BIT(msix_vector);
2717 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2721 /* add q_vector eims value to global eims_enable_mask */
2722 adapter->eims_enable_mask |= q_vector->eims_value;
2724 /* configure q_vector to set itr on first interrupt */
2725 q_vector->set_itr = 1;
2729 * igc_configure_msix - Configure MSI-X hardware
2730 * @adapter: Pointer to adapter structure
2732 * igc_configure_msix sets up the hardware to properly
2733 * generate MSI-X interrupts.
2735 static void igc_configure_msix(struct igc_adapter *adapter)
2737 struct igc_hw *hw = &adapter->hw;
2741 adapter->eims_enable_mask = 0;
2743 /* set vector for other causes, i.e. link changes */
2744 switch (hw->mac.type) {
2746 /* Turn on MSI-X capability first, or our settings
2747 * won't stick. And it will take days to debug.
2749 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
2750 IGC_GPIE_PBA | IGC_GPIE_EIAME |
2753 /* enable msix_other interrupt */
2754 adapter->eims_other = BIT(vector);
2755 tmp = (vector++ | IGC_IVAR_VALID) << 8;
2757 wr32(IGC_IVAR_MISC, tmp);
2760 /* do nothing, since nothing else supports MSI-X */
2762 } /* switch (hw->mac.type) */
2764 adapter->eims_enable_mask |= adapter->eims_other;
2766 for (i = 0; i < adapter->num_q_vectors; i++)
2767 igc_assign_vector(adapter->q_vector[i], vector++);
2773 * igc_irq_enable - Enable default interrupt generation settings
2774 * @adapter: board private structure
2776 static void igc_irq_enable(struct igc_adapter *adapter)
2778 struct igc_hw *hw = &adapter->hw;
2780 if (adapter->msix_entries) {
2781 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
2782 u32 regval = rd32(IGC_EIAC);
2784 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
2785 regval = rd32(IGC_EIAM);
2786 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
2787 wr32(IGC_EIMS, adapter->eims_enable_mask);
2790 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2791 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2796 * igc_irq_disable - Mask off interrupt generation on the NIC
2797 * @adapter: board private structure
2799 static void igc_irq_disable(struct igc_adapter *adapter)
2801 struct igc_hw *hw = &adapter->hw;
2803 if (adapter->msix_entries) {
2804 u32 regval = rd32(IGC_EIAM);
2806 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
2807 wr32(IGC_EIMC, adapter->eims_enable_mask);
2808 regval = rd32(IGC_EIAC);
2809 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
2816 if (adapter->msix_entries) {
2819 synchronize_irq(adapter->msix_entries[vector++].vector);
2821 for (i = 0; i < adapter->num_q_vectors; i++)
2822 synchronize_irq(adapter->msix_entries[vector++].vector);
2824 synchronize_irq(adapter->pdev->irq);
2828 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
2829 const u32 max_rss_queues)
2831 /* Determine if we need to pair queues. */
2832 /* If rss_queues > half of max_rss_queues, pair the queues in
2833 * order to conserve interrupts due to limited supply.
2835 if (adapter->rss_queues > (max_rss_queues / 2))
2836 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
2838 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
2841 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
2843 return IGC_MAX_RX_QUEUES;
2846 static void igc_init_queue_configuration(struct igc_adapter *adapter)
2850 max_rss_queues = igc_get_max_rss_queues(adapter);
2851 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2853 igc_set_flag_queue_pairs(adapter, max_rss_queues);
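/* Worked example, assuming IGC_MAX_RX_QUEUES is 4 as in igc.h: on a
 * machine with four or more online CPUs, rss_queues becomes 4, which is
 * greater than max_rss_queues / 2 == 2, so IGC_FLAG_QUEUE_PAIRS is set
 * and each interrupt vector serves one Tx/Rx ring pair.
 */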
2857 * igc_reset_q_vector - Reset config for interrupt vector
2858 * @adapter: board private structure to initialize
2859 * @v_idx: Index of vector to be reset
2861 * If NAPI is enabled it will delete any references to the
2862 * NAPI struct. This is preparation for igc_free_q_vector.
2864 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
2866 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2868 /* if we're coming from igc_set_interrupt_capability, the vectors are not yet allocated */
2874 if (q_vector->tx.ring)
2875 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
2877 if (q_vector->rx.ring)
2878 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
2880 netif_napi_del(&q_vector->napi);
2884 * igc_free_q_vector - Free memory allocated for specific interrupt vector
2885 * @adapter: board private structure to initialize
2886 * @v_idx: Index of vector to be freed
2888 * This function frees the memory allocated to the q_vector.
2890 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
2892 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2894 adapter->q_vector[v_idx] = NULL;
2896 /* igc_get_stats64() might access the rings on this vector,
2897 * we must wait a grace period before freeing it.
2900 kfree_rcu(q_vector, rcu);
2904 * igc_free_q_vectors - Free memory allocated for interrupt vectors
2905 * @adapter: board private structure to initialize
2907 * This function frees the memory allocated to the q_vectors. In addition if
2908 * NAPI is enabled it will delete any references to the NAPI struct prior
2909 * to freeing the q_vector.
2911 static void igc_free_q_vectors(struct igc_adapter *adapter)
2913 int v_idx = adapter->num_q_vectors;
2915 adapter->num_tx_queues = 0;
2916 adapter->num_rx_queues = 0;
2917 adapter->num_q_vectors = 0;
2920 igc_reset_q_vector(adapter, v_idx);
2921 igc_free_q_vector(adapter, v_idx);
2926 * igc_update_itr - update the dynamic ITR value based on statistics
2927 * @q_vector: pointer to q_vector
2928 * @ring_container: ring info to update the itr for
2930 * Stores a new ITR value based on packets and byte
2931 * counts during the last interrupt. The advantage of per interrupt
2932 * computation is faster updates and more accurate ITR for the current
2933 * traffic pattern. Constants in this function were computed
2934 * based on theoretical maximum wire speed and thresholds were set based
2935 * on testing data as well as attempting to minimize response time
2936 * while increasing bulk throughput.
2937 * NOTE: These calculations are only valid when operating in a single-
2938 * queue environment.
2940 static void igc_update_itr(struct igc_q_vector *q_vector,
2941 struct igc_ring_container *ring_container)
2943 unsigned int packets = ring_container->total_packets;
2944 unsigned int bytes = ring_container->total_bytes;
2945 u8 itrval = ring_container->itr;
2947 /* no packets, exit with status unchanged */
2952 case lowest_latency:
2953 /* handle TSO and jumbo frames */
2954 if (bytes / packets > 8000)
2955 itrval = bulk_latency;
2956 else if ((packets < 5) && (bytes > 512))
2957 itrval = low_latency;
2959 case low_latency: /* 50 usec aka 20000 ints/s */
2960 if (bytes > 10000) {
2961 /* this if handles the TSO accounting */
2962 if (bytes / packets > 8000)
2963 itrval = bulk_latency;
2964 else if ((packets < 10) || ((bytes / packets) > 1200))
2965 itrval = bulk_latency;
2966 else if ((packets > 35))
2967 itrval = lowest_latency;
2968 } else if (bytes / packets > 2000) {
2969 itrval = bulk_latency;
2970 } else if (packets <= 2 && bytes < 512) {
2971 itrval = lowest_latency;
2974 case bulk_latency: /* 250 usec aka 4000 ints/s */
2975 if (bytes > 25000) {
2977 itrval = low_latency;
2978 } else if (bytes < 1500) {
2979 itrval = low_latency;
2984 /* clear work counters since we have the values we need */
2985 ring_container->total_bytes = 0;
2986 ring_container->total_packets = 0;
2988 /* write updated itr to ring container */
2989 ring_container->itr = itrval;
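/* Worked example of the thresholds above: at low_latency, 40 packets
 * totalling 20000 bytes in one interrupt give bytes/packets == 500 with
 * packets > 35, so the ring is promoted to lowest_latency; 20 packets
 * totalling 30000 bytes give bytes/packets == 1500 > 1200, demoting the
 * ring to bulk_latency instead.
 */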
2992 static void igc_set_itr(struct igc_q_vector *q_vector)
2994 struct igc_adapter *adapter = q_vector->adapter;
2995 u32 new_itr = q_vector->itr_val;
2998 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2999 switch (adapter->link_speed) {
3003 new_itr = IGC_4K_ITR;
3009 igc_update_itr(q_vector, &q_vector->tx);
3010 igc_update_itr(q_vector, &q_vector->rx);
3012 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3014 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3015 if (current_itr == lowest_latency &&
3016 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3017 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3018 current_itr = low_latency;
3020 switch (current_itr) {
3021 /* counts and packets in update_itr are dependent on these numbers */
3022 case lowest_latency:
3023 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3026 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3029 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
3036 if (new_itr != q_vector->itr_val) {
3037 /* this attempts to bias the interrupt rate towards Bulk
3038 * by adding intermediate steps when the interrupt rate is increasing. */
3041 new_itr = new_itr > q_vector->itr_val ?
3042 max((new_itr * q_vector->itr_val) /
3043 (new_itr + (q_vector->itr_val >> 2)),
3045 /* Don't write the value here; it resets the adapter's
3046 * internal timer, and causes us to delay far longer than
3047 * we should between interrupts. Instead, we write the ITR
3048 * value at the beginning of the next interrupt so the timing
3049 * ends up being correct.
3051 q_vector->itr_val = new_itr;
3052 q_vector->set_itr = 1;
3056 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3058 int v_idx = adapter->num_q_vectors;
3060 if (adapter->msix_entries) {
3061 pci_disable_msix(adapter->pdev);
3062 kfree(adapter->msix_entries);
3063 adapter->msix_entries = NULL;
3064 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3065 pci_disable_msi(adapter->pdev);
3069 igc_reset_q_vector(adapter, v_idx);
3073 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3074 * @adapter: Pointer to adapter structure
3075 * @msix: boolean value for MSI-X capability
3077 * Attempt to configure interrupts using the best available
3078 * capabilities of the hardware and kernel.
3080 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3088 adapter->flags |= IGC_FLAG_HAS_MSIX;
3090 /* Number of supported queues. */
3091 adapter->num_rx_queues = adapter->rss_queues;
3093 adapter->num_tx_queues = adapter->rss_queues;
3095 /* start with one vector for every Rx queue */
3096 numvecs = adapter->num_rx_queues;
3098 /* if Tx handler is separate add 1 for every Tx queue */
3099 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3100 numvecs += adapter->num_tx_queues;
3102 /* store the number of vectors reserved for queues */
3103 adapter->num_q_vectors = numvecs;
3105 /* add 1 vector for link status interrupts */
3108 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3111 if (!adapter->msix_entries)
3114 /* populate entry values */
3115 for (i = 0; i < numvecs; i++)
3116 adapter->msix_entries[i].entry = i;
3118 err = pci_enable_msix_range(adapter->pdev,
3119 adapter->msix_entries,
3125 kfree(adapter->msix_entries);
3126 adapter->msix_entries = NULL;
3128 igc_reset_interrupt_capability(adapter);
3131 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3133 adapter->rss_queues = 1;
3134 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3135 adapter->num_rx_queues = 1;
3136 adapter->num_tx_queues = 1;
3137 adapter->num_q_vectors = 1;
3138 if (!pci_enable_msi(adapter->pdev))
3139 adapter->flags |= IGC_FLAG_HAS_MSI;
3143 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3144 * @q_vector: pointer to q_vector
3146 * Stores a new ITR value based strictly on packet size. This
3147 * algorithm is less sophisticated than that used in igc_update_itr,
3148 * due to the difficulty of synchronizing statistics across multiple
3149 * receive rings. The divisors and thresholds used by this function
3150 * were determined based on theoretical maximum wire speed and testing
3151 * data, in order to minimize response time while increasing bulk throughput.
3153 * NOTE: This function is called only when operating in a multiqueue
3154 * receive environment.
3156 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3158 struct igc_adapter *adapter = q_vector->adapter;
3159 int new_val = q_vector->itr_val;
3160 int avg_wire_size = 0;
3161 unsigned int packets;
3163 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3164 * ints/sec - ITR timer value of 120 ticks.
3166 switch (adapter->link_speed) {
3169 new_val = IGC_4K_ITR;
3175 packets = q_vector->rx.total_packets;
3177 avg_wire_size = q_vector->rx.total_bytes / packets;
3179 packets = q_vector->tx.total_packets;
3181 avg_wire_size = max_t(u32, avg_wire_size,
3182 q_vector->tx.total_bytes / packets);
3184 /* if avg_wire_size isn't set no work was done */
3188 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3189 avg_wire_size += 24;
3191 /* Don't starve jumbo frames */
3192 avg_wire_size = min(avg_wire_size, 3000);
3194 /* Give a little boost to mid-size frames */
3195 if (avg_wire_size > 300 && avg_wire_size < 1200)
3196 new_val = avg_wire_size / 3;
3198 new_val = avg_wire_size / 2;
3200 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3201 if (new_val < IGC_20K_ITR &&
3202 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3203 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3204 new_val = IGC_20K_ITR;
3207 if (new_val != q_vector->itr_val) {
3208 q_vector->itr_val = new_val;
3209 q_vector->set_itr = 1;
3212 q_vector->rx.total_bytes = 0;
3213 q_vector->rx.total_packets = 0;
3214 q_vector->tx.total_bytes = 0;
3215 q_vector->tx.total_packets = 0;
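/* Example of the scaling above: full-sized 1500-byte frames give
 * avg_wire_size = 1524 after the +24 adjustment, outside the 300-1200
 * mid-size window, so new_val = 1524 / 2 = 762; 600-byte frames give
 * 624 / 3 = 208, i.e. a noticeably higher interrupt rate for
 * latency-sensitive mid-size traffic.
 */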
3218 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3220 struct igc_adapter *adapter = q_vector->adapter;
3221 struct igc_hw *hw = &adapter->hw;
3223 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3224 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3225 if (adapter->num_q_vectors == 1)
3226 igc_set_itr(q_vector);
3228 igc_update_ring_itr(q_vector);
3231 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3232 if (adapter->msix_entries)
3233 wr32(IGC_EIMS, q_vector->eims_value);
3235 igc_irq_enable(adapter);
3239 static void igc_add_ring(struct igc_ring *ring,
3240 struct igc_ring_container *head)
3247 * igc_cache_ring_register - Descriptor ring to register mapping
3248 * @adapter: board private structure to initialize
3250 * Once we know the feature-set enabled for the device, we'll cache
3251 * the register offset the descriptor ring is assigned to.
3253 static void igc_cache_ring_register(struct igc_adapter *adapter)
3257 switch (adapter->hw.mac.type) {
3260 for (; i < adapter->num_rx_queues; i++)
3261 adapter->rx_ring[i]->reg_idx = i;
3262 for (; j < adapter->num_tx_queues; j++)
3263 adapter->tx_ring[j]->reg_idx = j;
3269 * igc_poll - NAPI Rx polling callback
3270 * @napi: napi polling structure
3271 * @budget: count of how many packets we should handle
3273 static int igc_poll(struct napi_struct *napi, int budget)
3275 struct igc_q_vector *q_vector = container_of(napi,
3276 struct igc_q_vector,
3278 bool clean_complete = true;
3281 if (q_vector->tx.ring)
3282 clean_complete = igc_clean_tx_irq(q_vector, budget);
3284 if (q_vector->rx.ring) {
3285 int cleaned = igc_clean_rx_irq(q_vector, budget);
3287 work_done += cleaned;
3288 if (cleaned >= budget)
3289 clean_complete = false;
3292 /* If all work not completed, return budget and keep polling */
3293 if (!clean_complete)
3296 /* Exit the polling mode, but don't re-enable interrupts if stack might
3297 * poll us due to busy-polling
3299 if (likely(napi_complete_done(napi, work_done)))
3300 igc_ring_irq_enable(q_vector);
3302 return min(work_done, budget - 1);
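/* Note on the return value: NAPI treats a poll routine that returns the
 * full budget as "more work pending", so after napi_complete_done() the
 * routine must report strictly less than the budget. The
 * min(work_done, budget - 1) above keeps that contract even in edge
 * cases where work_done could equal the budget.
 */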
3306 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3307 * @adapter: board private structure to initialize
3308 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3309 * @v_idx: index of vector in adapter struct
3310 * @txr_count: total number of Tx rings to allocate
3311 * @txr_idx: index of first Tx ring to allocate
3312 * @rxr_count: total number of Rx rings to allocate
3313 * @rxr_idx: index of first Rx ring to allocate
3315 * We allocate one q_vector. If allocation fails we return -ENOMEM.
3317 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3318 unsigned int v_count, unsigned int v_idx,
3319 unsigned int txr_count, unsigned int txr_idx,
3320 unsigned int rxr_count, unsigned int rxr_idx)
3322 struct igc_q_vector *q_vector;
3323 struct igc_ring *ring;
3326 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
3327 if (txr_count > 1 || rxr_count > 1)
3330 ring_count = txr_count + rxr_count;
3332 /* allocate q_vector and rings */
3333 q_vector = adapter->q_vector[v_idx];
3335 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3338 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3342 /* initialize NAPI */
3343 netif_napi_add(adapter->netdev, &q_vector->napi,
3346 /* tie q_vector and adapter together */
3347 adapter->q_vector[v_idx] = q_vector;
3348 q_vector->adapter = adapter;
3350 /* initialize work limits */
3351 q_vector->tx.work_limit = adapter->tx_work_limit;
3353 /* initialize ITR configuration */
3354 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3355 q_vector->itr_val = IGC_START_ITR;
3357 /* initialize pointer to rings */
3358 ring = q_vector->ring;
3360 /* initialize ITR */
3362 /* rx or rx/tx vector */
3363 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3364 q_vector->itr_val = adapter->rx_itr_setting;
3366 /* tx only vector */
3367 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3368 q_vector->itr_val = adapter->tx_itr_setting;
3372 /* assign generic ring traits */
3373 ring->dev = &adapter->pdev->dev;
3374 ring->netdev = adapter->netdev;
3376 /* configure backlink on ring */
3377 ring->q_vector = q_vector;
3379 /* update q_vector Tx values */
3380 igc_add_ring(ring, &q_vector->tx);
3382 /* apply Tx specific ring traits */
3383 ring->count = adapter->tx_ring_count;
3384 ring->queue_index = txr_idx;
3386 /* assign ring to adapter */
3387 adapter->tx_ring[txr_idx] = ring;
3389 /* push pointer to next ring */
3394 /* assign generic ring traits */
3395 ring->dev = &adapter->pdev->dev;
3396 ring->netdev = adapter->netdev;
3398 /* configure backlink on ring */
3399 ring->q_vector = q_vector;
3401 /* update q_vector Rx values */
3402 igc_add_ring(ring, &q_vector->rx);
3404 /* apply Rx specific ring traits */
3405 ring->count = adapter->rx_ring_count;
3406 ring->queue_index = rxr_idx;
3408 /* assign ring to adapter */
3409 adapter->rx_ring[rxr_idx] = ring;
3416 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
3417 * @adapter: board private structure to initialize
3419 * We allocate one q_vector per queue interrupt. If allocation fails we
3422 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3424 int rxr_remaining = adapter->num_rx_queues;
3425 int txr_remaining = adapter->num_tx_queues;
3426 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3427 int q_vectors = adapter->num_q_vectors;
3430 if (q_vectors >= (rxr_remaining + txr_remaining)) {
3431 for (; rxr_remaining; v_idx++) {
3432 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3438 /* update counts and index */
3444 for (; v_idx < q_vectors; v_idx++) {
3445 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3446 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3448 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3449 tqpv, txr_idx, rqpv, rxr_idx);
3454 /* update counts and index */
3455 rxr_remaining -= rqpv;
3456 txr_remaining -= tqpv;
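/* Distribution example: with 4 Rx and 4 Tx rings spread over 4 vectors,
 * the DIV_ROUND_UP terms give rqpv == tqpv == 1 at every step, so each
 * q_vector ends up with exactly one Tx and one Rx ring.
 */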
3464 adapter->num_tx_queues = 0;
3465 adapter->num_rx_queues = 0;
3466 adapter->num_q_vectors = 0;
3469 igc_free_q_vector(adapter, v_idx);
3475 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
3476 * @adapter: Pointer to adapter structure
3477 * @msix: boolean for MSI-X capability
3479 * This function initializes the interrupts and allocates all of the queues.
3481 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3483 struct net_device *dev = adapter->netdev;
3486 igc_set_interrupt_capability(adapter, msix);
3488 err = igc_alloc_q_vectors(adapter);
3490 netdev_err(dev, "Unable to allocate memory for vectors\n");
3491 goto err_alloc_q_vectors;
3494 igc_cache_ring_register(adapter);
3498 err_alloc_q_vectors:
3499 igc_reset_interrupt_capability(adapter);
3504 * igc_sw_init - Initialize general software structures (struct igc_adapter)
3505 * @adapter: board private structure to initialize
3507 * igc_sw_init initializes the Adapter private data structure.
3508 * Fields are initialized based on PCI device information and
3509 * OS network device settings (MTU size).
3511 static int igc_sw_init(struct igc_adapter *adapter)
3513 struct net_device *netdev = adapter->netdev;
3514 struct pci_dev *pdev = adapter->pdev;
3515 struct igc_hw *hw = &adapter->hw;
3517 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3519 /* set default ring sizes */
3520 adapter->tx_ring_count = IGC_DEFAULT_TXD;
3521 adapter->rx_ring_count = IGC_DEFAULT_RXD;
3523 /* set default ITR values */
3524 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
3525 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
3527 /* set default work limits */
3528 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
3530 /* adjust max frame to be at least the size of a standard frame */
3531 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
3533 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
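/* Example: with the default 1500-byte MTU, max_frame_size works out to
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522 bytes,
 * while min_frame_size is 60 (ETH_ZLEN) + 4 = 64 bytes.
 */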
3535 mutex_init(&adapter->nfc_rule_lock);
3536 INIT_LIST_HEAD(&adapter->nfc_rule_list);
3537 adapter->nfc_rule_count = 0;
3539 spin_lock_init(&adapter->stats64_lock);
3540 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3541 adapter->flags |= IGC_FLAG_HAS_MSIX;
3543 igc_init_queue_configuration(adapter);
3545 /* This call may decrease the number of queues */
3546 if (igc_init_interrupt_scheme(adapter, true)) {
3547 netdev_err(netdev, "Unable to allocate memory for queues\n");
3551 /* Explicitly disable IRQ since the NIC can be in any state. */
3552 igc_irq_disable(adapter);
3554 set_bit(__IGC_DOWN, &adapter->state);
3560 * igc_up - Open the interface and prepare it to handle traffic
3561 * @adapter: board private structure
3563 void igc_up(struct igc_adapter *adapter)
3565 struct igc_hw *hw = &adapter->hw;
3568 /* hardware has been reset, we need to reload some things */
3569 igc_configure(adapter);
3571 clear_bit(__IGC_DOWN, &adapter->state);
3573 for (i = 0; i < adapter->num_q_vectors; i++)
3574 napi_enable(&adapter->q_vector[i]->napi);
3576 if (adapter->msix_entries)
3577 igc_configure_msix(adapter);
3579 igc_assign_vector(adapter->q_vector[0], 0);
3581 /* Clear any pending interrupts. */
3583 igc_irq_enable(adapter);
3585 netif_tx_start_all_queues(adapter->netdev);
3587 /* start the watchdog. */
3588 hw->mac.get_link_status = 1;
3589 schedule_work(&adapter->watchdog_task);
3593 * igc_update_stats - Update the board statistics counters
3594 * @adapter: board private structure
3596 void igc_update_stats(struct igc_adapter *adapter)
3598 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
3599 struct pci_dev *pdev = adapter->pdev;
3600 struct igc_hw *hw = &adapter->hw;
3601 u64 _bytes, _packets;
3607 /* Prevent stats update while adapter is being reset, or if the pci
3608 * connection is down.
3610 if (adapter->link_speed == 0)
3612 if (pci_channel_offline(pdev))
3619 for (i = 0; i < adapter->num_rx_queues; i++) {
3620 struct igc_ring *ring = adapter->rx_ring[i];
3621 u32 rqdpc = rd32(IGC_RQDPC(i));
3623 if (hw->mac.type >= igc_i225)
3624 wr32(IGC_RQDPC(i), 0);
3627 ring->rx_stats.drops += rqdpc;
3628 net_stats->rx_fifo_errors += rqdpc;
3632 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
3633 _bytes = ring->rx_stats.bytes;
3634 _packets = ring->rx_stats.packets;
3635 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
3637 packets += _packets;
3640 net_stats->rx_bytes = bytes;
3641 net_stats->rx_packets = packets;
3645 for (i = 0; i < adapter->num_tx_queues; i++) {
3646 struct igc_ring *ring = adapter->tx_ring[i];
3649 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
3650 _bytes = ring->tx_stats.bytes;
3651 _packets = ring->tx_stats.packets;
3652 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
3654 packets += _packets;
3656 net_stats->tx_bytes = bytes;
3657 net_stats->tx_packets = packets;
3660 /* read stats registers */
3661 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
3662 adapter->stats.gprc += rd32(IGC_GPRC);
3663 adapter->stats.gorc += rd32(IGC_GORCL);
3664 rd32(IGC_GORCH); /* clear GORCL */
3665 adapter->stats.bprc += rd32(IGC_BPRC);
3666 adapter->stats.mprc += rd32(IGC_MPRC);
3667 adapter->stats.roc += rd32(IGC_ROC);
3669 adapter->stats.prc64 += rd32(IGC_PRC64);
3670 adapter->stats.prc127 += rd32(IGC_PRC127);
3671 adapter->stats.prc255 += rd32(IGC_PRC255);
3672 adapter->stats.prc511 += rd32(IGC_PRC511);
3673 adapter->stats.prc1023 += rd32(IGC_PRC1023);
3674 adapter->stats.prc1522 += rd32(IGC_PRC1522);
3675 adapter->stats.tlpic += rd32(IGC_TLPIC);
3676 adapter->stats.rlpic += rd32(IGC_RLPIC);
3678 mpc = rd32(IGC_MPC);
3679 adapter->stats.mpc += mpc;
3680 net_stats->rx_fifo_errors += mpc;
3681 adapter->stats.scc += rd32(IGC_SCC);
3682 adapter->stats.ecol += rd32(IGC_ECOL);
3683 adapter->stats.mcc += rd32(IGC_MCC);
3684 adapter->stats.latecol += rd32(IGC_LATECOL);
3685 adapter->stats.dc += rd32(IGC_DC);
3686 adapter->stats.rlec += rd32(IGC_RLEC);
3687 adapter->stats.xonrxc += rd32(IGC_XONRXC);
3688 adapter->stats.xontxc += rd32(IGC_XONTXC);
3689 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
3690 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
3691 adapter->stats.fcruc += rd32(IGC_FCRUC);
3692 adapter->stats.gptc += rd32(IGC_GPTC);
3693 adapter->stats.gotc += rd32(IGC_GOTCL);
3694 rd32(IGC_GOTCH); /* clear GOTCL */
3695 adapter->stats.rnbc += rd32(IGC_RNBC);
3696 adapter->stats.ruc += rd32(IGC_RUC);
3697 adapter->stats.rfc += rd32(IGC_RFC);
3698 adapter->stats.rjc += rd32(IGC_RJC);
3699 adapter->stats.tor += rd32(IGC_TORH);
3700 adapter->stats.tot += rd32(IGC_TOTH);
3701 adapter->stats.tpr += rd32(IGC_TPR);
3703 adapter->stats.ptc64 += rd32(IGC_PTC64);
3704 adapter->stats.ptc127 += rd32(IGC_PTC127);
3705 adapter->stats.ptc255 += rd32(IGC_PTC255);
3706 adapter->stats.ptc511 += rd32(IGC_PTC511);
3707 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
3708 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
3710 adapter->stats.mptc += rd32(IGC_MPTC);
3711 adapter->stats.bptc += rd32(IGC_BPTC);
3713 adapter->stats.tpt += rd32(IGC_TPT);
3714 adapter->stats.colc += rd32(IGC_COLC);
3715 adapter->stats.colc += rd32(IGC_RERC);
3717 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
3719 adapter->stats.tsctc += rd32(IGC_TSCTC);
3721 adapter->stats.iac += rd32(IGC_IAC);
3723 /* Fill out the OS statistics structure */
3724 net_stats->multicast = adapter->stats.mprc;
3725 net_stats->collisions = adapter->stats.colc;
3729 /* RLEC on some newer hardware can be incorrect so build
3730 * our own version based on RUC and ROC
3732 net_stats->rx_errors = adapter->stats.rxerrc +
3733 adapter->stats.crcerrs + adapter->stats.algnerrc +
3734 adapter->stats.ruc + adapter->stats.roc +
3735 adapter->stats.cexterr;
3736 net_stats->rx_length_errors = adapter->stats.ruc +
3738 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3739 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3740 net_stats->rx_missed_errors = adapter->stats.mpc;
3743 net_stats->tx_errors = adapter->stats.ecol +
3744 adapter->stats.latecol;
3745 net_stats->tx_aborted_errors = adapter->stats.ecol;
3746 net_stats->tx_window_errors = adapter->stats.latecol;
3747 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3749 /* Tx Dropped needs to be maintained elsewhere */
3751 /* Management Stats */
3752 adapter->stats.mgptc += rd32(IGC_MGTPTC);
3753 adapter->stats.mgprc += rd32(IGC_MGTPRC);
3754 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
3758 * igc_down - Close the interface
3759 * @adapter: board private structure
3761 void igc_down(struct igc_adapter *adapter)
3763 struct net_device *netdev = adapter->netdev;
3764 struct igc_hw *hw = &adapter->hw;
3768 set_bit(__IGC_DOWN, &adapter->state);
3770 igc_ptp_suspend(adapter);
3772 /* disable receives in the hardware */
3773 rctl = rd32(IGC_RCTL);
3774 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
3775 /* flush and sleep below */
3777 /* set trans_start so we don't get spurious watchdogs during reset */
3778 netif_trans_update(netdev);
3780 netif_carrier_off(netdev);
3781 netif_tx_stop_all_queues(netdev);
3783 /* disable transmits in the hardware */
3784 tctl = rd32(IGC_TCTL);
3785 tctl &= ~IGC_TCTL_EN;
3786 wr32(IGC_TCTL, tctl);
3787 /* flush both disables and wait for them to finish */
3789 usleep_range(10000, 20000);
3791 igc_irq_disable(adapter);
3793 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
3795 for (i = 0; i < adapter->num_q_vectors; i++) {
3796 if (adapter->q_vector[i]) {
3797 napi_synchronize(&adapter->q_vector[i]->napi);
3798 napi_disable(&adapter->q_vector[i]->napi);
3802 del_timer_sync(&adapter->watchdog_timer);
3803 del_timer_sync(&adapter->phy_info_timer);
3805 /* record the stats before reset */
3806 spin_lock(&adapter->stats64_lock);
3807 igc_update_stats(adapter);
3808 spin_unlock(&adapter->stats64_lock);
3810 adapter->link_speed = 0;
3811 adapter->link_duplex = 0;
3813 if (!pci_channel_offline(adapter->pdev))
3816 /* clear VLAN promisc flag so VFTA will be updated if necessary */
3817 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
3819 igc_clean_all_tx_rings(adapter);
3820 igc_clean_all_rx_rings(adapter);
3823 void igc_reinit_locked(struct igc_adapter *adapter)
3825 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3826 usleep_range(1000, 2000);
3829 clear_bit(__IGC_RESETTING, &adapter->state);
3832 static void igc_reset_task(struct work_struct *work)
3834 struct igc_adapter *adapter;
3836 adapter = container_of(work, struct igc_adapter, reset_task);
3838 igc_rings_dump(adapter);
3839 igc_regs_dump(adapter);
3840 netdev_err(adapter->netdev, "Reset adapter\n");
3841 igc_reinit_locked(adapter);
3845 * igc_change_mtu - Change the Maximum Transfer Unit
3846 * @netdev: network interface device structure
3847 * @new_mtu: new value for maximum frame size
3849 * Returns 0 on success, negative on failure
3851 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
3853 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
3854 struct igc_adapter *adapter = netdev_priv(netdev);
3856 /* adjust max frame to be at least the size of a standard frame */
3857 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3858 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
3860 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3861 usleep_range(1000, 2000);
3863 /* igc_down has a dependency on max_frame_size */
3864 adapter->max_frame_size = max_frame;
3866 if (netif_running(netdev))
3869 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3870 netdev->mtu = new_mtu;
3872 if (netif_running(netdev))
3877 clear_bit(__IGC_RESETTING, &adapter->state);
3883 * igc_get_stats64 - Get System Network Statistics
3884 * @netdev: network interface device structure
3885 * @stats: rtnl_link_stats64 pointer
3887 * Returns the address of the device statistics structure.
3888 * The statistics are updated here and also from the timer callback.
3890 static void igc_get_stats64(struct net_device *netdev,
3891 struct rtnl_link_stats64 *stats)
3893 struct igc_adapter *adapter = netdev_priv(netdev);
3895 spin_lock(&adapter->stats64_lock);
3896 if (!test_bit(__IGC_RESETTING, &adapter->state))
3897 igc_update_stats(adapter);
3898 memcpy(stats, &adapter->stats64, sizeof(*stats));
3899 spin_unlock(&adapter->stats64_lock);
3902 static netdev_features_t igc_fix_features(struct net_device *netdev,
3903 netdev_features_t features)
3905 /* Since there is no support for separate Rx/Tx vlan accel
3906 * enable/disable, make sure the Tx flag is always in the same state as Rx.
3908 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3909 features |= NETIF_F_HW_VLAN_CTAG_TX;
3911 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3916 static int igc_set_features(struct net_device *netdev,
3917 netdev_features_t features)
3919 netdev_features_t changed = netdev->features ^ features;
3920 struct igc_adapter *adapter = netdev_priv(netdev);
3922 /* Add VLAN support */
3923 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
3926 if (!(features & NETIF_F_NTUPLE))
3927 igc_flush_nfc_rules(adapter);
3929 netdev->features = features;
3931 if (netif_running(netdev))
3932 igc_reinit_locked(adapter);
3939 static netdev_features_t
3940 igc_features_check(struct sk_buff *skb, struct net_device *dev,
3941 netdev_features_t features)
3943 unsigned int network_hdr_len, mac_hdr_len;
3945 /* Make certain the headers can be described by a context descriptor */
3946 mac_hdr_len = skb_network_header(skb) - skb->data;
3947 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
3948 return features & ~(NETIF_F_HW_CSUM |
3950 NETIF_F_HW_VLAN_CTAG_TX |
3954 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
3955 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
3956 return features & ~(NETIF_F_HW_CSUM |
3961 /* We can only support IPv4 TSO in tunnels if we can mangle the
3962 * inner IP ID field, so strip TSO if MANGLEID is not supported.
3964 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
3965 features &= ~NETIF_F_TSO;
3970 static void igc_tsync_interrupt(struct igc_adapter *adapter)
3972 struct igc_hw *hw = &adapter->hw;
3973 u32 tsicr = rd32(IGC_TSICR);
3976 if (tsicr & IGC_TSICR_TXTS) {
3977 /* retrieve hardware timestamp */
3978 schedule_work(&adapter->ptp_tx_work);
3979 ack |= IGC_TSICR_TXTS;
3982 /* acknowledge the interrupts */
3983 wr32(IGC_TSICR, ack);
3987 * igc_msix_other - msix other interrupt handler
3988 * @irq: interrupt number
3989 * @data: pointer to a q_vector
3991 static irqreturn_t igc_msix_other(int irq, void *data)
3993 struct igc_adapter *adapter = data;
3994 struct igc_hw *hw = &adapter->hw;
3995 u32 icr = rd32(IGC_ICR);
3997 /* reading ICR causes bit 31 of EICR to be cleared */
3998 if (icr & IGC_ICR_DRSTA)
3999 schedule_work(&adapter->reset_task);
4001 if (icr & IGC_ICR_DOUTSYNC) {
4002 /* HW is reporting DMA is out of sync */
4003 adapter->stats.doosync++;
4006 if (icr & IGC_ICR_LSC) {
4007 hw->mac.get_link_status = 1;
4008 /* guard against interrupt when we're going down */
4009 if (!test_bit(__IGC_DOWN, &adapter->state))
4010 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4013 if (icr & IGC_ICR_TS)
4014 igc_tsync_interrupt(adapter);
4016 wr32(IGC_EIMS, adapter->eims_other);
4021 static void igc_write_itr(struct igc_q_vector *q_vector)
4023 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4025 if (!q_vector->set_itr)
4029 itr_val = IGC_ITR_VAL_MASK;
4031 itr_val |= IGC_EITR_CNT_IGNR;
4033 writel(itr_val, q_vector->itr_register);
4034 q_vector->set_itr = 0;
4037 static irqreturn_t igc_msix_ring(int irq, void *data)
4039 struct igc_q_vector *q_vector = data;
4041 /* Write the ITR value calculated from the previous interrupt. */
4042 igc_write_itr(q_vector);
4044 napi_schedule(&q_vector->napi);
4050 * igc_request_msix - Initialize MSI-X interrupts
4051 * @adapter: Pointer to adapter structure
4053 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
4056 static int igc_request_msix(struct igc_adapter *adapter)
4058 int i = 0, err = 0, vector = 0, free_vector = 0;
4059 struct net_device *netdev = adapter->netdev;
4061 err = request_irq(adapter->msix_entries[vector].vector,
4062 &igc_msix_other, 0, netdev->name, adapter);
4066 for (i = 0; i < adapter->num_q_vectors; i++) {
4067 struct igc_q_vector *q_vector = adapter->q_vector[i];
4071 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4073 if (q_vector->rx.ring && q_vector->tx.ring)
4074 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4075 q_vector->rx.ring->queue_index);
4076 else if (q_vector->tx.ring)
4077 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4078 q_vector->tx.ring->queue_index);
4079 else if (q_vector->rx.ring)
4080 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4081 q_vector->rx.ring->queue_index);
4083 sprintf(q_vector->name, "%s-unused", netdev->name);
4085 err = request_irq(adapter->msix_entries[vector].vector,
4086 igc_msix_ring, 0, q_vector->name,
4092 igc_configure_msix(adapter);
4096 /* free already assigned IRQs */
4097 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4100 for (i = 0; i < vector; i++) {
4101 free_irq(adapter->msix_entries[free_vector++].vector,
4102 adapter->q_vector[i]);
4109 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4110 * @adapter: Pointer to adapter structure
4112 * This function resets the device so that it has 0 rx queues, tx queues, and
4113 * MSI-X interrupts allocated.
4115 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4117 igc_free_q_vectors(adapter);
4118 igc_reset_interrupt_capability(adapter);
4121 /* Need to wait a few seconds after link up to get diagnostic information from the phy */
4124 static void igc_update_phy_info(struct timer_list *t)
4126 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4128 igc_get_phy_info(&adapter->hw);
4132 * igc_has_link - check shared code for link and determine up/down
4133 * @adapter: pointer to driver private info
4135 bool igc_has_link(struct igc_adapter *adapter)
4137 struct igc_hw *hw = &adapter->hw;
4138 bool link_active = false;
4140 /* get_link_status is set on LSC (link status) interrupt or
4141 * rx sequence error interrupt. get_link_status will stay
4142 * false until igc_check_for_link establishes link
4143 * for copper adapters ONLY
4145 switch (hw->phy.media_type) {
4146 case igc_media_type_copper:
4147 if (!hw->mac.get_link_status)
4149 hw->mac.ops.check_for_link(hw);
4150 link_active = !hw->mac.get_link_status;
4153 case igc_media_type_unknown:
4157 if (hw->mac.type == igc_i225 &&
4158 hw->phy.id == I225_I_PHY_ID) {
4159 if (!netif_carrier_ok(adapter->netdev)) {
4160 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4161 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4162 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4163 adapter->link_check_timeout = jiffies;
4171 * igc_watchdog - Timer Call-back
4172 * @t: timer for the watchdog
4174 static void igc_watchdog(struct timer_list *t)
4176 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4177 /* Do the rest outside of interrupt context */
4178 schedule_work(&adapter->watchdog_task);
4181 static void igc_watchdog_task(struct work_struct *work)
4183 struct igc_adapter *adapter = container_of(work,
4186 struct net_device *netdev = adapter->netdev;
4187 struct igc_hw *hw = &adapter->hw;
4188 struct igc_phy_info *phy = &hw->phy;
4189 u16 phy_data, retry_count = 20;
4193 link = igc_has_link(adapter);
4195 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4196 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4197 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4203 /* Cancel scheduled suspend requests. */
4204 pm_runtime_resume(netdev->dev.parent);
4206 if (!netif_carrier_ok(netdev)) {
4209 hw->mac.ops.get_speed_and_duplex(hw,
4210 &adapter->link_speed,
4211 &adapter->link_duplex);
4213 ctrl = rd32(IGC_CTRL);
4214 /* Link status message must follow this format */
4216 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4217 adapter->link_speed,
4218 adapter->link_duplex == FULL_DUPLEX ?
4220 (ctrl & IGC_CTRL_TFCE) &&
4221 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4222 (ctrl & IGC_CTRL_RFCE) ? "RX" :
4223 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
4225 /* disable EEE if enabled */
4226 if ((adapter->flags & IGC_FLAG_EEE) &&
4227 adapter->link_duplex == HALF_DUPLEX) {
4229 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4230 adapter->hw.dev_spec._base.eee_enable = false;
4231 adapter->flags &= ~IGC_FLAG_EEE;
4234 /* check if SmartSpeed worked */
4235 igc_check_downshift(hw);
4236 if (phy->speed_downgraded)
4237 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4239 /* adjust timeout factor according to speed/duplex */
4240 adapter->tx_timeout_factor = 1;
4241 switch (adapter->link_speed) {
4243 adapter->tx_timeout_factor = 14;
4246 /* maybe add some timeout factor? */
4250 if (adapter->link_speed != SPEED_1000)
4253 /* wait for Remote receiver status OK */
4255 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
4257 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4261 goto retry_read_status;
4262 } else if (!retry_count) {
4263 netdev_err(netdev, "exceeded max 2 second wait\n");
4266 netdev_err(netdev, "failed to read 1000Base-T Status Reg\n");
4269 netif_carrier_on(netdev);
4271 /* link state has changed, schedule phy info update */
4272 if (!test_bit(__IGC_DOWN, &adapter->state))
4273 mod_timer(&adapter->phy_info_timer,
4274 round_jiffies(jiffies + 2 * HZ));
4277 if (netif_carrier_ok(netdev)) {
4278 adapter->link_speed = 0;
4279 adapter->link_duplex = 0;
4281 /* Link status message must follow this format */
4282 netdev_info(netdev, "NIC Link is Down\n");
4283 netif_carrier_off(netdev);
4285 /* link state has changed, schedule phy info update */
4286 if (!test_bit(__IGC_DOWN, &adapter->state))
4287 mod_timer(&adapter->phy_info_timer,
4288 round_jiffies(jiffies + 2 * HZ));
4290 /* link is down, time to check for alternate media */
4291 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
4292 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4293 schedule_work(&adapter->reset_task);
4294 /* return immediately */
4298 pm_schedule_suspend(netdev->dev.parent,
4301 /* also check for alternate media here */
4302 } else if (!netif_carrier_ok(netdev) &&
4303 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
4304 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4305 schedule_work(&adapter->reset_task);
4306 /* return immediately */
4312 spin_lock(&adapter->stats64_lock);
4313 igc_update_stats(adapter);
4314 spin_unlock(&adapter->stats64_lock);
4316 for (i = 0; i < adapter->num_tx_queues; i++) {
4317 struct igc_ring *tx_ring = adapter->tx_ring[i];
4319 if (!netif_carrier_ok(netdev)) {
4320 /* We've lost link, so the controller stops DMA,
4321 * but we've got queued Tx work that's never going
4322 * to get done, so reset controller to flush Tx.
4323 * (Do the reset outside of interrupt context).
4325 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
4326 adapter->tx_timeout_count++;
4327 schedule_work(&adapter->reset_task);
4328 /* return immediately since reset is imminent */
4333 /* Force detection of hung controller every watchdog period */
4334 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4337 /* Cause software interrupt to ensure Rx ring is cleaned */
4338 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4341 for (i = 0; i < adapter->num_q_vectors; i++)
4342 eics |= adapter->q_vector[i]->eims_value;
4343 wr32(IGC_EICS, eics);
4345 wr32(IGC_ICS, IGC_ICS_RXDMT0);
4348 igc_ptp_tx_hang(adapter);
4350 /* Reset the timer */
4351 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4352 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
4353 mod_timer(&adapter->watchdog_timer,
4354 round_jiffies(jiffies + HZ));
4356 mod_timer(&adapter->watchdog_timer,
4357 round_jiffies(jiffies + 2 * HZ));
4362 * igc_intr_msi - Interrupt Handler
4363 * @irq: interrupt number
4364 * @data: pointer to a network interface device structure
4366 static irqreturn_t igc_intr_msi(int irq, void *data)
4368 struct igc_adapter *adapter = data;
4369 struct igc_q_vector *q_vector = adapter->q_vector[0];
4370 struct igc_hw *hw = &adapter->hw;
4371 /* read ICR disables interrupts using IAM */
4372 u32 icr = rd32(IGC_ICR);
4374 igc_write_itr(q_vector);
4376 if (icr & IGC_ICR_DRSTA)
4377 schedule_work(&adapter->reset_task);
4379 if (icr & IGC_ICR_DOUTSYNC) {
4380 /* HW is reporting DMA is out of sync */
4381 adapter->stats.doosync++;
4384 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4385 hw->mac.get_link_status = 1;
4386 if (!test_bit(__IGC_DOWN, &adapter->state))
4387 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4390 napi_schedule(&q_vector->napi);
4396 * igc_intr - Legacy Interrupt Handler
4397 * @irq: interrupt number
4398 * @data: pointer to a network interface device structure
4400 static irqreturn_t igc_intr(int irq, void *data)
4402 struct igc_adapter *adapter = data;
4403 struct igc_q_vector *q_vector = adapter->q_vector[0];
4404 struct igc_hw *hw = &adapter->hw;
4405 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4406 * need for the IMC write
4408 u32 icr = rd32(IGC_ICR);
4410 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4411 * not set, then the adapter didn't send an interrupt
4413 if (!(icr & IGC_ICR_INT_ASSERTED))
4416 igc_write_itr(q_vector);
4418 if (icr & IGC_ICR_DRSTA)
4419 schedule_work(&adapter->reset_task);
4421 if (icr & IGC_ICR_DOUTSYNC) {
4422 /* HW is reporting DMA is out of sync */
4423 adapter->stats.doosync++;
4426 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4427 hw->mac.get_link_status = 1;
4428 /* guard against interrupt when we're going down */
4429 if (!test_bit(__IGC_DOWN, &adapter->state))
4430 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4433 napi_schedule(&q_vector->napi);
4438 static void igc_free_irq(struct igc_adapter *adapter)
4440 if (adapter->msix_entries) {
4443 free_irq(adapter->msix_entries[vector++].vector, adapter);
4445 for (i = 0; i < adapter->num_q_vectors; i++)
4446 free_irq(adapter->msix_entries[vector++].vector,
4447 adapter->q_vector[i]);
4449 free_irq(adapter->pdev->irq, adapter);
4454 * igc_request_irq - initialize interrupts
4455 * @adapter: Pointer to adapter structure
4457 * Attempts to configure interrupts using the best available
4458 * capabilities of the hardware and kernel.
4460 static int igc_request_irq(struct igc_adapter *adapter)
4462 struct net_device *netdev = adapter->netdev;
4463 struct pci_dev *pdev = adapter->pdev;
	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igc_free_all_tx_resources(adapter);
		igc_free_all_rx_resources(adapter);

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		igc_setup_all_tx_resources(adapter);
		igc_setup_all_rx_resources(adapter);
		igc_configure(adapter);
	}

	igc_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGC_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, &igc_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igc_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGC_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
			  netdev->name, adapter);
	if (err)
		netdev_err(netdev, "Error %d getting interrupt\n", err);

request_done:
	return err;
}
/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: boolean indicating if the device is resuming
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int __igc_open(struct net_device *netdev, bool resuming)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	int err = 0;
	int i = 0;

	/* disallow open during test */
	if (test_bit(__IGC_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igc_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igc_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igc_power_up_link(adapter);

	igc_configure(adapter);

	err = igc_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	clear_bit(__IGC_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return IGC_SUCCESS;

err_set_queues:
	igc_free_irq(adapter);
err_req_irq:
	igc_release_hw_control(adapter);
	igc_power_down_phy_copper_base(&adapter->hw);
	igc_free_all_rx_resources(adapter);
err_setup_rx:
	igc_free_all_tx_resources(adapter);
err_setup_tx:
	igc_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}
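/* igc_open is the ndo_open callback; __igc_open() does the real work and
 * uses the "resuming" flag to skip runtime-PM accounting on resume.
 */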
int igc_open(struct net_device *netdev)
{
	return __igc_open(netdev, false);
}
/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: boolean indicating the device is suspending
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int __igc_close(struct net_device *netdev, bool suspending)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igc_down(adapter);

	igc_release_hw_control(adapter);

	igc_free_irq(adapter);

	igc_free_all_tx_resources(adapter);
	igc_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);

	return 0;
}
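/* Only touch the hardware if the device is actually present (or being
 * dismantled); after a surprise removal there is nothing left to close.
 */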
int igc_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igc_close(netdev, false);
	return 0;
}
/**
 * igc_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 */
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return igc_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igc_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
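/* Record the per-queue launchtime (ETF) setting. When no Qbv schedule has
 * been programmed, fall back to a default 1-second cycle with every
 * queue's gate open for the entire cycle.
 */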
static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
				      bool enable)
{
	struct igc_ring *ring;
	int i;

	if (queue < 0 || queue >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];
	ring->launchtime_enable = enable;

	if (adapter->base_time)
		return 0;

	adapter->cycle_time = NSEC_PER_SEC;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ring->start_time = 0;
		ring->end_time = NSEC_PER_SEC;
	}

	return 0;
}
static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
{
	struct timespec64 b;

	b = ktime_to_timespec64(base_time);

	return timespec64_compare(now, &b) > 0;
}
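/* A taprio schedule is usable by the i225 only if it has no cycle-time
 * extension, its base time is not in the future, every entry is a plain
 * set-gates command, and each queue's gate opens at most once per cycle.
 */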
static bool validate_schedule(struct igc_adapter *adapter,
			      const struct tc_taprio_qopt_offload *qopt)
{
	int queue_uses[IGC_MAX_TX_QUEUES] = { };
	struct timespec64 now;
	size_t n;

	if (qopt->cycle_time_extension)
		return false;

	igc_ptp_read(adapter, &now);

	/* If we program the controller's BASET registers with a time
	 * in the future, it will hold all the packets until that
	 * time, causing a lot of TX Hangs, so to avoid that, we
	 * reject schedules that would start in the future.
	 */
	if (!is_base_time_past(qopt->base_time, &now))
		return false;

	for (n = 0; n < qopt->num_entries; n++) {
		const struct tc_taprio_sched_entry *e;
		int i;

		e = &qopt->entries[n];

		/* i225 only supports "global" frame preemption
		 * settings.
		 */
		if (e->command != TC_TAPRIO_CMD_SET_GATES)
			return false;

		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			if (e->gate_mask & BIT(i))
				queue_uses[i]++;

			if (queue_uses[i] > 1)
				return false;
		}
	}

	return true;
}
static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
				     struct tc_etf_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;

	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
	if (err)
		return err;

	return igc_tsn_offload_apply(adapter);
}
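/* Translate a taprio schedule into per-ring [start_time, end_time) windows
 * within the cycle; a ring is only allowed to transmit while its window
 * is open.
 */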
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
				 struct tc_taprio_qopt_offload *qopt)
{
	u32 start_time = 0, end_time = 0;
	size_t n;

	if (!qopt->enable) {
		adapter->base_time = 0;
		return 0;
	}

	if (adapter->base_time)
		return -EALREADY;

	if (!validate_schedule(adapter, qopt))
		return -EINVAL;

	adapter->cycle_time = qopt->cycle_time;
	adapter->base_time = qopt->base_time;

	/* FIXME: be a little smarter about cases when the gate for a
	 * queue stays open for more than one entry.
	 */
	for (n = 0; n < qopt->num_entries; n++) {
		struct tc_taprio_sched_entry *e = &qopt->entries[n];
		int i;

		end_time += e->interval;

		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			struct igc_ring *ring = adapter->tx_ring[i];

			if (!(e->gate_mask & BIT(i)))
				continue;

			ring->start_time = start_time;
			ring->end_time = end_time;
		}

		start_time += e->interval;
	}

	return 0;
}
static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
					 struct tc_taprio_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;

	err = igc_save_qbv_schedule(adapter, qopt);
	if (err)
		return err;

	return igc_tsn_offload_apply(adapter);
}
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return igc_tsn_enable_qbv_scheduling(adapter, type_data);

	case TC_SETUP_QDISC_ETF:
		return igc_tsn_enable_launchtime(adapter, type_data);

	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops igc_netdev_ops = {
	.ndo_open		= igc_open,
	.ndo_stop		= igc_close,
	.ndo_start_xmit		= igc_xmit_frame,
	.ndo_set_rx_mode	= igc_set_rx_mode,
	.ndo_set_mac_address	= igc_set_mac,
	.ndo_change_mtu		= igc_change_mtu,
	.ndo_get_stats64	= igc_get_stats64,
	.ndo_fix_features	= igc_fix_features,
	.ndo_set_features	= igc_set_features,
	.ndo_features_check	= igc_features_check,
	.ndo_do_ioctl		= igc_ioctl,
	.ndo_setup_tc		= igc_setup_tc,
};
/* PCIe configuration access */
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;

	pcie_capability_read_word(adapter->pdev, reg, value);

	return IGC_SUCCESS;
}

s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;

	pcie_capability_write_word(adapter->pdev, reg, *value);

	return IGC_SUCCESS;
}
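/* igc_rd32 - read a device register. A value of all 1's from offset 0, or
 * from any offset when offset 0 also reads as all 1's, means the PCIe link
 * is gone; clear hw_addr and detach the netdev so later accesses fail fast.
 */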
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igc->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
		WARN(pci_device_is_present(igc->pdev),
		     "igc: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
	struct igc_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = false;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
		goto err_inval;
	case SPEED_2500 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case SPEED_2500 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
 */
static int igc_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct igc_adapter *adapter;
	struct net_device *netdev;
	struct igc_hw *hw;
	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;
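	/* Prefer 64-bit DMA and fall back to a 32-bit mask on platforms
	 * that cannot address the full range.
	 */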
	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igc_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
				   IGC_MAX_TX_QUEUES);

	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->port_num = hw->bus.func;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = pci_save_state(pdev);
	if (err)
		goto err_ioremap;

	err = -EIO;
	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!adapter->io_addr)
		goto err_ioremap;

	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;
	netdev->netdev_ops = &igc_netdev_ops;
	igc_ethtool_set_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC and PHY function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* Add supported features to the features list */
	netdev->features |= NETIF_F_SG;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_TSO_ECN;
	netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->features |= NETIF_F_HW_TC;
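	/* The tunnel offloads below are advertised through GSO_PARTIAL:
	 * segments the hardware cannot fully checksum are finished by
	 * software.
	 */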
#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;
	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= netdev->features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}
	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
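	/* 0xaf advertises 10/100 half and full duplex plus 1000 and 2500
	 * full duplex (half duplex is unsupported at gigabit speeds).
	 */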
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;
err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
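/* Common suspend/shutdown path: detach and close the interface, then arm
 * the Wake-on-LAN filter control (WUFC) if a wake source is configured.
 * Runtime suspend arms only the "link status change" wake event.
 */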
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
#ifdef CONFIG_PM
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
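/* The packet that triggered a wake event is latched in the WUPM registers;
 * replay it into the stack on resume so it is not lost.
 */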
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);

	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}

static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
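/* Runtime idle never suspends immediately: if the link is down, schedule a
 * runtime suspend five seconds out and report -EBUSY for now.
 */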
static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igc_resume routine.
 */
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};

#ifdef CONFIG_PM
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
#endif

static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
#ifdef CONFIG_PM
	.driver.pm = &igc_pm_ops,
#endif
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};
/**
 * igc_reinit_queues - reinitialize the adapter's queues and interrupts
 * @adapter: pointer to adapter structure
 *
 * Returns 0 on success, negative value on failure
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);

	return err;
}
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);

	ret = pci_register_driver(&igc_driver);

	return ret;
}

module_init(igc_init_module);
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);