// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Intel Corporation */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/if_vlan.h>
#include <linux/aer.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/pm_runtime.h>
#include <net/pkt_sched.h>

#include <net/ipv6.h>

#include "igc.h"
#include "igc_hw.h"
#include "igc_tsn.h"
#define DRV_SUMMARY	"Intel(R) 2.5G Ethernet Linux Driver"

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

char igc_driver_name[] = "igc";
static const char igc_driver_string[] = DRV_SUMMARY;
static const char igc_copyright[] =
	"Copyright(c) 2018 Intel Corporation.";
static const struct igc_info *igc_info_tbl[] = {
	[board_base] = &igc_base_info,
};

static const struct pci_device_id igc_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LM), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_I), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I220_V), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
	{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igc_pci_tbl);
void igc_reset(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	struct igc_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU if required */
	pba = IGC_PBA_34K;

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
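	/* Illustrative arithmetic (values assumed, not read from hardware):
	 * with pba = 34 (KB), pba << 10 = 34816 bytes. If max_frame_size is
	 * 1522 and MAX_JUMBO_FRAME_SIZE is 9728, then hwm = 34816 - 11250 =
	 * 23566; masking to 16-byte granularity gives high_water = 23552 and
	 * low_water = 23536.
	 */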
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	hw->mac.ops.reset_hw(hw);

	if (hw->mac.ops.init_hw(hw))
		netdev_err(dev, "Error on hardware initialization\n");

	/* Re-establish EEE setting */
	igc_set_eee_i225(hw, true, true, true);

	if (!netif_running(adapter->netdev))
		igc_power_down_phy_copper_base(&adapter->hw);

	/* Re-enable PTP, where applicable. */
	igc_ptp_reset(adapter);

	/* Re-enable TSN offloading, where applicable. */
	igc_tsn_offload_apply(adapter);

	igc_get_phy_info(hw);
}
/**
 * igc_power_up_link - Power up the phy link
 * @adapter: address of board private structure
 */
static void igc_power_up_link(struct igc_adapter *adapter)
{
	igc_reset_phy(&adapter->hw);

	igc_power_up_phy_copper(&adapter->hw);

	igc_setup_link(&adapter->hw);
}
/**
 * igc_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igc_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void igc_release_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

/**
 * igc_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igc_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 */
static void igc_get_hw_control(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(IGC_CTRL_EXT);
	wr32(IGC_CTRL_EXT,
	     ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
/**
 * igc_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 */
static void igc_clean_tx_ring(struct igc_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union igc_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGC_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
/**
 * igc_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void igc_free_tx_resources(struct igc_ring *tx_ring)
{
	igc_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igc_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 */
static void igc_free_all_tx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_tx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igc_clean_tx_ring(adapter->tx_ring[i]);
}
/**
 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 */
int igc_setup_tx_resources(struct igc_ring *tx_ring)
{
	struct net_device *ndev = tx_ring->netdev;
	struct device *dev = tx_ring->dev;
	int size = 0;

	size = sizeof(struct igc_tx_buffer) * tx_ring->count;
	tx_ring->tx_buffer_info = vzalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);

	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	netdev_err(ndev, "Unable to allocate memory for Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_tx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igc_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Tx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igc_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 */
static void igc_clean_rx_ring(struct igc_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igc_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igc_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 */
static void igc_clean_all_rx_rings(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igc_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igc_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void igc_free_rx_resources(struct igc_ring *rx_ring)
{
	igc_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igc_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 */
static void igc_free_all_rx_resources(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_free_rx_resources(adapter->rx_ring[i]);
}
/**
 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 */
int igc_setup_rx_resources(struct igc_ring *rx_ring)
{
	struct net_device *ndev = rx_ring->netdev;
	struct device *dev = rx_ring->dev;
	int size, desc_len;

	size = sizeof(struct igc_rx_buffer) * rx_ring->count;
	rx_ring->rx_buffer_info = vzalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	desc_len = sizeof(union igc_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);

	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	netdev_err(ndev, "Unable to allocate memory for Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
 *                              (Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 */
static int igc_setup_all_rx_resources(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igc_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			netdev_err(dev, "Error on Rx queue %u setup\n", i);
			for (i--; i >= 0; i--)
				igc_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igc_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	union igc_adv_rx_desc *rx_desc;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;
	u64 rdba = ring->dma;

	/* disable the queue */
	wr32(IGC_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(IGC_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(IGC_RDBAH(reg_idx), rdba >> 32);
	wr32(IGC_RDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + IGC_RDT(reg_idx);
	wr32(IGC_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* reset next-to-use/clean to place SW in sync with hardware */
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	/* set descriptor configuration */
	srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGC_RXBUFFER_3072 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGC_RXBUFFER_2048 >> IGC_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;

	wr32(IGC_SRRCTL(reg_idx), srrctl);

	rxdctl |= IGC_RX_PTHRESH;
	rxdctl |= IGC_RX_HTHRESH << 8;
	rxdctl |= IGC_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igc_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGC_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= IGC_RXDCTL_QUEUE_ENABLE;

	wr32(IGC_RXDCTL(reg_idx), rxdctl);
}
/**
 * igc_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 */
static void igc_configure_rx(struct igc_adapter *adapter)
{
	int i;

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++)
		igc_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
 * igc_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 */
static void igc_configure_tx_ring(struct igc_adapter *adapter,
				  struct igc_ring *ring)
{
	struct igc_hw *hw = &adapter->hw;
	int reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	u32 txdctl = 0;

	/* disable the queue */
	wr32(IGC_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(IGC_TDLEN(reg_idx),
	     ring->count * sizeof(union igc_adv_tx_desc));
	wr32(IGC_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(IGC_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + IGC_TDT(reg_idx);
	wr32(IGC_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGC_TX_PTHRESH;
	txdctl |= IGC_TX_HTHRESH << 8;
	txdctl |= IGC_TX_WTHRESH << 16;

	txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
	wr32(IGC_TXDCTL(reg_idx), txdctl);
}

/**
 * igc_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 */
static void igc_configure_tx(struct igc_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igc_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
 * igc_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 */
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 j, num_rx_queues;
	u32 mrqc, rxcsum;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(IGC_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGC_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
			(j * num_rx_queues) / IGC_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
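	/* Example: with an IGC_RETA_SIZE of 128 and rss_queues = 4, entry j
	 * maps to queue (j * 4) / 128, so entries 0-31 go to queue 0,
	 * entries 32-63 to queue 1, and so on, spreading the indirection
	 * table evenly across the enabled queues.
	 */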
	igc_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(IGC_RXCSUM);
	rxcsum |= IGC_RXCSUM_PCSD;

	/* Enable Receive Checksum Offload for SCTP */
	rxcsum |= IGC_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(IGC_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
	       IGC_MRQC_RSS_FIELD_IPV4_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6 |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP |
	       IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;

	mrqc |= IGC_MRQC_ENABLE_RSS_MQ;

	wr32(IGC_MRQC, mrqc);
}
/**
 * igc_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 */
static void igc_setup_rctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(IGC_RCTL);

	rctl &= ~(3 << IGC_RCTL_MO_SHIFT);
	rctl &= ~(IGC_RCTL_LBM_TCVR | IGC_RCTL_LBM_MAC);

	rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);

	/* enable stripping of CRC. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= IGC_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= IGC_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(IGC_RXDCTL(0), 0);

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in set_rx_mode
		 */
		rctl |= (IGC_RCTL_SBP | /* Receive bad packets */
			 IGC_RCTL_BAM | /* RX All Bcast Pkts */
			 IGC_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(IGC_RCTL_DPF | /* Allow filtered pause */
			  IGC_RCTL_CFIEN); /* Disable VLAN CFIEN Filter */
	}

	wr32(IGC_RCTL, rctl);
}
/**
 * igc_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 */
static void igc_setup_tctl(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which could be enabled by default */
	wr32(IGC_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(IGC_TCTL);
	tctl &= ~IGC_TCTL_CT;
	tctl |= IGC_TCTL_PSP | IGC_TCTL_RTLC |
		(IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT);

	/* Enable transmits */
	tctl |= IGC_TCTL_EN;

	wr32(IGC_TCTL, tctl);
}
/**
 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be set
 * @index: Filter index
 * @type: MAC address filter type (source or destination)
 * @addr: MAC address
 * @queue: If non-negative, queue assignment feature is enabled and frames
 *         matching the filter are enqueued onto 'queue'. Otherwise, queue
 *         assignment is disabled.
 */
static void igc_set_mac_filter_hw(struct igc_adapter *adapter, int index,
				  enum igc_mac_filter_type type,
				  const u8 *addr, int queue)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;
	u32 ral, rah;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	ral = le32_to_cpup((__le32 *)(addr));
	rah = le16_to_cpup((__le16 *)(addr + 4));

	if (type == IGC_MAC_FILTER_TYPE_SRC) {
		rah &= ~IGC_RAH_ASEL_MASK;
		rah |= IGC_RAH_ASEL_SRC_ADDR;
	}

	if (queue >= 0) {
		rah &= ~IGC_RAH_QSEL_MASK;
		rah |= (queue << IGC_RAH_QSEL_SHIFT);
		rah |= IGC_RAH_QSEL_ENABLE;
	}

	rah |= IGC_RAH_AV;

	wr32(IGC_RAL(index), ral);
	wr32(IGC_RAH(index), rah);

	netdev_dbg(dev, "MAC address filter set in HW: index %d", index);
}
/**
 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
 * @adapter: Pointer to adapter where the filter should be cleared
 * @index: Filter index
 */
static void igc_clear_mac_filter_hw(struct igc_adapter *adapter, int index)
{
	struct net_device *dev = adapter->netdev;
	struct igc_hw *hw = &adapter->hw;

	if (WARN_ON(index >= hw->mac.rar_entry_count))
		return;

	wr32(IGC_RAL(index), 0);
	wr32(IGC_RAH(index), 0);

	netdev_dbg(dev, "MAC address filter cleared in HW: index %d", index);
}

/* Set default MAC address for the PF in the first RAR entry */
static void igc_set_default_mac_filter(struct igc_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	u8 *addr = adapter->hw.mac.addr;

	netdev_dbg(dev, "Set default MAC address filter: address %pM", addr);

	igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1);
}
/**
 * igc_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 */
static int igc_set_mac(struct net_device *netdev, void *p)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igc_set_default_mac_filter(adapter);

	return 0;
}
/**
 * igc_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 */
static int igc_write_mc_addr_list(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igc_update_mc_addr_list(hw, NULL, 0);
		return 0;
	}

	mta_list = kcalloc(netdev_mc_count(netdev), 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igc_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}
static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
{
	ktime_t cycle_time = adapter->cycle_time;
	ktime_t base_time = adapter->base_time;
	s32 launchtime;

	/* FIXME: when using ETF together with taprio, we may have a
	 * case where 'delta' is larger than the cycle_time, this may
	 * cause problems if we don't read the current value of
	 * IGC_BASET, as the value written into the launchtime
	 * descriptor field may be misinterpreted.
	 */
	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);

	return cpu_to_le32(launchtime);
}
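/* The value programmed into the descriptor is the offset of 'txtime'
 * within the current cycle. Example with made-up numbers: if base_time
 * is 0, cycle_time is 1 ms and txtime is 2.3 ms, the remainder (and thus
 * the launch time handed to hardware) is 0.3 ms into the cycle.
 */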
static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
			    struct igc_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct igc_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGC_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IGC_TXD_CMD_DEXT | IGC_ADVTXD_DTYP_CTXT;

	/* For i225, context index must be unique per ring. */
	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid Tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
		ktime_t txtime = first->skb->tstamp;

		first->skb->tstamp = ktime_set(0, 0);
		context_desc->launch_time = igc_tx_launchtime(adapter,
							      txtime);
	} else {
		context_desc->launch_time = 0;
	}
}
static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) &&
		    !tx_ring->launchtime_enable)
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;
		fallthrough;
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if ((first->protocol == htons(ETH_P_IP) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    (first->protocol == htons(ETH_P_IPV6) &&
		     igc_ipv6_csum_is_sctp(skb))) {
			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		fallthrough;
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGC_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}
static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* memory barrier: make the queue stop visible before re-checking */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igc_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
{
	if (igc_desc_unused(tx_ring) >= size)
		return 0;

	return __igc_maybe_stop_tx(tx_ring, size);
}
#define IGC_SET_FLAG(_input, _flag, _result) \
	(((_flag) <= (_result)) ? \
	 ((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
	 ((u32)((_input) & (_flag)) / ((_flag) / (_result))))
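/* IGC_SET_FLAG() moves a flag bit from its position in _input to the
 * position the hardware expects, without branching on the flag itself:
 * the masked value (0 or _flag) is scaled by the power-of-two ratio
 * between _flag and _result. E.g. if _flag is BIT(1) and _result is
 * BIT(5), a set flag is multiplied by 16 to land on bit 5.
 */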
static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
		       IGC_ADVTXD_DCMD_DEXT |
		       IGC_ADVTXD_DCMD_IFCS;

	/* set segmentation bits for TSO */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
				 (IGC_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
				 (IGC_ADVTXD_MAC_TSTAMP));

	return cmd_type;
}
static void igc_tx_olinfo_status(struct igc_ring *tx_ring,
				 union igc_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << IGC_ADVTXD_PAYLEN_SHIFT;

	/* insert L4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_CSUM) *
			 ((IGC_TXD_POPTS_TXSM << 8) /
			 IGC_TX_FLAGS_CSUM);

	/* insert IPv4 checksum */
	olinfo_status |= (tx_flags & IGC_TX_FLAGS_IPV4) *
			 (((IGC_TXD_POPTS_IXSM << 8)) /
			 IGC_TX_FLAGS_IPV4);

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
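/* The multiply/divide pairs above are the same branchless scaling trick
 * as IGC_SET_FLAG(): (tx_flags & FLAG) evaluates to 0 or FLAG, so
 * multiplying by (POPTS_BIT / FLAG) yields either 0 or the POPTS bit.
 */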
static int igc_tx_map(struct igc_ring *tx_ring,
		      struct igc_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;
	u32 tx_flags = first->tx_flags;
	skb_frag_t *frag;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	dma_addr_t dma;
	u32 cmd_type = igc_tx_cmd_type(skb, tx_flags);

	tx_desc = IGC_TX_DESC(tx_ring, i);

	igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGC_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGC_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGC_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGC_MAX_DATA_PER_TXD;
			size -= IGC_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGC_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGC_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	skb_tx_timestamp(skb);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igc_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
		writel(i, tx_ring->tail);
	}

	return 0;
dma_error:
	netdev_err(tx_ring->netdev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
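/* Note on the 'cmd_type ^ size' writes in igc_tx_map() above: cmd_type
 * never carries bits in the low 16-bit buffer-length field, so XOR-ing
 * the length in behaves exactly like OR-ing it here.
 */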
static int igc_tso(struct igc_ring *tx_ring,
		   struct igc_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = IGC_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= IGC_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM |
				   IGC_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGC_TX_FLAGS_TSO |
				   IGC_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	if (type_tucmd & IGC_ADVTXD_TUCMD_L4T_TCP) {
		/* compute length of segmentation header */
		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
	} else {
		/* compute length of segmentation header */
		*hdr_len = sizeof(*l4.udp) + l4_offset;
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
	}

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;

	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}
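/* Rough worked example for the fields above (illustrative values): a
 * TCP/IPv4 skb with a 14-byte MAC header, 20-byte IP header and 20-byte
 * TCP header gives l4_offset = 34 and *hdr_len = 54; L4LEN holds 20,
 * MACLEN 14, IPLEN 20, and MSS comes straight from gso_size.
 */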
static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
				       struct igc_ring *tx_ring)
{
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	struct igc_tx_buffer *first;
	u32 tx_flags = 0;
	unsigned short f;
	u8 hdr_len = 0;
	int tso = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGC_MAX_DATA_PER_TXD,
	 *	+ 1 desc for skb_headlen/IGC_MAX_DATA_PER_TXD,
	 *	+ 2 desc gap to keep tail from touching head,
	 *	+ 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_frag_size(
						&skb_shinfo(skb)->frags[f]));
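	/* Example with assumed sizes: a 1500-byte linear skb plus two 4KB
	 * frags needs count = 1 + 1 + 1 = 3 data descriptors (each chunk
	 * fits within IGC_MAX_DATA_PER_TXD); the '+ 3' below reserves the
	 * context descriptor and the two-descriptor gap noted above.
	 */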
	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);

		/* FIXME: add support for retrieving timestamps from
		 * the other timer registers before skipping the
		 * timestamping request.
		 */
		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGC_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igc_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igc_tx_csum(tx_ring, first);

	igc_tx_map(tx_ring, first, hdr_len);

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;

	return NETDEV_TX_OK;
}
static inline struct igc_ring *igc_tx_queue_mapping(struct igc_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igc_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb->len < 17) {
		if (skb_padto(skb, 17))
			return NETDEV_TX_OK;
		skb->len = 17;
	}

	return igc_xmit_frame_ring(skb, igc_tx_queue_mapping(adapter, skb));
}
static void igc_rx_checksum(struct igc_ring *ring,
			    union igc_adv_rx_desc *rx_desc,
			    struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igc_test_staterr(rx_desc,
			     IGC_RXDEXT_STATERR_TCPE |
			     IGC_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!(skb->len == 60 &&
		      test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igc_test_staterr(rx_desc, IGC_RXD_STAT_TCPCS |
				      IGC_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	netdev_dbg(ring->netdev, "cksum success: bits %08X\n",
		   le32_to_cpu(rx_desc->wb.upper.status_error));
}
static inline void igc_rx_hash(struct igc_ring *ring,
			       union igc_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}

/**
 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in order
 * to populate the hash, checksum, VLAN, protocol, and other fields within the
 * skb.
 */
static void igc_process_skb_fields(struct igc_ring *rx_ring,
				   union igc_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	igc_rx_hash(rx_ring, rx_desc, skb);

	igc_rx_checksum(rx_ring, rx_desc, skb);

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
					       const unsigned int size)
{
	struct igc_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

/**
 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 */
static void igc_add_rx_frag(struct igc_ring *rx_ring,
			    struct igc_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset ^= truesize;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGC_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
	rx_buffer->page_offset += truesize;
#endif
}
static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
				     struct igc_rx_buffer *rx_buffer,
				     union igc_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGC_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGC_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGC_SKB_PAD);
	__skb_put(skb, size);

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}

static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
					 struct igc_rx_buffer *rx_buffer,
					 union igc_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igc_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(igc_test_staterr(rx_desc, IGC_RXDADV_STAT_TSIP))) {
		igc_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGC_TS_HDR_LEN;
		size -= IGC_TS_HDR_LEN;
	}

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGC_RX_HDR_LEN)
		headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}
/**
 * igc_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void igc_reuse_rx_page(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *old_buff)
{
	u16 nta = rx_ring->next_to_alloc;
	struct igc_rx_buffer *new_buff;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma = old_buff->dma;
	new_buff->page = old_buff->page;
	new_buff->page_offset = old_buff->page_offset;
	new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
static inline bool igc_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igc_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGC_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGC_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
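/* The pagecnt_bias scheme above lets the driver retain a page across
 * many refills without touching the atomic page refcount on every use:
 * the bias counts references the driver still "owns", and
 * page_ref_count(page) minus the bias tells whether the stack still
 * holds the other half of the page. Only when the bias is exhausted is
 * the refcount bumped again, by USHRT_MAX at a time.
 */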
/**
 * igc_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean. If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 */
static bool igc_is_non_eop(struct igc_ring *rx_ring,
			   union igc_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGC_RX_DESC(rx_ring, ntc));

	if (likely(igc_test_staterr(rx_desc, IGC_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igc_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 */
static bool igc_cleanup_headers(struct igc_ring *rx_ring,
				union igc_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igc_test_staterr(rx_desc,
				       IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}
static void igc_put_rx_buffer(struct igc_ring *rx_ring,
			      struct igc_rx_buffer *rx_buffer)
{
	if (igc_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igc_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igc_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGC_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static inline unsigned int igc_rx_offset(struct igc_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGC_SKB_PAD : 0;
}

static bool igc_alloc_mapped_page(struct igc_ring *rx_ring,
				  struct igc_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igc_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igc_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGC_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igc_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}
/**
 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring
 * @cleaned_count: number of buffers to clean
 */
static void igc_alloc_rx_buffers(struct igc_ring *rx_ring, u16 cleaned_count)
{
	union igc_adv_rx_desc *rx_desc;
	u16 i = rx_ring->next_to_use;
	struct igc_rx_buffer *bi;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGC_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igc_rx_bufsz(rx_ring);

	do {
		if (!igc_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGC_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}
}
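/* igc_alloc_rx_buffers() biases 'i' by -count so the wrap test is a
 * cheap '!i' compare: e.g. with a 256-entry ring and next_to_use = 250,
 * i starts at -6 and hits zero exactly when the ring wraps; adding the
 * count back at the end restores the absolute index.
 */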
static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
{
	unsigned int total_bytes = 0, total_packets = 0;
	struct igc_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	u16 cleaned_count = igc_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union igc_adv_rx_desc *rx_desc;
		struct igc_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGC_RX_BUFFER_WRITE) {
			igc_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igc_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igc_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igc_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igc_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igc_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igc_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igc_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		igc_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igc_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}
/**
 * igc_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 */
static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
{
	struct igc_adapter *adapter = q_vector->adapter;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	struct igc_ring *tx_ring = q_vector->tx.ring;
	unsigned int i = tx_ring->next_to_clean;
	struct igc_tx_buffer *tx_buffer;
	union igc_adv_tx_desc *tx_desc;

	if (test_bit(__IGC_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGC_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGC_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGC_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);

	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct igc_hw *hw = &adapter->hw;

		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
		    (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
			/* detected Tx unit hang */
			netdev_err(tx_ring->netdev,
				   "Detected Tx Unit Hang\n"
				   "  Tx Queue             <%d>\n"
				   "  TDH                  <%x>\n"
				   "  TDT                  <%x>\n"
				   "  next_to_use          <%x>\n"
				   "  next_to_clean        <%x>\n"
				   "buffer_info[next_to_clean]\n"
				   "  time_stamp           <%lx>\n"
				   "  next_to_watch        <%p>\n"
				   "  jiffies              <%lx>\n"
				   "  desc.status          <%x>\n",
				   tx_ring->queue_index,
				   rd32(IGC_TDH(tx_ring->reg_idx)),
				   readl(tx_ring->tail),
				   tx_ring->next_to_use,
				   tx_ring->next_to_clean,
				   tx_buffer->time_stamp,
				   tx_buffer->next_to_watch,
				   jiffies,
				   tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGC_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
static int igc_find_mac_filter(struct igc_adapter *adapter,
			       enum igc_mac_filter_type type, const u8 *addr)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 ral, rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		ral = rd32(IGC_RAL(i));
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			continue;
		if (!!(rah & IGC_RAH_ASEL_SRC_ADDR) != type)
			continue;
		if ((rah & IGC_RAH_RAH_MASK) !=
		    le16_to_cpup((__le16 *)(addr + 4)))
			continue;
		if (ral != le32_to_cpup((__le32 *)(addr)))
			continue;

		return i;
	}

	return -1;
}

static int igc_get_avail_mac_filter_slot(struct igc_adapter *adapter)
{
	struct igc_hw *hw = &adapter->hw;
	int max_entries = hw->mac.rar_entry_count;
	u32 rah;
	int i;

	for (i = 0; i < max_entries; i++) {
		rah = rd32(IGC_RAH(i));

		if (!(rah & IGC_RAH_AV))
			return i;
	}

	return -1;
}
2208 * igc_add_mac_filter() - Add MAC address filter
2209 * @adapter: Pointer to adapter where the filter should be added
2210 * @type: MAC address filter type (source or destination)
2211 * @addr: MAC address
2212 * @queue: If non-negative, queue assignment feature is enabled and frames
2213 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2214 * assignment is disabled.
2216 * Return: 0 in case of success, negative errno code otherwise.
2218 static int igc_add_mac_filter(struct igc_adapter *adapter,
2219 enum igc_mac_filter_type type, const u8 *addr,
2222 struct net_device *dev = adapter->netdev;
2225 index = igc_find_mac_filter(adapter, type, addr);
2229 index = igc_get_avail_mac_filter_slot(adapter);
2233 netdev_dbg(dev, "Add MAC address filter: index %d type %s address %pM queue %d\n",
2234 index, type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2238 igc_set_mac_filter_hw(adapter, index, type, addr, queue);
2243 * igc_del_mac_filter() - Delete MAC address filter
2244 * @adapter: Pointer to adapter where the filter should be deleted from
2245 * @type: MAC address filter type (source or destination)
2246 * @addr: MAC address
2248 static void igc_del_mac_filter(struct igc_adapter *adapter,
2249 enum igc_mac_filter_type type, const u8 *addr)
2251 struct net_device *dev = adapter->netdev;
2254 index = igc_find_mac_filter(adapter, type, addr);
2259 /* If this is the default filter, we don't actually delete it.
2260 * We just reset to its default value i.e. disable queue
2263 netdev_dbg(dev, "Disable default MAC filter queue assignment");
2265 igc_set_mac_filter_hw(adapter, 0, type, addr, -1);
2267 netdev_dbg(dev, "Delete MAC address filter: index %d type %s address %pM\n",
2269 type == IGC_MAC_FILTER_TYPE_DST ? "dst" : "src",
2272 igc_clear_mac_filter_hw(adapter, index);
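/* Editor's example: a minimal, hypothetical internal caller of the two MAC
 * filter helpers above; the address and queue values are illustrative only:
 *
 *	static const u8 peer[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
 *	int err;
 *
 *	// steer frames *from* peer onto Rx queue 1; pass -1 as 'queue' to
 *	// match without forcing a queue assignment
 *	err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, peer, 1);
 *	...
 *	igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC, peer);
 */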
2277 * igc_add_vlan_prio_filter() - Add VLAN priority filter
2278 * @adapter: Pointer to adapter where the filter should be added
2279 * @prio: VLAN priority value
2280 * @queue: Queue number which matching frames are assigned to
2282 * Return: 0 in case of success, negative errno code otherwise.
2284 static int igc_add_vlan_prio_filter(struct igc_adapter *adapter, int prio,
2287 struct net_device *dev = adapter->netdev;
2288 struct igc_hw *hw = &adapter->hw;
2291 vlanpqf = rd32(IGC_VLANPQF);
2293 if (vlanpqf & IGC_VLANPQF_VALID(prio)) {
2294 netdev_dbg(dev, "VLAN priority filter already in use\n");
2298 vlanpqf |= IGC_VLANPQF_QSEL(prio, queue);
2299 vlanpqf |= IGC_VLANPQF_VALID(prio);
2301 wr32(IGC_VLANPQF, vlanpqf);
2303 netdev_dbg(dev, "Add VLAN priority filter: prio %d queue %d\n",
2309 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
2310 * @adapter: Pointer to adapter where the filter should be deleted from
2311 * @prio: VLAN priority value
2313 static void igc_del_vlan_prio_filter(struct igc_adapter *adapter, int prio)
2315 struct igc_hw *hw = &adapter->hw;
2318 vlanpqf = rd32(IGC_VLANPQF);
2320 vlanpqf &= ~IGC_VLANPQF_VALID(prio);
2321 vlanpqf &= ~IGC_VLANPQF_QSEL(prio, IGC_VLANPQF_QUEUE_MASK);
2323 wr32(IGC_VLANPQF, vlanpqf);
2325 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n",
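/* Editor's note: @prio is the 3-bit PCP field of a VLAN tag; the NFC code
 * later in this file derives it exactly this way. Worked example
 * (illustrative values):
 *
 *	u16 vlan_tci = 0xa005;					// PCP 5, VID 5
 *	int prio = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;	// = 5
 *
 *	igc_add_vlan_prio_filter(adapter, prio, 2);	// PCP 5 -> Rx queue 2
 *	...
 *	igc_del_vlan_prio_filter(adapter, prio);
 */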
2329 static int igc_get_avail_etype_filter_slot(struct igc_adapter *adapter)
2331 struct igc_hw *hw = &adapter->hw;
2334 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2335 u32 etqf = rd32(IGC_ETQF(i));
2337 if (!(etqf & IGC_ETQF_FILTER_ENABLE))
2345 * igc_add_etype_filter() - Add ethertype filter
2346 * @adapter: Pointer to adapter where the filter should be added
2347 * @etype: Ethertype value
2348 * @queue: If non-negative, queue assignment feature is enabled and frames
2349 * matching the filter are enqueued onto 'queue'. Otherwise, queue
2350 * assignment is disabled.
2352 * Return: 0 in case of success, negative errno code otherwise.
2354 static int igc_add_etype_filter(struct igc_adapter *adapter, u16 etype,
2357 struct igc_hw *hw = &adapter->hw;
2361 index = igc_get_avail_etype_filter_slot(adapter);
2365 etqf = rd32(IGC_ETQF(index));
2367 etqf &= ~IGC_ETQF_ETYPE_MASK;
2371 etqf &= ~IGC_ETQF_QUEUE_MASK;
2372 etqf |= (queue << IGC_ETQF_QUEUE_SHIFT);
2373 etqf |= IGC_ETQF_QUEUE_ENABLE;
2376 etqf |= IGC_ETQF_FILTER_ENABLE;
2378 wr32(IGC_ETQF(index), etqf);
2380 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n",
2385 static int igc_find_etype_filter(struct igc_adapter *adapter, u16 etype)
2387 struct igc_hw *hw = &adapter->hw;
2390 for (i = 0; i < MAX_ETYPE_FILTER; i++) {
2391 u32 etqf = rd32(IGC_ETQF(i));
2393 if ((etqf & IGC_ETQF_ETYPE_MASK) == etype)
2401 * igc_del_etype_filter() - Delete ethertype filter
2402 * @adapter: Pointer to adapter where the filter should be deleted from
2403 * @etype: Ethertype value
2405 static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype)
2407 struct igc_hw *hw = &adapter->hw;
2410 index = igc_find_etype_filter(adapter, etype);
2414 wr32(IGC_ETQF(index), 0);
2416 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n",
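/* Editor's example: steering PTP-over-Ethernet frames (ETH_P_1588, 0x88f7)
 * to Rx queue 0 with the ethertype helpers above -- an illustrative use,
 * not something this driver does at this point:
 *
 *	err = igc_add_etype_filter(adapter, ETH_P_1588, 0);
 *	...
 *	igc_del_etype_filter(adapter, ETH_P_1588);
 */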
2420 static int igc_enable_nfc_rule(struct igc_adapter *adapter,
2421 const struct igc_nfc_rule *rule)
2425 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
2426 err = igc_add_etype_filter(adapter, rule->filter.etype,
2432 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
2433 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2434 rule->filter.src_addr, rule->action);
2439 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
2440 err = igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2441 rule->filter.dst_addr, rule->action);
2446 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2447 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2450 err = igc_add_vlan_prio_filter(adapter, prio, rule->action);
2458 static void igc_disable_nfc_rule(struct igc_adapter *adapter,
2459 const struct igc_nfc_rule *rule)
2461 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
2462 igc_del_etype_filter(adapter, rule->filter.etype);
2464 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
2465 int prio = (rule->filter.vlan_tci & VLAN_PRIO_MASK) >>
2468 igc_del_vlan_prio_filter(adapter, prio);
2471 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
2472 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_SRC,
2473 rule->filter.src_addr);
2475 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
2476 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST,
2477 rule->filter.dst_addr);
2481 * igc_get_nfc_rule() - Get NFC rule
2482 * @adapter: Pointer to adapter
2483 * @location: Rule location
2485 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2487 * Return: Pointer to NFC rule at @location. If not found, NULL.
2489 struct igc_nfc_rule *igc_get_nfc_rule(struct igc_adapter *adapter,
2492 struct igc_nfc_rule *rule;
2494 list_for_each_entry(rule, &adapter->nfc_rule_list, list) {
2495 if (rule->location == location)
2497 if (rule->location > location)
2505 * igc_del_nfc_rule() - Delete NFC rule
2506 * @adapter: Pointer to adapter
2507 * @rule: Pointer to rule to be deleted
2509 * Disable NFC rule in hardware and delete it from adapter.
2511 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2513 void igc_del_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2515 igc_disable_nfc_rule(adapter, rule);
2517 list_del(&rule->list);
2518 adapter->nfc_rule_count--;
2523 static void igc_flush_nfc_rules(struct igc_adapter *adapter)
2525 struct igc_nfc_rule *rule, *tmp;
2527 mutex_lock(&adapter->nfc_rule_lock);
2529 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list)
2530 igc_del_nfc_rule(adapter, rule);
2532 mutex_unlock(&adapter->nfc_rule_lock);
2536 * igc_add_nfc_rule() - Add NFC rule
2537 * @adapter: Pointer to adapter
2538 * @rule: Pointer to rule to be added
2540 * Enable NFC rule in hardware and add it to adapter.
2542 * Context: Expects adapter->nfc_rule_lock to be held by caller.
2544 * Return: 0 on success, negative errno on failure.
2546 int igc_add_nfc_rule(struct igc_adapter *adapter, struct igc_nfc_rule *rule)
2548 struct igc_nfc_rule *pred, *cur;
2551 err = igc_enable_nfc_rule(adapter, rule);
2556 list_for_each_entry(cur, &adapter->nfc_rule_list, list) {
2557 if (cur->location >= rule->location)
2562 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list);
2563 adapter->nfc_rule_count++;
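/* Editor's example: per the "Context:" notes above, all three rule helpers
 * expect adapter->nfc_rule_lock to be held. A minimal, hypothetical caller
 * (the real entry points live in the ethtool code):
 *
 *	mutex_lock(&adapter->nfc_rule_lock);
 *	old = igc_get_nfc_rule(adapter, rule->location);
 *	if (old)
 *		igc_del_nfc_rule(adapter, old);
 *	err = igc_add_nfc_rule(adapter, rule);
 *	mutex_unlock(&adapter->nfc_rule_lock);
 */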
2567 static void igc_restore_nfc_rules(struct igc_adapter *adapter)
2569 struct igc_nfc_rule *rule;
2571 mutex_lock(&adapter->nfc_rule_lock);
2573 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list)
2574 igc_enable_nfc_rule(adapter, rule);
2576 mutex_unlock(&adapter->nfc_rule_lock);
2579 static int igc_uc_sync(struct net_device *netdev, const unsigned char *addr)
2581 struct igc_adapter *adapter = netdev_priv(netdev);
2583 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1);
2586 static int igc_uc_unsync(struct net_device *netdev, const unsigned char *addr)
2588 struct igc_adapter *adapter = netdev_priv(netdev);
2590 igc_del_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr);
2595 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2596 * @netdev: network interface device structure
2598 * The set_rx_mode entry point is called whenever the unicast or multicast
2599 * address lists or the network interface flags are updated. This routine is
2600 * responsible for configuring the hardware for proper unicast, multicast,
2601 * promiscuous mode, and all-multi behavior.
2603 static void igc_set_rx_mode(struct net_device *netdev)
2605 struct igc_adapter *adapter = netdev_priv(netdev);
2606 struct igc_hw *hw = &adapter->hw;
2607 u32 rctl = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
2610 /* Check for Promiscuous and All Multicast modes */
2611 if (netdev->flags & IFF_PROMISC) {
2612 rctl |= IGC_RCTL_UPE | IGC_RCTL_MPE;
2614 if (netdev->flags & IFF_ALLMULTI) {
2615 rctl |= IGC_RCTL_MPE;
2617 /* Write addresses to the MTA, if the attempt fails
2618 * then we should just turn on promiscuous mode so
2619 * that we can at least receive multicast traffic
2621 count = igc_write_mc_addr_list(netdev);
2623 rctl |= IGC_RCTL_MPE;
2627 /* Write addresses to available RAR registers, if there is not
2628 * sufficient space to store all the addresses then enable
2629 * unicast promiscuous mode
2631 if (__dev_uc_sync(netdev, igc_uc_sync, igc_uc_unsync))
2632 rctl |= IGC_RCTL_UPE;
2634 /* update state of unicast and multicast */
2635 rctl |= rd32(IGC_RCTL) & ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
2636 wr32(IGC_RCTL, rctl);
2638 #if (PAGE_SIZE < 8192)
2639 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB)
2640 rlpml = IGC_MAX_FRAME_BUILD_SKB;
2642 wr32(IGC_RLPML, rlpml);
2646 * igc_configure - configure the hardware for RX and TX
2647 * @adapter: private board structure
2649 static void igc_configure(struct igc_adapter *adapter)
2651 struct net_device *netdev = adapter->netdev;
2654 igc_get_hw_control(adapter);
2655 igc_set_rx_mode(netdev);
2657 igc_setup_tctl(adapter);
2658 igc_setup_mrqc(adapter);
2659 igc_setup_rctl(adapter);
2661 igc_set_default_mac_filter(adapter);
2662 igc_restore_nfc_rules(adapter);
2664 igc_configure_tx(adapter);
2665 igc_configure_rx(adapter);
2667 igc_rx_fifo_flush_base(&adapter->hw);
2669 /* call igc_desc_unused which always leaves
2670 * at least 1 descriptor unused to make sure
2671 * next_to_use != next_to_clean
2673 for (i = 0; i < adapter->num_rx_queues; i++) {
2674 struct igc_ring *ring = adapter->rx_ring[i];
2676 igc_alloc_rx_buffers(ring, igc_desc_unused(ring));
2681 * igc_write_ivar - configure ivar for given MSI-X vector
2682 * @hw: pointer to the HW structure
2683 * @msix_vector: vector number we are allocating to a given ring
2684 * @index: row index of IVAR register to write within IVAR table
2685	 * @offset: column offset in IVAR, should be a multiple of 8
2687 * The IVAR table consists of 2 columns,
2688	 * each containing a cause allocation for an Rx and a Tx ring, and a
2689 * variable number of rows depending on the number of queues supported.
2691 static void igc_write_ivar(struct igc_hw *hw, int msix_vector,
2692 int index, int offset)
2694 u32 ivar = array_rd32(IGC_IVAR0, index);
2696 /* clear any bits that are currently set */
2697 ivar &= ~((u32)0xFF << offset);
2699 /* write vector and valid bit */
2700 ivar |= (msix_vector | IGC_IVAR_VALID) << offset;
2702 array_wr32(IGC_IVAR0, index, ivar);
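/* Editor's note: igc_assign_vector() below derives this helper's arguments
 * from a queue number: row index = queue >> 1, and each queue owns one byte
 * of the 32-bit IVAR entry (Rx at bit offsets 0/16, Tx at 8/24). Worked
 * example, mapping Rx queue 3 to MSI-X vector 2:
 *
 *	igc_write_ivar(hw, 2, 3 >> 1, (3 & 0x1) << 4);
 *	// IVAR0[1] bits 23:16 become IGC_IVAR_VALID | 2
 */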
2705 static void igc_assign_vector(struct igc_q_vector *q_vector, int msix_vector)
2707 struct igc_adapter *adapter = q_vector->adapter;
2708 struct igc_hw *hw = &adapter->hw;
2709 int rx_queue = IGC_N0_QUEUE;
2710 int tx_queue = IGC_N0_QUEUE;
2712 if (q_vector->rx.ring)
2713 rx_queue = q_vector->rx.ring->reg_idx;
2714 if (q_vector->tx.ring)
2715 tx_queue = q_vector->tx.ring->reg_idx;
2717 switch (hw->mac.type) {
2719 if (rx_queue > IGC_N0_QUEUE)
2720 igc_write_ivar(hw, msix_vector,
2722 (rx_queue & 0x1) << 4);
2723 if (tx_queue > IGC_N0_QUEUE)
2724 igc_write_ivar(hw, msix_vector,
2726 ((tx_queue & 0x1) << 4) + 8);
2727 q_vector->eims_value = BIT(msix_vector);
2730 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n");
2734 /* add q_vector eims value to global eims_enable_mask */
2735 adapter->eims_enable_mask |= q_vector->eims_value;
2737 /* configure q_vector to set itr on first interrupt */
2738 q_vector->set_itr = 1;
2742 * igc_configure_msix - Configure MSI-X hardware
2743 * @adapter: Pointer to adapter structure
2745 * igc_configure_msix sets up the hardware to properly
2746 * generate MSI-X interrupts.
2748 static void igc_configure_msix(struct igc_adapter *adapter)
2750 struct igc_hw *hw = &adapter->hw;
2754 adapter->eims_enable_mask = 0;
2756 /* set vector for other causes, i.e. link changes */
2757 switch (hw->mac.type) {
2759 /* Turn on MSI-X capability first, or our settings
2760 * won't stick. And it will take days to debug.
2762 wr32(IGC_GPIE, IGC_GPIE_MSIX_MODE |
2763 IGC_GPIE_PBA | IGC_GPIE_EIAME |
2766 /* enable msix_other interrupt */
2767 adapter->eims_other = BIT(vector);
2768 tmp = (vector++ | IGC_IVAR_VALID) << 8;
2770 wr32(IGC_IVAR_MISC, tmp);
2773 /* do nothing, since nothing else supports MSI-X */
2775 } /* switch (hw->mac.type) */
2777 adapter->eims_enable_mask |= adapter->eims_other;
2779 for (i = 0; i < adapter->num_q_vectors; i++)
2780 igc_assign_vector(adapter->q_vector[i], vector++);
2786 * igc_irq_enable - Enable default interrupt generation settings
2787 * @adapter: board private structure
2789 static void igc_irq_enable(struct igc_adapter *adapter)
2791 struct igc_hw *hw = &adapter->hw;
2793 if (adapter->msix_entries) {
2794 u32 ims = IGC_IMS_LSC | IGC_IMS_DOUTSYNC | IGC_IMS_DRSTA;
2795 u32 regval = rd32(IGC_EIAC);
2797 wr32(IGC_EIAC, regval | adapter->eims_enable_mask);
2798 regval = rd32(IGC_EIAM);
2799 wr32(IGC_EIAM, regval | adapter->eims_enable_mask);
2800 wr32(IGC_EIMS, adapter->eims_enable_mask);
2803 wr32(IGC_IMS, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2804 wr32(IGC_IAM, IMS_ENABLE_MASK | IGC_IMS_DRSTA);
2809 * igc_irq_disable - Mask off interrupt generation on the NIC
2810 * @adapter: board private structure
2812 static void igc_irq_disable(struct igc_adapter *adapter)
2814 struct igc_hw *hw = &adapter->hw;
2816 if (adapter->msix_entries) {
2817 u32 regval = rd32(IGC_EIAM);
2819 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask);
2820 wr32(IGC_EIMC, adapter->eims_enable_mask);
2821 regval = rd32(IGC_EIAC);
2822 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask);
2829 if (adapter->msix_entries) {
2832 synchronize_irq(adapter->msix_entries[vector++].vector);
2834 for (i = 0; i < adapter->num_q_vectors; i++)
2835 synchronize_irq(adapter->msix_entries[vector++].vector);
2837 synchronize_irq(adapter->pdev->irq);
2841 void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
2842 const u32 max_rss_queues)
2844 /* Determine if we need to pair queues. */
2845 /* If rss_queues > half of max_rss_queues, pair the queues in
2846 * order to conserve interrupts due to limited supply.
2848 if (adapter->rss_queues > (max_rss_queues / 2))
2849 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
2851 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS;
2854 unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter)
2856 return IGC_MAX_RX_QUEUES;
2859 static void igc_init_queue_configuration(struct igc_adapter *adapter)
2863 max_rss_queues = igc_get_max_rss_queues(adapter);
2864 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
2866 igc_set_flag_queue_pairs(adapter, max_rss_queues);
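/* Editor's note, worked example: with 8 online CPUs and 4 RSS queues,
 * rss_queues = min(4, 8) = 4; since 4 > 4 / 2, IGC_FLAG_QUEUE_PAIRS is set
 * and each MSI-X vector services a Tx/Rx queue pair rather than a single
 * queue, halving the number of vectors needed.
 */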
2870 * igc_reset_q_vector - Reset config for interrupt vector
2871 * @adapter: board private structure to initialize
2872 * @v_idx: Index of vector to be reset
2874 * If NAPI is enabled it will delete any references to the
2875 * NAPI struct. This is preparation for igc_free_q_vector.
2877 static void igc_reset_q_vector(struct igc_adapter *adapter, int v_idx)
2879 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2881	/* if we're coming from igc_set_interrupt_capability, the vectors are not yet allocated */
2887 if (q_vector->tx.ring)
2888 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
2890 if (q_vector->rx.ring)
2891 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
2893 netif_napi_del(&q_vector->napi);
2897 * igc_free_q_vector - Free memory allocated for specific interrupt vector
2898 * @adapter: board private structure to initialize
2899 * @v_idx: Index of vector to be freed
2901 * This function frees the memory allocated to the q_vector.
2903 static void igc_free_q_vector(struct igc_adapter *adapter, int v_idx)
2905 struct igc_q_vector *q_vector = adapter->q_vector[v_idx];
2907 adapter->q_vector[v_idx] = NULL;
2909 /* igc_get_stats64() might access the rings on this vector,
2910 * we must wait a grace period before freeing it.
2913 kfree_rcu(q_vector, rcu);
2917 * igc_free_q_vectors - Free memory allocated for interrupt vectors
2918 * @adapter: board private structure to initialize
2920 * This function frees the memory allocated to the q_vectors. In addition if
2921 * NAPI is enabled it will delete any references to the NAPI struct prior
2922 * to freeing the q_vector.
2924 static void igc_free_q_vectors(struct igc_adapter *adapter)
2926 int v_idx = adapter->num_q_vectors;
2928 adapter->num_tx_queues = 0;
2929 adapter->num_rx_queues = 0;
2930 adapter->num_q_vectors = 0;
2933 igc_reset_q_vector(adapter, v_idx);
2934 igc_free_q_vector(adapter, v_idx);
2939 * igc_update_itr - update the dynamic ITR value based on statistics
2940 * @q_vector: pointer to q_vector
2941 * @ring_container: ring info to update the itr for
2943 * Stores a new ITR value based on packets and byte
2944 * counts during the last interrupt. The advantage of per interrupt
2945 * computation is faster updates and more accurate ITR for the current
2946 * traffic pattern. Constants in this function were computed
2947 * based on theoretical maximum wire speed and thresholds were set based
2948 * on testing data as well as attempting to minimize response time
2949 * while increasing bulk throughput.
2950 * NOTE: These calculations are only valid when operating in a single-
2951 * queue environment.
2953 static void igc_update_itr(struct igc_q_vector *q_vector,
2954 struct igc_ring_container *ring_container)
2956 unsigned int packets = ring_container->total_packets;
2957 unsigned int bytes = ring_container->total_bytes;
2958 u8 itrval = ring_container->itr;
2960 /* no packets, exit with status unchanged */
2965 case lowest_latency:
2966 /* handle TSO and jumbo frames */
2967 if (bytes / packets > 8000)
2968 itrval = bulk_latency;
2969 else if ((packets < 5) && (bytes > 512))
2970 itrval = low_latency;
2972 case low_latency: /* 50 usec aka 20000 ints/s */
2973 if (bytes > 10000) {
2974 /* this if handles the TSO accounting */
2975 if (bytes / packets > 8000)
2976 itrval = bulk_latency;
2977 else if ((packets < 10) || ((bytes / packets) > 1200))
2978 itrval = bulk_latency;
2979 else if ((packets > 35))
2980 itrval = lowest_latency;
2981 } else if (bytes / packets > 2000) {
2982 itrval = bulk_latency;
2983 } else if (packets <= 2 && bytes < 512) {
2984 itrval = lowest_latency;
2987 case bulk_latency: /* 250 usec aka 4000 ints/s */
2988 if (bytes > 25000) {
2990 itrval = low_latency;
2991 } else if (bytes < 1500) {
2992 itrval = low_latency;
2997 /* clear work counters since we have the values we need */
2998 ring_container->total_bytes = 0;
2999 ring_container->total_packets = 0;
3001 /* write updated itr to ring container */
3002 ring_container->itr = itrval;
3005 static void igc_set_itr(struct igc_q_vector *q_vector)
3007 struct igc_adapter *adapter = q_vector->adapter;
3008 u32 new_itr = q_vector->itr_val;
3011 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
3012 switch (adapter->link_speed) {
3016 new_itr = IGC_4K_ITR;
3022 igc_update_itr(q_vector, &q_vector->tx);
3023 igc_update_itr(q_vector, &q_vector->rx);
3025 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
3027 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3028 if (current_itr == lowest_latency &&
3029 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3030 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3031 current_itr = low_latency;
3033 switch (current_itr) {
3034 /* counts and packets in update_itr are dependent on these numbers */
3035 case lowest_latency:
3036 new_itr = IGC_70K_ITR; /* 70,000 ints/sec */
3039 new_itr = IGC_20K_ITR; /* 20,000 ints/sec */
3042 new_itr = IGC_4K_ITR; /* 4,000 ints/sec */
3049 if (new_itr != q_vector->itr_val) {
3050 /* this attempts to bias the interrupt rate towards Bulk
3051 * by adding intermediate steps when interrupt rate is
3054 new_itr = new_itr > q_vector->itr_val ?
3055 max((new_itr * q_vector->itr_val) /
3056 (new_itr + (q_vector->itr_val >> 2)),
3058 /* Don't write the value here; it resets the adapter's
3059 * internal timer, and causes us to delay far longer than
3060 * we should between interrupts. Instead, we write the ITR
3061 * value at the beginning of the next interrupt so the timing
3062 * ends up being correct.
3064 q_vector->itr_val = new_itr;
3065 q_vector->set_itr = 1;
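/* Editor's note, unit check (approximate): the ITR constants encode the
 * interval in ~0.25 usec steps, so the mapping used above works out as
 *
 *	ints/sec ~= 4000000 / itr_val
 *	IGC_4K_ITR  = 980 -> ~245 usec -> ~4000 ints/s
 *	IGC_20K_ITR = 196 -> ~49 usec  -> ~20000 ints/s
 *	IGC_70K_ITR = 56  -> ~14 usec  -> ~70000 ints/s
 */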
3069 static void igc_reset_interrupt_capability(struct igc_adapter *adapter)
3071 int v_idx = adapter->num_q_vectors;
3073 if (adapter->msix_entries) {
3074 pci_disable_msix(adapter->pdev);
3075 kfree(adapter->msix_entries);
3076 adapter->msix_entries = NULL;
3077 } else if (adapter->flags & IGC_FLAG_HAS_MSI) {
3078 pci_disable_msi(adapter->pdev);
3082 igc_reset_q_vector(adapter, v_idx);
3086 * igc_set_interrupt_capability - set MSI or MSI-X if supported
3087 * @adapter: Pointer to adapter structure
3088 * @msix: boolean value for MSI-X capability
3090 * Attempt to configure interrupts using the best available
3091 * capabilities of the hardware and kernel.
3093 static void igc_set_interrupt_capability(struct igc_adapter *adapter,
3101 adapter->flags |= IGC_FLAG_HAS_MSIX;
3103 /* Number of supported queues. */
3104 adapter->num_rx_queues = adapter->rss_queues;
3106 adapter->num_tx_queues = adapter->rss_queues;
3108 /* start with one vector for every Rx queue */
3109 numvecs = adapter->num_rx_queues;
3111 /* if Tx handler is separate add 1 for every Tx queue */
3112 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS))
3113 numvecs += adapter->num_tx_queues;
3115 /* store the number of vectors reserved for queues */
3116 adapter->num_q_vectors = numvecs;
3118 /* add 1 vector for link status interrupts */
3121 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
3124 if (!adapter->msix_entries)
3127 /* populate entry values */
3128 for (i = 0; i < numvecs; i++)
3129 adapter->msix_entries[i].entry = i;
3131 err = pci_enable_msix_range(adapter->pdev,
3132 adapter->msix_entries,
3138 kfree(adapter->msix_entries);
3139 adapter->msix_entries = NULL;
3141 igc_reset_interrupt_capability(adapter);
3144 adapter->flags &= ~IGC_FLAG_HAS_MSIX;
3146 adapter->rss_queues = 1;
3147 adapter->flags |= IGC_FLAG_QUEUE_PAIRS;
3148 adapter->num_rx_queues = 1;
3149 adapter->num_tx_queues = 1;
3150 adapter->num_q_vectors = 1;
3151 if (!pci_enable_msi(adapter->pdev))
3152 adapter->flags |= IGC_FLAG_HAS_MSI;
3156 * igc_update_ring_itr - update the dynamic ITR value based on packet size
3157 * @q_vector: pointer to q_vector
3159	 * Stores a new ITR value based strictly on packet size. This
3160 * algorithm is less sophisticated than that used in igc_update_itr,
3161 * due to the difficulty of synchronizing statistics across multiple
3162 * receive rings. The divisors and thresholds used by this function
3163 * were determined based on theoretical maximum wire speed and testing
3164	 * data, in order to minimize response time while increasing bulk throughput.
3166 * NOTE: This function is called only when operating in a multiqueue
3167 * receive environment.
3169 static void igc_update_ring_itr(struct igc_q_vector *q_vector)
3171 struct igc_adapter *adapter = q_vector->adapter;
3172 int new_val = q_vector->itr_val;
3173 int avg_wire_size = 0;
3174 unsigned int packets;
3176 /* For non-gigabit speeds, just fix the interrupt rate at 4000
3177 * ints/sec - ITR timer value of 120 ticks.
3179 switch (adapter->link_speed) {
3182 new_val = IGC_4K_ITR;
3188 packets = q_vector->rx.total_packets;
3190 avg_wire_size = q_vector->rx.total_bytes / packets;
3192 packets = q_vector->tx.total_packets;
3194 avg_wire_size = max_t(u32, avg_wire_size,
3195 q_vector->tx.total_bytes / packets);
3197 /* if avg_wire_size isn't set no work was done */
3201 /* Add 24 bytes to size to account for CRC, preamble, and gap */
3202 avg_wire_size += 24;
3204 /* Don't starve jumbo frames */
3205 avg_wire_size = min(avg_wire_size, 3000);
3207 /* Give a little boost to mid-size frames */
3208 if (avg_wire_size > 300 && avg_wire_size < 1200)
3209 new_val = avg_wire_size / 3;
3211 new_val = avg_wire_size / 2;
3213 /* conservative mode (itr 3) eliminates the lowest_latency setting */
3214 if (new_val < IGC_20K_ITR &&
3215 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
3216 (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
3217 new_val = IGC_20K_ITR;
3220 if (new_val != q_vector->itr_val) {
3221 q_vector->itr_val = new_val;
3222 q_vector->set_itr = 1;
3225 q_vector->rx.total_bytes = 0;
3226 q_vector->rx.total_packets = 0;
3227 q_vector->tx.total_bytes = 0;
3228 q_vector->tx.total_packets = 0;
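/* Editor's note, worked example: 100 packets totalling 60000 bytes in the
 * last interval gives avg_wire_size = 600; +24 overhead = 624, which falls
 * in the 300..1200 mid-size band, so new_val = 624 / 3 = 208 (~52 usec,
 * ~19000 ints/s). The conservative-mode check leaves this untouched, since
 * 208 >= IGC_20K_ITR (196).
 */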
3231 static void igc_ring_irq_enable(struct igc_q_vector *q_vector)
3233 struct igc_adapter *adapter = q_vector->adapter;
3234 struct igc_hw *hw = &adapter->hw;
3236 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
3237 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
3238 if (adapter->num_q_vectors == 1)
3239 igc_set_itr(q_vector);
3241 igc_update_ring_itr(q_vector);
3244 if (!test_bit(__IGC_DOWN, &adapter->state)) {
3245 if (adapter->msix_entries)
3246 wr32(IGC_EIMS, q_vector->eims_value);
3248 igc_irq_enable(adapter);
3252 static void igc_add_ring(struct igc_ring *ring,
3253 struct igc_ring_container *head)
3260 * igc_cache_ring_register - Descriptor ring to register mapping
3261 * @adapter: board private structure to initialize
3263 * Once we know the feature-set enabled for the device, we'll cache
3264 * the register offset the descriptor ring is assigned to.
3266 static void igc_cache_ring_register(struct igc_adapter *adapter)
3270 switch (adapter->hw.mac.type) {
3273 for (; i < adapter->num_rx_queues; i++)
3274 adapter->rx_ring[i]->reg_idx = i;
3275 for (; j < adapter->num_tx_queues; j++)
3276 adapter->tx_ring[j]->reg_idx = j;
3282 * igc_poll - NAPI Rx polling callback
3283 * @napi: napi polling structure
3284 * @budget: count of how many packets we should handle
3286 static int igc_poll(struct napi_struct *napi, int budget)
3288 struct igc_q_vector *q_vector = container_of(napi,
3289 struct igc_q_vector,
3291 bool clean_complete = true;
3294 if (q_vector->tx.ring)
3295 clean_complete = igc_clean_tx_irq(q_vector, budget);
3297 if (q_vector->rx.ring) {
3298 int cleaned = igc_clean_rx_irq(q_vector, budget);
3300 work_done += cleaned;
3301 if (cleaned >= budget)
3302 clean_complete = false;
3305 /* If all work not completed, return budget and keep polling */
3306 if (!clean_complete)
3309 /* Exit the polling mode, but don't re-enable interrupts if stack might
3310 * poll us due to busy-polling
3312 if (likely(napi_complete_done(napi, work_done)))
3313 igc_ring_irq_enable(q_vector);
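	/* NAPI contract: a poll that called napi_complete_done() must return
	 * strictly less than the full @budget; the min() below keeps that
	 * invariant even in corner cases.
	 */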
3315 return min(work_done, budget - 1);
3319 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
3320 * @adapter: board private structure to initialize
3321 * @v_count: q_vectors allocated on adapter, used for ring interleaving
3322 * @v_idx: index of vector in adapter struct
3323 * @txr_count: total number of Tx rings to allocate
3324 * @txr_idx: index of first Tx ring to allocate
3325 * @rxr_count: total number of Rx rings to allocate
3326 * @rxr_idx: index of first Rx ring to allocate
3328 * We allocate one q_vector. If allocation fails we return -ENOMEM.
3330 static int igc_alloc_q_vector(struct igc_adapter *adapter,
3331 unsigned int v_count, unsigned int v_idx,
3332 unsigned int txr_count, unsigned int txr_idx,
3333 unsigned int rxr_count, unsigned int rxr_idx)
3335 struct igc_q_vector *q_vector;
3336 struct igc_ring *ring;
3339 /* igc only supports 1 Tx and/or 1 Rx queue per vector */
3340 if (txr_count > 1 || rxr_count > 1)
3343 ring_count = txr_count + rxr_count;
3345 /* allocate q_vector and rings */
3346 q_vector = adapter->q_vector[v_idx];
3348 q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
3351 memset(q_vector, 0, struct_size(q_vector, ring, ring_count));
3355 /* initialize NAPI */
3356 netif_napi_add(adapter->netdev, &q_vector->napi,
3359 /* tie q_vector and adapter together */
3360 adapter->q_vector[v_idx] = q_vector;
3361 q_vector->adapter = adapter;
3363 /* initialize work limits */
3364 q_vector->tx.work_limit = adapter->tx_work_limit;
3366 /* initialize ITR configuration */
3367 q_vector->itr_register = adapter->io_addr + IGC_EITR(0);
3368 q_vector->itr_val = IGC_START_ITR;
3370 /* initialize pointer to rings */
3371 ring = q_vector->ring;
3373 /* initialize ITR */
3375 /* rx or rx/tx vector */
3376 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
3377 q_vector->itr_val = adapter->rx_itr_setting;
3379 /* tx only vector */
3380 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
3381 q_vector->itr_val = adapter->tx_itr_setting;
3385 /* assign generic ring traits */
3386 ring->dev = &adapter->pdev->dev;
3387 ring->netdev = adapter->netdev;
3389 /* configure backlink on ring */
3390 ring->q_vector = q_vector;
3392 /* update q_vector Tx values */
3393 igc_add_ring(ring, &q_vector->tx);
3395 /* apply Tx specific ring traits */
3396 ring->count = adapter->tx_ring_count;
3397 ring->queue_index = txr_idx;
3399 /* assign ring to adapter */
3400 adapter->tx_ring[txr_idx] = ring;
3402 /* push pointer to next ring */
3407 /* assign generic ring traits */
3408 ring->dev = &adapter->pdev->dev;
3409 ring->netdev = adapter->netdev;
3411 /* configure backlink on ring */
3412 ring->q_vector = q_vector;
3414 /* update q_vector Rx values */
3415 igc_add_ring(ring, &q_vector->rx);
3417 /* apply Rx specific ring traits */
3418 ring->count = adapter->rx_ring_count;
3419 ring->queue_index = rxr_idx;
3421 /* assign ring to adapter */
3422 adapter->rx_ring[rxr_idx] = ring;
3429 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
3430 * @adapter: board private structure to initialize
3432	 * We allocate one q_vector per queue interrupt. If allocation fails we return -ENOMEM.
3435 static int igc_alloc_q_vectors(struct igc_adapter *adapter)
3437 int rxr_remaining = adapter->num_rx_queues;
3438 int txr_remaining = adapter->num_tx_queues;
3439 int rxr_idx = 0, txr_idx = 0, v_idx = 0;
3440 int q_vectors = adapter->num_q_vectors;
3443 if (q_vectors >= (rxr_remaining + txr_remaining)) {
3444 for (; rxr_remaining; v_idx++) {
3445 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3451 /* update counts and index */
3457 for (; v_idx < q_vectors; v_idx++) {
3458 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
3459 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
3461 err = igc_alloc_q_vector(adapter, q_vectors, v_idx,
3462 tqpv, txr_idx, rqpv, rxr_idx);
3467 /* update counts and index */
3468 rxr_remaining -= rqpv;
3469 txr_remaining -= tqpv;
3477 adapter->num_tx_queues = 0;
3478 adapter->num_rx_queues = 0;
3479 adapter->num_q_vectors = 0;
3482 igc_free_q_vector(adapter, v_idx);
3488 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
3489 * @adapter: Pointer to adapter structure
3490 * @msix: boolean for MSI-X capability
3492 * This function initializes the interrupts and allocates all of the queues.
3494 static int igc_init_interrupt_scheme(struct igc_adapter *adapter, bool msix)
3496 struct net_device *dev = adapter->netdev;
3499 igc_set_interrupt_capability(adapter, msix);
3501 err = igc_alloc_q_vectors(adapter);
3503 netdev_err(dev, "Unable to allocate memory for vectors\n");
3504 goto err_alloc_q_vectors;
3507 igc_cache_ring_register(adapter);
3511 err_alloc_q_vectors:
3512 igc_reset_interrupt_capability(adapter);
3517 * igc_sw_init - Initialize general software structures (struct igc_adapter)
3518 * @adapter: board private structure to initialize
3520 * igc_sw_init initializes the Adapter private data structure.
3521 * Fields are initialized based on PCI device information and
3522 * OS network device settings (MTU size).
3524 static int igc_sw_init(struct igc_adapter *adapter)
3526 struct net_device *netdev = adapter->netdev;
3527 struct pci_dev *pdev = adapter->pdev;
3528 struct igc_hw *hw = &adapter->hw;
3530 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
3532 /* set default ring sizes */
3533 adapter->tx_ring_count = IGC_DEFAULT_TXD;
3534 adapter->rx_ring_count = IGC_DEFAULT_RXD;
3536 /* set default ITR values */
3537 adapter->rx_itr_setting = IGC_DEFAULT_ITR;
3538 adapter->tx_itr_setting = IGC_DEFAULT_ITR;
3540 /* set default work limits */
3541 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK;
3543 /* adjust max frame to be at least the size of a standard frame */
3544 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
3546 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
3548 mutex_init(&adapter->nfc_rule_lock);
3549 INIT_LIST_HEAD(&adapter->nfc_rule_list);
3550 adapter->nfc_rule_count = 0;
3552 spin_lock_init(&adapter->stats64_lock);
3553 /* Assume MSI-X interrupts, will be checked during IRQ allocation */
3554 adapter->flags |= IGC_FLAG_HAS_MSIX;
3556 igc_init_queue_configuration(adapter);
3558 /* This call may decrease the number of queues */
3559 if (igc_init_interrupt_scheme(adapter, true)) {
3560 netdev_err(netdev, "Unable to allocate memory for queues\n");
3564 /* Explicitly disable IRQ since the NIC can be in any state. */
3565 igc_irq_disable(adapter);
3567 set_bit(__IGC_DOWN, &adapter->state);
3573 * igc_up - Open the interface and prepare it to handle traffic
3574 * @adapter: board private structure
3576 void igc_up(struct igc_adapter *adapter)
3578 struct igc_hw *hw = &adapter->hw;
3581 /* hardware has been reset, we need to reload some things */
3582 igc_configure(adapter);
3584 clear_bit(__IGC_DOWN, &adapter->state);
3586 for (i = 0; i < adapter->num_q_vectors; i++)
3587 napi_enable(&adapter->q_vector[i]->napi);
3589 if (adapter->msix_entries)
3590 igc_configure_msix(adapter);
3592 igc_assign_vector(adapter->q_vector[0], 0);
3594 /* Clear any pending interrupts. */
3596 igc_irq_enable(adapter);
3598 netif_tx_start_all_queues(adapter->netdev);
3600 /* start the watchdog. */
3601 hw->mac.get_link_status = 1;
3602 schedule_work(&adapter->watchdog_task);
3606 * igc_update_stats - Update the board statistics counters
3607 * @adapter: board private structure
3609 void igc_update_stats(struct igc_adapter *adapter)
3611 struct rtnl_link_stats64 *net_stats = &adapter->stats64;
3612 struct pci_dev *pdev = adapter->pdev;
3613 struct igc_hw *hw = &adapter->hw;
3614 u64 _bytes, _packets;
3620 /* Prevent stats update while adapter is being reset, or if the pci
3621 * connection is down.
3623 if (adapter->link_speed == 0)
3625 if (pci_channel_offline(pdev))
3632 for (i = 0; i < adapter->num_rx_queues; i++) {
3633 struct igc_ring *ring = adapter->rx_ring[i];
3634 u32 rqdpc = rd32(IGC_RQDPC(i));
3636 if (hw->mac.type >= igc_i225)
3637 wr32(IGC_RQDPC(i), 0);
3640 ring->rx_stats.drops += rqdpc;
3641 net_stats->rx_fifo_errors += rqdpc;
3645 start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
3646 _bytes = ring->rx_stats.bytes;
3647 _packets = ring->rx_stats.packets;
3648 } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
3650 packets += _packets;
3653 net_stats->rx_bytes = bytes;
3654 net_stats->rx_packets = packets;
3658 for (i = 0; i < adapter->num_tx_queues; i++) {
3659 struct igc_ring *ring = adapter->tx_ring[i];
3662 start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
3663 _bytes = ring->tx_stats.bytes;
3664 _packets = ring->tx_stats.packets;
3665 } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
3667 packets += _packets;
3669 net_stats->tx_bytes = bytes;
3670 net_stats->tx_packets = packets;
3673 /* read stats registers */
3674 adapter->stats.crcerrs += rd32(IGC_CRCERRS);
3675 adapter->stats.gprc += rd32(IGC_GPRC);
3676 adapter->stats.gorc += rd32(IGC_GORCL);
3677 rd32(IGC_GORCH); /* clear GORCL */
3678 adapter->stats.bprc += rd32(IGC_BPRC);
3679 adapter->stats.mprc += rd32(IGC_MPRC);
3680 adapter->stats.roc += rd32(IGC_ROC);
3682 adapter->stats.prc64 += rd32(IGC_PRC64);
3683 adapter->stats.prc127 += rd32(IGC_PRC127);
3684 adapter->stats.prc255 += rd32(IGC_PRC255);
3685 adapter->stats.prc511 += rd32(IGC_PRC511);
3686 adapter->stats.prc1023 += rd32(IGC_PRC1023);
3687 adapter->stats.prc1522 += rd32(IGC_PRC1522);
3689 mpc = rd32(IGC_MPC);
3690 adapter->stats.mpc += mpc;
3691 net_stats->rx_fifo_errors += mpc;
3692 adapter->stats.scc += rd32(IGC_SCC);
3693 adapter->stats.ecol += rd32(IGC_ECOL);
3694 adapter->stats.mcc += rd32(IGC_MCC);
3695 adapter->stats.latecol += rd32(IGC_LATECOL);
3696 adapter->stats.dc += rd32(IGC_DC);
3697 adapter->stats.rlec += rd32(IGC_RLEC);
3698 adapter->stats.xonrxc += rd32(IGC_XONRXC);
3699 adapter->stats.xontxc += rd32(IGC_XONTXC);
3700 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
3701 adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
3702 adapter->stats.fcruc += rd32(IGC_FCRUC);
3703 adapter->stats.gptc += rd32(IGC_GPTC);
3704 adapter->stats.gotc += rd32(IGC_GOTCL);
3705 rd32(IGC_GOTCH); /* clear GOTCL */
3706 adapter->stats.rnbc += rd32(IGC_RNBC);
3707 adapter->stats.ruc += rd32(IGC_RUC);
3708 adapter->stats.rfc += rd32(IGC_RFC);
3709 adapter->stats.rjc += rd32(IGC_RJC);
3710 adapter->stats.tor += rd32(IGC_TORH);
3711 adapter->stats.tot += rd32(IGC_TOTH);
3712 adapter->stats.tpr += rd32(IGC_TPR);
3714 adapter->stats.ptc64 += rd32(IGC_PTC64);
3715 adapter->stats.ptc127 += rd32(IGC_PTC127);
3716 adapter->stats.ptc255 += rd32(IGC_PTC255);
3717 adapter->stats.ptc511 += rd32(IGC_PTC511);
3718 adapter->stats.ptc1023 += rd32(IGC_PTC1023);
3719 adapter->stats.ptc1522 += rd32(IGC_PTC1522);
3721 adapter->stats.mptc += rd32(IGC_MPTC);
3722 adapter->stats.bptc += rd32(IGC_BPTC);
3724 adapter->stats.tpt += rd32(IGC_TPT);
3725 adapter->stats.colc += rd32(IGC_COLC);
3726 adapter->stats.colc += rd32(IGC_RERC);
3728 adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
3730 adapter->stats.tsctc += rd32(IGC_TSCTC);
3732 adapter->stats.iac += rd32(IGC_IAC);
3734 /* Fill out the OS statistics structure */
3735 net_stats->multicast = adapter->stats.mprc;
3736 net_stats->collisions = adapter->stats.colc;
3740 /* RLEC on some newer hardware can be incorrect so build
3741 * our own version based on RUC and ROC
3743 net_stats->rx_errors = adapter->stats.rxerrc +
3744 adapter->stats.crcerrs + adapter->stats.algnerrc +
3745 adapter->stats.ruc + adapter->stats.roc +
3746 adapter->stats.cexterr;
3747 net_stats->rx_length_errors = adapter->stats.ruc +
3749 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3750 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3751 net_stats->rx_missed_errors = adapter->stats.mpc;
3754 net_stats->tx_errors = adapter->stats.ecol +
3755 adapter->stats.latecol;
3756 net_stats->tx_aborted_errors = adapter->stats.ecol;
3757 net_stats->tx_window_errors = adapter->stats.latecol;
3758 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3760 /* Tx Dropped needs to be maintained elsewhere */
3762 /* Management Stats */
3763 adapter->stats.mgptc += rd32(IGC_MGTPTC);
3764 adapter->stats.mgprc += rd32(IGC_MGTPRC);
3765 adapter->stats.mgpdc += rd32(IGC_MGTPDC);
3769 * igc_down - Close the interface
3770 * @adapter: board private structure
3772 void igc_down(struct igc_adapter *adapter)
3774 struct net_device *netdev = adapter->netdev;
3775 struct igc_hw *hw = &adapter->hw;
3779 set_bit(__IGC_DOWN, &adapter->state);
3781 /* disable receives in the hardware */
3782 rctl = rd32(IGC_RCTL);
3783 wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
3784 /* flush and sleep below */
3786 /* set trans_start so we don't get spurious watchdogs during reset */
3787 netif_trans_update(netdev);
3789 netif_carrier_off(netdev);
3790 netif_tx_stop_all_queues(netdev);
3792 /* disable transmits in the hardware */
3793 tctl = rd32(IGC_TCTL);
3794 tctl &= ~IGC_TCTL_EN;
3795 wr32(IGC_TCTL, tctl);
3796 /* flush both disables and wait for them to finish */
3798 usleep_range(10000, 20000);
3800 igc_irq_disable(adapter);
3802 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
3804 for (i = 0; i < adapter->num_q_vectors; i++) {
3805 if (adapter->q_vector[i]) {
3806 napi_synchronize(&adapter->q_vector[i]->napi);
3807 napi_disable(&adapter->q_vector[i]->napi);
3811 del_timer_sync(&adapter->watchdog_timer);
3812 del_timer_sync(&adapter->phy_info_timer);
3814	/* record the stats before reset */
3815 spin_lock(&adapter->stats64_lock);
3816 igc_update_stats(adapter);
3817 spin_unlock(&adapter->stats64_lock);
3819 adapter->link_speed = 0;
3820 adapter->link_duplex = 0;
3822 if (!pci_channel_offline(adapter->pdev))
3825 /* clear VLAN promisc flag so VFTA will be updated if necessary */
3826 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC;
3828 igc_clean_all_tx_rings(adapter);
3829 igc_clean_all_rx_rings(adapter);
3832 void igc_reinit_locked(struct igc_adapter *adapter)
3834 WARN_ON(in_interrupt());
3835 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3836 usleep_range(1000, 2000);
3839 clear_bit(__IGC_RESETTING, &adapter->state);
3842 static void igc_reset_task(struct work_struct *work)
3844 struct igc_adapter *adapter;
3846 adapter = container_of(work, struct igc_adapter, reset_task);
3848 igc_rings_dump(adapter);
3849 igc_regs_dump(adapter);
3850 netdev_err(adapter->netdev, "Reset adapter\n");
3851 igc_reinit_locked(adapter);
3855 * igc_change_mtu - Change the Maximum Transfer Unit
3856 * @netdev: network interface device structure
3857 * @new_mtu: new value for maximum frame size
3859 * Returns 0 on success, negative on failure
3861 static int igc_change_mtu(struct net_device *netdev, int new_mtu)
3863 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
3864 struct igc_adapter *adapter = netdev_priv(netdev);
3866 /* adjust max frame to be at least the size of a standard frame */
3867 if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
3868 max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
3870 while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
3871 usleep_range(1000, 2000);
3873 /* igc_down has a dependency on max_frame_size */
3874 adapter->max_frame_size = max_frame;
3876 if (netif_running(netdev))
3879 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
3880 netdev->mtu = new_mtu;
3882 if (netif_running(netdev))
3887 clear_bit(__IGC_RESETTING, &adapter->state);
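/* Editor's note, worked example: new_mtu = 9000 reserves ETH_HLEN (14) +
 * ETH_FCS_LEN (4) + VLAN_HLEN (4) on top, so max_frame = 9022; anything
 * that computes below ETH_FRAME_LEN + ETH_FCS_LEN = 1518 is raised back to
 * 1518 by the clamp at the top of the function.
 */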
3893 * igc_get_stats - Get System Network Statistics
3894 * @netdev: network interface device structure
3896 * Returns the address of the device statistics structure.
3897 * The statistics are updated here and also from the timer callback.
3899 static struct net_device_stats *igc_get_stats(struct net_device *netdev)
3901 struct igc_adapter *adapter = netdev_priv(netdev);
3903 if (!test_bit(__IGC_RESETTING, &adapter->state))
3904 igc_update_stats(adapter);
3906 /* only return the current stats */
3907 return &netdev->stats;
3910 static netdev_features_t igc_fix_features(struct net_device *netdev,
3911 netdev_features_t features)
3913 /* Since there is no support for separate Rx/Tx vlan accel
3914	 * enable/disable, make sure the Tx flag is always in the same state as Rx.
3916 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3917 features |= NETIF_F_HW_VLAN_CTAG_TX;
3919 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
3924 static int igc_set_features(struct net_device *netdev,
3925 netdev_features_t features)
3927 netdev_features_t changed = netdev->features ^ features;
3928 struct igc_adapter *adapter = netdev_priv(netdev);
3930 /* Add VLAN support */
3931 if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
3934 if (!(features & NETIF_F_NTUPLE))
3935 igc_flush_nfc_rules(adapter);
3937 netdev->features = features;
3939 if (netif_running(netdev))
3940 igc_reinit_locked(adapter);
3947 static netdev_features_t
3948 igc_features_check(struct sk_buff *skb, struct net_device *dev,
3949 netdev_features_t features)
3951 unsigned int network_hdr_len, mac_hdr_len;
3953 /* Make certain the headers can be described by a context descriptor */
3954 mac_hdr_len = skb_network_header(skb) - skb->data;
3955 if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
3956 return features & ~(NETIF_F_HW_CSUM |
3958 NETIF_F_HW_VLAN_CTAG_TX |
3962 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
3963 if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
3964 return features & ~(NETIF_F_HW_CSUM |
3969 /* We can only support IPv4 TSO in tunnels if we can mangle the
3970 * inner IP ID field, so strip TSO if MANGLEID is not supported.
3972 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
3973 features &= ~NETIF_F_TSO;
3978 static void igc_tsync_interrupt(struct igc_adapter *adapter)
3980 struct igc_hw *hw = &adapter->hw;
3981 u32 tsicr = rd32(IGC_TSICR);
3984 if (tsicr & IGC_TSICR_TXTS) {
3985 /* retrieve hardware timestamp */
3986 schedule_work(&adapter->ptp_tx_work);
3987 ack |= IGC_TSICR_TXTS;
3990 /* acknowledge the interrupts */
3991 wr32(IGC_TSICR, ack);
3995 * igc_msix_other - msix other interrupt handler
3996 * @irq: interrupt number
3997 * @data: pointer to a q_vector
3999 static irqreturn_t igc_msix_other(int irq, void *data)
4001 struct igc_adapter *adapter = data;
4002 struct igc_hw *hw = &adapter->hw;
4003 u32 icr = rd32(IGC_ICR);
4005 /* reading ICR causes bit 31 of EICR to be cleared */
4006 if (icr & IGC_ICR_DRSTA)
4007 schedule_work(&adapter->reset_task);
4009 if (icr & IGC_ICR_DOUTSYNC) {
4010 /* HW is reporting DMA is out of sync */
4011 adapter->stats.doosync++;
4014 if (icr & IGC_ICR_LSC) {
4015 hw->mac.get_link_status = 1;
4016 /* guard against interrupt when we're going down */
4017 if (!test_bit(__IGC_DOWN, &adapter->state))
4018 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4021 if (icr & IGC_ICR_TS)
4022 igc_tsync_interrupt(adapter);
4024 wr32(IGC_EIMS, adapter->eims_other);
4029 static void igc_write_itr(struct igc_q_vector *q_vector)
4031 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK;
4033 if (!q_vector->set_itr)
4037 itr_val = IGC_ITR_VAL_MASK;
4039 itr_val |= IGC_EITR_CNT_IGNR;
4041 writel(itr_val, q_vector->itr_register);
4042 q_vector->set_itr = 0;
4045 static irqreturn_t igc_msix_ring(int irq, void *data)
4047 struct igc_q_vector *q_vector = data;
4049 /* Write the ITR value calculated from the previous interrupt. */
4050 igc_write_itr(q_vector);
4052 napi_schedule(&q_vector->napi);
4058 * igc_request_msix - Initialize MSI-X interrupts
4059 * @adapter: Pointer to adapter structure
4061 * igc_request_msix allocates MSI-X vectors and requests interrupts from the kernel.
4064 static int igc_request_msix(struct igc_adapter *adapter)
4066 int i = 0, err = 0, vector = 0, free_vector = 0;
4067 struct net_device *netdev = adapter->netdev;
4069 err = request_irq(adapter->msix_entries[vector].vector,
4070 &igc_msix_other, 0, netdev->name, adapter);
4074 for (i = 0; i < adapter->num_q_vectors; i++) {
4075 struct igc_q_vector *q_vector = adapter->q_vector[i];
4079 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector);
4081 if (q_vector->rx.ring && q_vector->tx.ring)
4082 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
4083 q_vector->rx.ring->queue_index);
4084 else if (q_vector->tx.ring)
4085 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
4086 q_vector->tx.ring->queue_index);
4087 else if (q_vector->rx.ring)
4088 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
4089 q_vector->rx.ring->queue_index);
4091 sprintf(q_vector->name, "%s-unused", netdev->name);
4093 err = request_irq(adapter->msix_entries[vector].vector,
4094 igc_msix_ring, 0, q_vector->name,
4100 igc_configure_msix(adapter);
4104 /* free already assigned IRQs */
4105 free_irq(adapter->msix_entries[free_vector++].vector, adapter);
4108 for (i = 0; i < vector; i++) {
4109 free_irq(adapter->msix_entries[free_vector++].vector,
4110 adapter->q_vector[i]);
4117 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
4118 * @adapter: Pointer to adapter structure
4120 * This function resets the device so that it has no Rx queues, Tx queues, or
4121 * MSI-X interrupts allocated.
4123 static void igc_clear_interrupt_scheme(struct igc_adapter *adapter)
4125 igc_free_q_vectors(adapter);
4126 igc_reset_interrupt_capability(adapter);
4129 /* Need to wait a few seconds after link up to get diagnostic information from the phy */
4132 static void igc_update_phy_info(struct timer_list *t)
4134 struct igc_adapter *adapter = from_timer(adapter, t, phy_info_timer);
4136 igc_get_phy_info(&adapter->hw);
4140 * igc_has_link - check shared code for link and determine up/down
4141 * @adapter: pointer to driver private info
4143 bool igc_has_link(struct igc_adapter *adapter)
4145 struct igc_hw *hw = &adapter->hw;
4146 bool link_active = false;
4148 /* get_link_status is set on LSC (link status) interrupt or
4149 * rx sequence error interrupt. get_link_status will stay
4150 * false until the igc_check_for_link establishes link
4151 * for copper adapters ONLY
4153 switch (hw->phy.media_type) {
4154 case igc_media_type_copper:
4155 if (!hw->mac.get_link_status)
4157 hw->mac.ops.check_for_link(hw);
4158 link_active = !hw->mac.get_link_status;
4161 case igc_media_type_unknown:
4165 if (hw->mac.type == igc_i225 &&
4166 hw->phy.id == I225_I_PHY_ID) {
4167 if (!netif_carrier_ok(adapter->netdev)) {
4168 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4169 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) {
4170 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE;
4171 adapter->link_check_timeout = jiffies;
4179 * igc_watchdog - Timer Call-back
4180 * @t: timer for the watchdog
4182 static void igc_watchdog(struct timer_list *t)
4184 struct igc_adapter *adapter = from_timer(adapter, t, watchdog_timer);
4185 /* Do the rest outside of interrupt context */
4186 schedule_work(&adapter->watchdog_task);
4189 static void igc_watchdog_task(struct work_struct *work)
4191 struct igc_adapter *adapter = container_of(work,
4194 struct net_device *netdev = adapter->netdev;
4195 struct igc_hw *hw = &adapter->hw;
4196 struct igc_phy_info *phy = &hw->phy;
4197 u16 phy_data, retry_count = 20;
4201 link = igc_has_link(adapter);
4203 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) {
4204 if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
4205 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
4211 /* Cancel scheduled suspend requests. */
4212 pm_runtime_resume(netdev->dev.parent);
4214 if (!netif_carrier_ok(netdev)) {
4217 hw->mac.ops.get_speed_and_duplex(hw,
4218 &adapter->link_speed,
4219 &adapter->link_duplex);
4221 ctrl = rd32(IGC_CTRL);
4222 /* Link status message must follow this format */
4224 "NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4225 adapter->link_speed,
4226 adapter->link_duplex == FULL_DUPLEX ?
4228 (ctrl & IGC_CTRL_TFCE) &&
4229 (ctrl & IGC_CTRL_RFCE) ? "RX/TX" :
4230 (ctrl & IGC_CTRL_RFCE) ? "RX" :
4231 (ctrl & IGC_CTRL_TFCE) ? "TX" : "None");
4233 /* disable EEE if enabled */
4234 if ((adapter->flags & IGC_FLAG_EEE) &&
4235 adapter->link_duplex == HALF_DUPLEX) {
4237 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n");
4238 adapter->hw.dev_spec._base.eee_enable = false;
4239 adapter->flags &= ~IGC_FLAG_EEE;
4242 /* check if SmartSpeed worked */
4243 igc_check_downshift(hw);
4244 if (phy->speed_downgraded)
4245 netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
4247 /* adjust timeout factor according to speed/duplex */
4248 adapter->tx_timeout_factor = 1;
4249 switch (adapter->link_speed) {
4251 adapter->tx_timeout_factor = 14;
4254 /* maybe add some timeout factor ? */
4258 if (adapter->link_speed != SPEED_1000)
4261 /* wait for Remote receiver status OK */
4263 if (!igc_read_phy_reg(hw, PHY_1000T_STATUS,
4265 if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
4269 goto retry_read_status;
4270 } else if (!retry_count) {
4271				netdev_err(netdev, "exceeded max 2 second wait\n");
4274				netdev_err(netdev, "failed to read 1000Base-T Status register\n");
4277 netif_carrier_on(netdev);
4279 /* link state has changed, schedule phy info update */
4280 if (!test_bit(__IGC_DOWN, &adapter->state))
4281 mod_timer(&adapter->phy_info_timer,
4282 round_jiffies(jiffies + 2 * HZ));
4285 if (netif_carrier_ok(netdev)) {
4286 adapter->link_speed = 0;
4287 adapter->link_duplex = 0;
4289		/* Link status message must follow this format */
4290 netdev_info(netdev, "NIC Link is Down\n");
4291 netif_carrier_off(netdev);
4293 /* link state has changed, schedule phy info update */
4294 if (!test_bit(__IGC_DOWN, &adapter->state))
4295 mod_timer(&adapter->phy_info_timer,
4296 round_jiffies(jiffies + 2 * HZ));
4298 /* link is down, time to check for alternate media */
4299 if (adapter->flags & IGC_FLAG_MAS_ENABLE) {
4300 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4301 schedule_work(&adapter->reset_task);
4302 /* return immediately */
4306 pm_schedule_suspend(netdev->dev.parent,
4309 /* also check for alternate media here */
4310 } else if (!netif_carrier_ok(netdev) &&
4311 (adapter->flags & IGC_FLAG_MAS_ENABLE)) {
4312 if (adapter->flags & IGC_FLAG_MEDIA_RESET) {
4313 schedule_work(&adapter->reset_task);
4314 /* return immediately */
4320 spin_lock(&adapter->stats64_lock);
4321 igc_update_stats(adapter);
4322 spin_unlock(&adapter->stats64_lock);
4324 for (i = 0; i < adapter->num_tx_queues; i++) {
4325 struct igc_ring *tx_ring = adapter->tx_ring[i];
4327 if (!netif_carrier_ok(netdev)) {
4328 /* We've lost link, so the controller stops DMA,
4329 * but we've got queued Tx work that's never going
4330 * to get done, so reset controller to flush Tx.
4331 * (Do the reset outside of interrupt context).
4333 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) {
4334 adapter->tx_timeout_count++;
4335 schedule_work(&adapter->reset_task);
4336 /* return immediately since reset is imminent */
4341 /* Force detection of hung controller every watchdog period */
4342 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4345 /* Cause software interrupt to ensure Rx ring is cleaned */
4346 if (adapter->flags & IGC_FLAG_HAS_MSIX) {
4349 for (i = 0; i < adapter->num_q_vectors; i++)
4350 eics |= adapter->q_vector[i]->eims_value;
4351 wr32(IGC_EICS, eics);
4353 wr32(IGC_ICS, IGC_ICS_RXDMT0);
4356 igc_ptp_tx_hang(adapter);
4358 /* Reset the timer */
4359 if (!test_bit(__IGC_DOWN, &adapter->state)) {
4360 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)
4361 mod_timer(&adapter->watchdog_timer,
4362 round_jiffies(jiffies + HZ));
4364 mod_timer(&adapter->watchdog_timer,
4365 round_jiffies(jiffies + 2 * HZ));
4370 * igc_intr_msi - Interrupt Handler
4371 * @irq: interrupt number
4372 * @data: pointer to a network interface device structure
4374 static irqreturn_t igc_intr_msi(int irq, void *data)
4376 struct igc_adapter *adapter = data;
4377 struct igc_q_vector *q_vector = adapter->q_vector[0];
4378 struct igc_hw *hw = &adapter->hw;
4379 /* read ICR disables interrupts using IAM */
4380 u32 icr = rd32(IGC_ICR);
4382 igc_write_itr(q_vector);
4384 if (icr & IGC_ICR_DRSTA)
4385 schedule_work(&adapter->reset_task);
4387 if (icr & IGC_ICR_DOUTSYNC) {
4388 /* HW is reporting DMA is out of sync */
4389 adapter->stats.doosync++;
4392 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4393 hw->mac.get_link_status = 1;
4394 if (!test_bit(__IGC_DOWN, &adapter->state))
4395 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4398 napi_schedule(&q_vector->napi);
4404 * igc_intr - Legacy Interrupt Handler
4405 * @irq: interrupt number
4406 * @data: pointer to a network interface device structure
4408 static irqreturn_t igc_intr(int irq, void *data)
4410 struct igc_adapter *adapter = data;
4411 struct igc_q_vector *q_vector = adapter->q_vector[0];
4412 struct igc_hw *hw = &adapter->hw;
4413 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4414 * need for the IMC write
4416 u32 icr = rd32(IGC_ICR);
4418 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4419 * not set, then the adapter didn't send an interrupt
4421 if (!(icr & IGC_ICR_INT_ASSERTED))
4424 igc_write_itr(q_vector);
4426 if (icr & IGC_ICR_DRSTA)
4427 schedule_work(&adapter->reset_task);
4429 if (icr & IGC_ICR_DOUTSYNC) {
4430 /* HW is reporting DMA is out of sync */
4431 adapter->stats.doosync++;
4434 if (icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
4435 hw->mac.get_link_status = 1;
4436 /* guard against interrupt when we're going down */
4437 if (!test_bit(__IGC_DOWN, &adapter->state))
4438 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4441 napi_schedule(&q_vector->napi);
4446 static void igc_free_irq(struct igc_adapter *adapter)
4448 if (adapter->msix_entries) {
4451 free_irq(adapter->msix_entries[vector++].vector, adapter);
4453 for (i = 0; i < adapter->num_q_vectors; i++)
4454 free_irq(adapter->msix_entries[vector++].vector,
4455 adapter->q_vector[i]);
4457 free_irq(adapter->pdev->irq, adapter);
/**
 * igc_request_irq - initialize interrupts
 * @adapter: Pointer to adapter structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 */
static int igc_request_irq(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGC_FLAG_HAS_MSIX) {
		err = igc_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igc_free_all_tx_resources(adapter);
		igc_free_all_rx_resources(adapter);

		igc_clear_interrupt_scheme(adapter);
		err = igc_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;
		igc_setup_all_tx_resources(adapter);
		igc_setup_all_rx_resources(adapter);
		igc_configure(adapter);
	}

	igc_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGC_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, &igc_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igc_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGC_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED,
			  netdev->name, adapter);
	if (err)
		netdev_err(netdev, "Error %d getting interrupt\n", err);

request_done:
	return err;
}
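/* The request path above degrades gracefully: MSI-X first, then MSI,
 * then shared legacy INTx. Falling back from MSI-X changes the queue
 * layout, so the descriptor rings are freed and re-allocated against
 * the single-vector interrupt scheme before continuing.
 */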
/**
 * __igc_open - Called when a network interface is made active
 * @netdev: network interface device structure
 * @resuming: boolean indicating if the device is resuming
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int __igc_open(struct net_device *netdev, bool resuming)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct igc_hw *hw = &adapter->hw;
	int err = 0;
	int i = 0;

	/* disallow open during test */
	if (test_bit(__IGC_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);
	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igc_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igc_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igc_power_up_link(adapter);
	igc_configure(adapter);
	err = igc_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	clear_bit(__IGC_DOWN, &adapter->state);
	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&adapter->q_vector[i]->napi);

	/* Clear any pending interrupts. */
	rd32(IGC_ICR);
	igc_irq_enable(adapter);

	if (!resuming)
		pm_runtime_put(&pdev->dev);
	netif_tx_start_all_queues(netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return IGC_SUCCESS;

err_set_queues:
	igc_free_irq(adapter);
err_req_irq:
	igc_release_hw_control(adapter);
	igc_power_down_phy_copper_base(&adapter->hw);
	igc_free_all_rx_resources(adapter);
err_setup_rx:
	igc_free_all_tx_resources(adapter);
err_setup_tx:
	igc_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);
	return err;
}
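/* __igc_open() is shared with the resume path: when @resuming is true
 * the caller already holds a runtime-PM reference, so the
 * pm_runtime_get_sync()/pm_runtime_put() pair above is skipped to keep
 * the device usage count balanced.
 */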
int igc_open(struct net_device *netdev)
{
	return __igc_open(netdev, false);
}
/**
 * __igc_close - Disables a network interface
 * @netdev: network interface device structure
 * @suspending: boolean indicating the device is suspending
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int __igc_close(struct net_device *netdev, bool suspending)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGC_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igc_down(adapter);
	igc_release_hw_control(adapter);
	igc_free_irq(adapter);
	igc_free_all_tx_resources(adapter);
	igc_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}
int igc_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igc_close(netdev, false);
	return 0;
}
/**
 * igc_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 */
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGHWTSTAMP:
		return igc_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igc_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
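/* For reference, userspace reaches these handlers through the standard
 * hwtstamp ioctl interface (Documentation/networking/timestamping.rst).
 * A minimal sketch, assuming a socket @fd and an interface named "eth0"
 * (both illustrative):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = {};
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// lands in igc_ptp_set_ts_config()
 */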
static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
				      bool enable)
{
	struct igc_ring *ring;
	int i;

	if (queue < 0 || queue >= adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];
	ring->launchtime_enable = enable;

	if (adapter->base_time)
		return 0;

	adapter->cycle_time = NSEC_PER_SEC;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];
		ring->start_time = 0;
		ring->end_time = NSEC_PER_SEC;
	}
	return 0;
}
static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
{
	int queue_uses[IGC_MAX_TX_QUEUES] = { };
	size_t n;

	if (qopt->cycle_time_extension)
		return false;

	for (n = 0; n < qopt->num_entries; n++) {
		const struct tc_taprio_sched_entry *e = &qopt->entries[n];
		int i;

		/* i225 only supports "global" frame preemption settings */
		if (e->command != TC_TAPRIO_CMD_SET_GATES)
			return false;

		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			if (e->gate_mask & BIT(i))
				queue_uses[i]++;
			if (queue_uses[i] > 1)
				return false;
		}
	}
	return true;
}
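/* The schedule is rejected unless each Tx queue appears in at most one
 * gate entry per cycle: the hardware programs a single start/end window
 * per ring (see igc_save_qbv_schedule() below), so a queue whose gate
 * opens twice in one cycle cannot be represented.
 */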
static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
				     struct tc_etf_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;
	err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable);
	if (err)
		return err;
	return igc_tsn_offload_apply(adapter);
}
static int igc_save_qbv_schedule(struct igc_adapter *adapter,
				 struct tc_taprio_qopt_offload *qopt)
{
	u32 start_time = 0, end_time = 0;
	size_t n;

	if (!qopt->enable) {
		adapter->base_time = 0;
		return 0;
	}
	if (adapter->base_time)
		return -EALREADY;
	if (!validate_schedule(qopt))
		return -EINVAL;

	adapter->cycle_time = qopt->cycle_time;
	adapter->base_time = qopt->base_time;

	/* FIXME: be a little smarter about cases when the gate for a
	 * queue stays open for more than one entry.
	 */
	for (n = 0; n < qopt->num_entries; n++) {
		struct tc_taprio_sched_entry *e = &qopt->entries[n];
		int i;

		end_time += e->interval;
		for (i = 0; i < IGC_MAX_TX_QUEUES; i++) {
			struct igc_ring *ring = adapter->tx_ring[i];

			if (!(e->gate_mask & BIT(i)))
				continue;

			ring->start_time = start_time;
			ring->end_time = end_time;
		}
		start_time += e->interval;
	}
	return 0;
}
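/* Worked example for the loop above: a cycle with two 300us entries,
 * gate_mask 0x1 then 0xE, yields ring 0 with start_time 0 and end_time
 * 300000, and rings 1-3 with start_time 300000 and end_time 600000.
 * Each ring keeps the window of the last entry that opened its gate.
 */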
static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
					 struct tc_taprio_qopt_offload *qopt)
{
	struct igc_hw *hw = &adapter->hw;
	int err;

	if (hw->mac.type != igc_i225)
		return -EOPNOTSUPP;
	err = igc_save_qbv_schedule(adapter, qopt);
	if (err)
		return err;
	return igc_tsn_offload_apply(adapter);
}
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igc_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		return igc_tsn_enable_qbv_scheduling(adapter, type_data);
	case TC_SETUP_QDISC_ETF:
		return igc_tsn_enable_launchtime(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
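/* For reference, these offloads are driven from userspace via tc;
 * illustrative commands only (interface name, queue mapping and times
 * are examples, not requirements):
 *
 *	tc qdisc replace dev eth0 parent root handle 100 taprio \
 *		num_tc 4 map 3 2 1 0 3 3 3 3 3 3 3 3 3 3 3 3 \
 *		queues 1@0 1@1 1@2 1@3 base-time 1000000000 \
 *		sched-entry S 01 300000 sched-entry S 0E 300000 \
 *		flags 0x2
 *
 *	tc qdisc replace dev eth0 parent 100:1 etf \
 *		clockid CLOCK_TAI delta 500000 offload
 *
 * taprio's "flags 0x2" requests full offload (TC_SETUP_QDISC_TAPRIO
 * above); etf's "offload" flag requests launchtime (TC_SETUP_QDISC_ETF).
 */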
static const struct net_device_ops igc_netdev_ops = {
	.ndo_open		= igc_open,
	.ndo_stop		= igc_close,
	.ndo_start_xmit		= igc_xmit_frame,
	.ndo_set_rx_mode	= igc_set_rx_mode,
	.ndo_set_mac_address	= igc_set_mac,
	.ndo_change_mtu		= igc_change_mtu,
	.ndo_get_stats		= igc_get_stats,
	.ndo_fix_features	= igc_fix_features,
	.ndo_set_features	= igc_set_features,
	.ndo_features_check	= igc_features_check,
	.ndo_do_ioctl		= igc_ioctl,
	.ndo_setup_tc		= igc_setup_tc,
};
/* PCIe configuration access */
void igc_read_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igc_write_pci_cfg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igc_read_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;
	pcie_capability_read_word(adapter->pdev, reg, value);
	return IGC_SUCCESS;
}

s32 igc_write_pcie_cap_reg(struct igc_hw *hw, u32 reg, u16 *value)
{
	struct igc_adapter *adapter = hw->back;

	if (!pci_is_pcie(adapter->pdev))
		return -IGC_ERR_CONFIG;
	pcie_capability_write_word(adapter->pdev, reg, *value);
	return IGC_SUCCESS;
}
u32 igc_rd32(struct igc_hw *hw, u32 reg)
{
	struct igc_adapter *igc = container_of(hw, struct igc_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igc->netdev;

		hw->hw_addr = NULL;
		netif_device_detach(netdev);
		netdev_err(netdev, "PCIe link lost, device now detached\n");
		WARN(pci_device_is_present(igc->pdev),
		     "igc: Failed to read reg 0x%x!\n", reg);
	}

	return value;
}
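/* A PCIe read to a device that has dropped off the bus completes with
 * all ones, so an all-F's value is treated as a possible surprise
 * removal: register 0 is re-read to rule out a register that is
 * legitimately 0xFFFFFFFF, and the netdev is detached if the device is
 * really gone.
 */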
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx)
{
	struct igc_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = false;

	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
		goto err_inval;
	case SPEED_2500 + DUPLEX_FULL:
		mac->autoneg = true;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
		break;
	case SPEED_2500 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;
	return 0;

err_inval:
	netdev_err(adapter->netdev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
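/* The spd + dplx arithmetic works because DUPLEX_HALF is 0, DUPLEX_FULL
 * is 1 and all SPEED_* values are even, so every valid sum is unique;
 * e.g. "ethtool -s eth0 speed 100 duplex full autoneg off" arrives here
 * as spd=100, dplx=1 and matches SPEED_100 + DUPLEX_FULL (101).
 */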
/**
 * igc_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igc_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igc_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring the adapter private structure,
 * and a hardware reset occur.
 */
static int igc_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct igc_adapter *adapter;
	struct net_device *netdev;
	struct igc_hw *hw;
	const struct igc_info *ei = igc_info_tbl[ent->driver_data];
	int err, pci_using_dac;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igc_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igc_adapter),
				   IGC_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->port_num = hw->bus.func;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = pci_save_state(pdev);
	if (err)
		goto err_ioremap;

	err = -EIO;
	adapter->io_addr = ioremap(pci_resource_start(pdev, 0),
				   pci_resource_len(pdev, 0));
	if (!adapter->io_addr)
		goto err_ioremap;

	/* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igc_netdev_ops;
	igc_ethtool_set_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC and PHY function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));

	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* Add supported features to the features list */
	netdev->features |= NETIF_F_SG;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;
	netdev->features |= NETIF_F_TSO_ECN;
	netdev->features |= NETIF_F_RXCSUM;
	netdev->features |= NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SCTP_CRC;
	netdev->features |= NETIF_F_HW_TC;

#define IGC_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES;

	/* setup the private structure */
	err = igc_sw_init(adapter);
	if (err)
		goto err_sw_init;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= NETIF_F_NTUPLE;
	netdev->hw_features |= netdev->features;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	if (igc_get_flash_presence_i225(hw)) {
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* configure RXPBSIZE and TXPBSIZE */
	wr32(IGC_RXPBS, I225_RXPBSIZE_DEFAULT);
	wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);

	timer_setup(&adapter->watchdog_timer, igc_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igc_reset_task);
	INIT_WORK(&adapter->watchdog_task, igc_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0xaf;

	hw->fc.requested_mode = igc_fc_default;
	hw->fc.current_mode = igc_fc_default;

	/* By default, support wake on port A */
	adapter->flags |= IGC_FLAG_WOL_SUPPORTED;

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGC_FLAG_WOL_SUPPORTED)
		adapter->wol |= IGC_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);

	igc_ptp_init(adapter);

	/* reset the hardware with the new settings */
	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	strncpy(netdev->name, "eth%d", IFNAMSIZ);
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;

	/* print pcie link status and MAC address */
	pcie_print_link_status(pdev);
	netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr);

	dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
	/* Disable EEE for internal PHY devices */
	hw->dev_spec._base.eee_enable = false;
	adapter->flags &= ~IGC_FLAG_EEE;
	igc_set_eee_i225(hw, false, false, false);

	pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	igc_release_hw_control(adapter);
err_eeprom:
	if (!igc_check_reset_block(hw))
		igc_reset_phy(hw);
err_sw_init:
	igc_clear_interrupt_scheme(adapter);
	iounmap(adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igc_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igc_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void igc_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	pm_runtime_get_noresume(&pdev->dev);

	igc_flush_nfc_rules(adapter);

	igc_ptp_stop(adapter);

	set_bit(__IGC_DOWN, &adapter->state);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);
	unregister_netdev(netdev);

	igc_clear_interrupt_scheme(adapter);
	pci_iounmap(pdev, adapter->io_addr);
	pci_release_mem_regions(pdev);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol;
	struct igc_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	bool wake;

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igc_close(netdev, true);

	igc_ptp_suspend(adapter);

	igc_clear_interrupt_scheme(adapter);
	rtnl_unlock();

	status = rd32(IGC_STATUS);
	if (status & IGC_STATUS_LU)
		wufc &= ~IGC_WUFC_LNKC;

	if (wufc) {
		igc_setup_rctl(adapter);
		igc_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & IGC_WUFC_MC) {
			rctl = rd32(IGC_RCTL);
			rctl |= IGC_RCTL_MPE;
			wr32(IGC_RCTL, rctl);
		}

		ctrl = rd32(IGC_CTRL);
		ctrl |= IGC_CTRL_ADVD3WUC;
		wr32(IGC_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igc_disable_pcie_master(hw);

		wr32(IGC_WUC, IGC_WUC_PME_EN);
		wr32(IGC_WUFC, wufc);
	} else {
		wr32(IGC_WUC, 0);
		wr32(IGC_WUFC, 0);
	}

	wake = wufc || adapter->en_mng_pt;
	if (!wake)
		igc_power_down_phy_copper_base(&adapter->hw);
	else
		igc_power_up_link(adapter);

	if (enable_wake)
		*enable_wake = wake;

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igc_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}
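/* Wake filter selection above: runtime suspend arms only IGC_WUFC_LNKC
 * so a cable plug event brings the device back, while system suspend
 * uses the user-configured WoL filters (adapter->wol). The PHY is
 * powered down only when no wake source remains.
 */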
static int __maybe_unused igc_runtime_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
static void igc_deliver_wake_packet(struct net_device *netdev)
{
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(IGC_WUPL) & IGC_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if (wupl == 0 || wupl > IGC_WUPM_BYTES)
		return;

	skb = netdev_alloc_skb_ip_align(netdev, IGC_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);
	memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}
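/* The frame that woke the system would otherwise be lost: the hardware
 * latches up to IGC_WUPM_BYTES (128) of it in the WUPM registers, and
 * igc_resume() below replays it into the stack via netif_rx() when the
 * WUS register flags a wake packet.
 */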
static int __maybe_unused igc_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		netdev_err(netdev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igc_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);

	val = rd32(IGC_WUS);
	if (val & WAKE_PKT_WUS)
		igc_deliver_wake_packet(netdev);
	wr32(IGC_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igc_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}
static int __maybe_unused igc_runtime_resume(struct device *dev)
{
	return igc_resume(dev);
}
static int __maybe_unused igc_suspend(struct device *dev)
{
	return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igc_runtime_idle(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	if (!igc_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}
#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igc_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * igc_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t igc_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igc_down(adapter);
	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * igc_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the igc_resume routine.
 */
static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);
	struct igc_hw *hw = &adapter->hw;
	pci_ers_result_t result;

	if (pci_enable_device_mem(pdev)) {
		netdev_err(netdev, "Could not re-enable PCI device after reset\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		/* In case of PCI error, adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igc_reset(adapter);
		wr32(IGC_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
/**
 * igc_io_resume - called when traffic can start to flow again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the igc_resume routine.
 */
static void igc_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igc_adapter *adapter = netdev_priv(netdev);

	rtnl_lock();
	if (netif_running(netdev)) {
		if (igc_open(netdev)) {
			netdev_err(netdev, "igc_open failed after reset\n");
			rtnl_unlock();
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igc_get_hw_control(adapter);
	rtnl_unlock();
}
static const struct pci_error_handlers igc_err_handler = {
	.error_detected = igc_io_error_detected,
	.slot_reset = igc_io_slot_reset,
	.resume = igc_io_resume,
};
static const struct dev_pm_ops igc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
	SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
			   igc_runtime_idle)
};
static struct pci_driver igc_driver = {
	.name     = igc_driver_name,
	.id_table = igc_pci_tbl,
	.probe    = igc_probe,
	.remove   = igc_remove,
	.driver.pm = &igc_pm_ops,
	.shutdown = igc_shutdown,
	.err_handler = &igc_err_handler,
};
/**
 * igc_reinit_queues - reinitialize the interrupt scheme and queues
 * @adapter: pointer to adapter structure
 */
int igc_reinit_queues(struct igc_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err = 0;

	if (netif_running(netdev))
		igc_close(netdev);

	igc_reset_interrupt_capability(adapter);
	if (igc_init_interrupt_scheme(adapter, true)) {
		netdev_err(netdev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igc_open(netdev);
	return err;
}
/**
 * igc_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * Used by the hardware layer to print debugging information.
 */
struct net_device *igc_get_hw_dev(struct igc_hw *hw)
{
	struct igc_adapter *adapter = hw->back;

	return adapter->netdev;
}
/**
 * igc_init_module - Driver Registration Routine
 *
 * igc_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init igc_init_module(void)
{
	int ret;

	pr_info("%s\n", igc_driver_string);
	pr_info("%s\n", igc_copyright);
	ret = pci_register_driver(&igc_driver);
	return ret;
}

module_init(igc_init_module);
/**
 * igc_exit_module - Driver Exit Cleanup Routine
 *
 * igc_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit igc_exit_module(void)
{
	pci_unregister_driver(&igc_driver);
}

module_exit(igc_exit_module);