drivers/net/ethernet/intel/e1000/e1000_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2006 Intel Corporation. */
3
4 #include "e1000.h"
5 #include <net/ip6_checksum.h>
6 #include <linux/io.h>
7 #include <linux/prefetch.h>
8 #include <linux/bitops.h>
9 #include <linux/if_vlan.h>
10
11 char e1000_driver_name[] = "e1000";
12 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
13 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
14
15 /* e1000_pci_tbl - PCI Device ID Table
16  *
17  * Last entry must be all 0s
18  *
19  * Macro expands to...
20  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
21  */
22 static const struct pci_device_id e1000_pci_tbl[] = {
23         INTEL_E1000_ETHERNET_DEVICE(0x1000),
24         INTEL_E1000_ETHERNET_DEVICE(0x1001),
25         INTEL_E1000_ETHERNET_DEVICE(0x1004),
26         INTEL_E1000_ETHERNET_DEVICE(0x1008),
27         INTEL_E1000_ETHERNET_DEVICE(0x1009),
28         INTEL_E1000_ETHERNET_DEVICE(0x100C),
29         INTEL_E1000_ETHERNET_DEVICE(0x100D),
30         INTEL_E1000_ETHERNET_DEVICE(0x100E),
31         INTEL_E1000_ETHERNET_DEVICE(0x100F),
32         INTEL_E1000_ETHERNET_DEVICE(0x1010),
33         INTEL_E1000_ETHERNET_DEVICE(0x1011),
34         INTEL_E1000_ETHERNET_DEVICE(0x1012),
35         INTEL_E1000_ETHERNET_DEVICE(0x1013),
36         INTEL_E1000_ETHERNET_DEVICE(0x1014),
37         INTEL_E1000_ETHERNET_DEVICE(0x1015),
38         INTEL_E1000_ETHERNET_DEVICE(0x1016),
39         INTEL_E1000_ETHERNET_DEVICE(0x1017),
40         INTEL_E1000_ETHERNET_DEVICE(0x1018),
41         INTEL_E1000_ETHERNET_DEVICE(0x1019),
42         INTEL_E1000_ETHERNET_DEVICE(0x101A),
43         INTEL_E1000_ETHERNET_DEVICE(0x101D),
44         INTEL_E1000_ETHERNET_DEVICE(0x101E),
45         INTEL_E1000_ETHERNET_DEVICE(0x1026),
46         INTEL_E1000_ETHERNET_DEVICE(0x1027),
47         INTEL_E1000_ETHERNET_DEVICE(0x1028),
48         INTEL_E1000_ETHERNET_DEVICE(0x1075),
49         INTEL_E1000_ETHERNET_DEVICE(0x1076),
50         INTEL_E1000_ETHERNET_DEVICE(0x1077),
51         INTEL_E1000_ETHERNET_DEVICE(0x1078),
52         INTEL_E1000_ETHERNET_DEVICE(0x1079),
53         INTEL_E1000_ETHERNET_DEVICE(0x107A),
54         INTEL_E1000_ETHERNET_DEVICE(0x107B),
55         INTEL_E1000_ETHERNET_DEVICE(0x107C),
56         INTEL_E1000_ETHERNET_DEVICE(0x108A),
57         INTEL_E1000_ETHERNET_DEVICE(0x1099),
58         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
59         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
60         /* required last entry */
61         {0,}
62 };
63
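/* Registering the table with MODULE_DEVICE_TABLE() below exports the PCI IDs
 * as module aliases so that udev/modprobe can autoload this driver when a
 * matching device is enumerated.
 */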
64 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
65
66 int e1000_up(struct e1000_adapter *adapter);
67 void e1000_down(struct e1000_adapter *adapter);
68 void e1000_reinit_locked(struct e1000_adapter *adapter);
69 void e1000_reset(struct e1000_adapter *adapter);
70 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
71 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
72 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
73 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
74 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
75                                     struct e1000_tx_ring *txdr);
76 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
77                                     struct e1000_rx_ring *rxdr);
78 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
79                                     struct e1000_tx_ring *tx_ring);
80 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
81                                     struct e1000_rx_ring *rx_ring);
82 void e1000_update_stats(struct e1000_adapter *adapter);
83
84 static int e1000_init_module(void);
85 static void e1000_exit_module(void);
86 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
87 static void e1000_remove(struct pci_dev *pdev);
88 static int e1000_alloc_queues(struct e1000_adapter *adapter);
89 static int e1000_sw_init(struct e1000_adapter *adapter);
90 int e1000_open(struct net_device *netdev);
91 int e1000_close(struct net_device *netdev);
92 static void e1000_configure_tx(struct e1000_adapter *adapter);
93 static void e1000_configure_rx(struct e1000_adapter *adapter);
94 static void e1000_setup_rctl(struct e1000_adapter *adapter);
95 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
96 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
97 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
98                                 struct e1000_tx_ring *tx_ring);
99 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
100                                 struct e1000_rx_ring *rx_ring);
101 static void e1000_set_rx_mode(struct net_device *netdev);
102 static void e1000_update_phy_info_task(struct work_struct *work);
103 static void e1000_watchdog(struct work_struct *work);
104 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
106                                     struct net_device *netdev);
107 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
108 static int e1000_set_mac(struct net_device *netdev, void *p);
109 static irqreturn_t e1000_intr(int irq, void *data);
110 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
111                                struct e1000_tx_ring *tx_ring);
112 static int e1000_clean(struct napi_struct *napi, int budget);
113 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
114                                struct e1000_rx_ring *rx_ring,
115                                int *work_done, int work_to_do);
116 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
117                                      struct e1000_rx_ring *rx_ring,
118                                      int *work_done, int work_to_do);
119 static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
120                                          struct e1000_rx_ring *rx_ring,
121                                          int cleaned_count)
122 {
123 }
124 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
125                                    struct e1000_rx_ring *rx_ring,
126                                    int cleaned_count);
127 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
128                                          struct e1000_rx_ring *rx_ring,
129                                          int cleaned_count);
130 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
131 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
132                            int cmd);
133 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
134 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
135 static void e1000_tx_timeout(struct net_device *dev, unsigned int txqueue);
136 static void e1000_reset_task(struct work_struct *work);
137 static void e1000_smartspeed(struct e1000_adapter *adapter);
138 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
139                                        struct sk_buff *skb);
140
141 static bool e1000_vlan_used(struct e1000_adapter *adapter);
142 static void e1000_vlan_mode(struct net_device *netdev,
143                             netdev_features_t features);
144 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
145                                      bool filter_on);
146 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
147                                  __be16 proto, u16 vid);
148 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
149                                   __be16 proto, u16 vid);
150 static void e1000_restore_vlan(struct e1000_adapter *adapter);
151
152 static int __maybe_unused e1000_suspend(struct device *dev);
153 static int __maybe_unused e1000_resume(struct device *dev);
154 static void e1000_shutdown(struct pci_dev *pdev);
155
156 #ifdef CONFIG_NET_POLL_CONTROLLER
157 /* for netdump / net console */
158 static void e1000_netpoll(struct net_device *netdev);
159 #endif
160
161 #define COPYBREAK_DEFAULT 256
162 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
163 module_param(copybreak, uint, 0644);
164 MODULE_PARM_DESC(copybreak,
165         "Maximum size of packet that is copied to a new buffer on receive");
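/* Example (hypothetical value): "modprobe e1000 copybreak=128" makes the
 * driver copy received frames of 128 bytes or less into a new, smaller skb
 * so the original receive buffer can be reused; copybreak=0 disables the
 * copy (see the message printed from e1000_init_module() below).
 */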
166
167 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
168                                                 pci_channel_state_t state);
169 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
170 static void e1000_io_resume(struct pci_dev *pdev);
171
172 static const struct pci_error_handlers e1000_err_handler = {
173         .error_detected = e1000_io_error_detected,
174         .slot_reset = e1000_io_slot_reset,
175         .resume = e1000_io_resume,
176 };
177
178 static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
179
180 static struct pci_driver e1000_driver = {
181         .name     = e1000_driver_name,
182         .id_table = e1000_pci_tbl,
183         .probe    = e1000_probe,
184         .remove   = e1000_remove,
185         .driver = {
186                 .pm = &e1000_pm_ops,
187         },
188         .shutdown = e1000_shutdown,
189         .err_handler = &e1000_err_handler
190 };
191
192 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
193 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
194 MODULE_LICENSE("GPL v2");
195
196 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
197 static int debug = -1;
198 module_param(debug, int, 0);
199 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
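/* The value is passed to netif_msg_init(): the default of -1 selects
 * DEFAULT_MSG_ENABLE above, while e.g. "modprobe e1000 debug=16" turns on
 * every message category, matching the "16=all" note in the description.
 */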
200
201 /**
202  * e1000_get_hw_dev - return the net_device associated with @hw
203  * @hw: pointer to the board's hardware structure
204  * used by the hardware layer to print debugging information
205  **/
206 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
207 {
208         struct e1000_adapter *adapter = hw->back;
209         return adapter->netdev;
210 }
211
212 /**
213  * e1000_init_module - Driver Registration Routine
214  *
215  * e1000_init_module is the first routine called when the driver is
216  * loaded. All it does is register with the PCI subsystem.
217  **/
218 static int __init e1000_init_module(void)
219 {
220         int ret;
221         pr_info("%s\n", e1000_driver_string);
222
223         pr_info("%s\n", e1000_copyright);
224
225         ret = pci_register_driver(&e1000_driver);
226         if (copybreak != COPYBREAK_DEFAULT) {
227                 if (copybreak == 0)
228                         pr_info("copybreak disabled\n");
229                 else
230                         pr_info("copybreak enabled for packets <= %u bytes\n",
231                                 copybreak);
232         }
233         return ret;
234 }
235
236 module_init(e1000_init_module);
237
238 /**
239  * e1000_exit_module - Driver Exit Cleanup Routine
240  *
241  * e1000_exit_module is called just before the driver is removed
242  * from memory.
243  **/
244 static void __exit e1000_exit_module(void)
245 {
246         pci_unregister_driver(&e1000_driver);
247 }
248
249 module_exit(e1000_exit_module);
250
251 static int e1000_request_irq(struct e1000_adapter *adapter)
252 {
253         struct net_device *netdev = adapter->netdev;
254         irq_handler_t handler = e1000_intr;
255         int irq_flags = IRQF_SHARED;
256         int err;
257
258         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
259                           netdev);
260         if (err) {
261                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
262         }
263
264         return err;
265 }
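/* Note: this legacy driver uses only the device's INTx line (no MSI/MSI-X
 * is ever requested), which is why the handler above is registered with
 * IRQF_SHARED on adapter->pdev->irq.
 */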
266
267 static void e1000_free_irq(struct e1000_adapter *adapter)
268 {
269         struct net_device *netdev = adapter->netdev;
270
271         free_irq(adapter->pdev->irq, netdev);
272 }
273
274 /**
275  * e1000_irq_disable - Mask off interrupt generation on the NIC
276  * @adapter: board private structure
277  **/
278 static void e1000_irq_disable(struct e1000_adapter *adapter)
279 {
280         struct e1000_hw *hw = &adapter->hw;
281
282         ew32(IMC, ~0);
283         E1000_WRITE_FLUSH();
284         synchronize_irq(adapter->pdev->irq);
285 }
286
287 /**
288  * e1000_irq_enable - Enable default interrupt generation settings
289  * @adapter: board private structure
290  **/
291 static void e1000_irq_enable(struct e1000_adapter *adapter)
292 {
293         struct e1000_hw *hw = &adapter->hw;
294
295         ew32(IMS, IMS_ENABLE_MASK);
296         E1000_WRITE_FLUSH();
297 }
298
299 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
300 {
301         struct e1000_hw *hw = &adapter->hw;
302         struct net_device *netdev = adapter->netdev;
303         u16 vid = hw->mng_cookie.vlan_id;
304         u16 old_vid = adapter->mng_vlan_id;
305
306         if (!e1000_vlan_used(adapter))
307                 return;
308
309         if (!test_bit(vid, adapter->active_vlans)) {
310                 if (hw->mng_cookie.status &
311                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
312                         e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
313                         adapter->mng_vlan_id = vid;
314                 } else {
315                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
316                 }
317                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
318                     (vid != old_vid) &&
319                     !test_bit(old_vid, adapter->active_vlans))
320                         e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
321                                                old_vid);
322         } else {
323                 adapter->mng_vlan_id = vid;
324         }
325 }
326
327 static void e1000_init_manageability(struct e1000_adapter *adapter)
328 {
329         struct e1000_hw *hw = &adapter->hw;
330
331         if (adapter->en_mng_pt) {
332                 u32 manc = er32(MANC);
333
334                 /* disable hardware interception of ARP */
335                 manc &= ~(E1000_MANC_ARP_EN);
336
337                 ew32(MANC, manc);
338         }
339 }
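/* e1000_init_manageability() above and e1000_release_manageability() below
 * bracket driver ownership of the port: while the driver runs, ARP frames
 * must not be diverted to the manageability firmware, so interception is
 * disabled here and restored when the interface is released.
 */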
340
341 static void e1000_release_manageability(struct e1000_adapter *adapter)
342 {
343         struct e1000_hw *hw = &adapter->hw;
344
345         if (adapter->en_mng_pt) {
346                 u32 manc = er32(MANC);
347
348                 /* re-enable hardware interception of ARP */
349                 manc |= E1000_MANC_ARP_EN;
350
351                 ew32(MANC, manc);
352         }
353 }
354
355 /**
356  * e1000_configure - configure the hardware for RX and TX
357  * @adapter: board private structure
358  **/
359 static void e1000_configure(struct e1000_adapter *adapter)
360 {
361         struct net_device *netdev = adapter->netdev;
362         int i;
363
364         e1000_set_rx_mode(netdev);
365
366         e1000_restore_vlan(adapter);
367         e1000_init_manageability(adapter);
368
369         e1000_configure_tx(adapter);
370         e1000_setup_rctl(adapter);
371         e1000_configure_rx(adapter);
372         /* call E1000_DESC_UNUSED which always leaves
373          * at least 1 descriptor unused to make sure
374          * next_to_use != next_to_clean
375          */
376         for (i = 0; i < adapter->num_rx_queues; i++) {
377                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
378                 adapter->alloc_rx_buf(adapter, ring,
379                                       E1000_DESC_UNUSED(ring));
380         }
381 }
382
383 int e1000_up(struct e1000_adapter *adapter)
384 {
385         struct e1000_hw *hw = &adapter->hw;
386
387         /* hardware has been reset, we need to reload some things */
388         e1000_configure(adapter);
389
390         clear_bit(__E1000_DOWN, &adapter->flags);
391
392         napi_enable(&adapter->napi);
393
394         e1000_irq_enable(adapter);
395
396         netif_wake_queue(adapter->netdev);
397
398         /* fire a link change interrupt to start the watchdog */
399         ew32(ICS, E1000_ICS_LSC);
400         return 0;
401 }
402
403 /**
404  * e1000_power_up_phy - restore link in case the phy was powered down
405  * @adapter: address of board private structure
406  *
407  * The phy may be powered down to save power and turn off link when the
408  * driver is unloaded and wake on lan is not enabled (among others)
409  * *** this routine MUST be followed by a call to e1000_reset ***
410  **/
411 void e1000_power_up_phy(struct e1000_adapter *adapter)
412 {
413         struct e1000_hw *hw = &adapter->hw;
414         u16 mii_reg = 0;
415
416         /* Just clear the power down bit to wake the phy back up */
417         if (hw->media_type == e1000_media_type_copper) {
418                 /* according to the manual, the phy will retain its
419                  * settings across a power-down/up cycle
420                  */
421                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
422                 mii_reg &= ~MII_CR_POWER_DOWN;
423                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
424         }
425 }
426
427 static void e1000_power_down_phy(struct e1000_adapter *adapter)
428 {
429         struct e1000_hw *hw = &adapter->hw;
430
431         /* Power down the PHY so no link is implied when interface is down.
432          * The PHY cannot be powered down if any of the following is true:
433          * (a) WoL is enabled
434          * (b) AMT is active
435          * (c) SoL/IDER session is active
436          */
437         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
438            hw->media_type == e1000_media_type_copper) {
439                 u16 mii_reg = 0;
440
441                 switch (hw->mac_type) {
442                 case e1000_82540:
443                 case e1000_82545:
444                 case e1000_82545_rev_3:
445                 case e1000_82546:
446                 case e1000_ce4100:
447                 case e1000_82546_rev_3:
448                 case e1000_82541:
449                 case e1000_82541_rev_2:
450                 case e1000_82547:
451                 case e1000_82547_rev_2:
452                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
453                                 goto out;
454                         break;
455                 default:
456                         goto out;
457                 }
458                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
459                 mii_reg |= MII_CR_POWER_DOWN;
460                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
461                 msleep(1);
462         }
463 out:
464         return;
465 }
466
467 static void e1000_down_and_stop(struct e1000_adapter *adapter)
468 {
469         set_bit(__E1000_DOWN, &adapter->flags);
470
471         cancel_delayed_work_sync(&adapter->watchdog_task);
472
473         /* Since the watchdog task can reschedule other tasks, we should
474          * cancel it first, otherwise we can run into the situation when
475          * a work is still running after the adapter has been turned
476          * down.
477          */
478
479         cancel_delayed_work_sync(&adapter->phy_info_task);
480         cancel_delayed_work_sync(&adapter->fifo_stall_task);
481
482         /* Only kill reset task if adapter is not resetting */
483         if (!test_bit(__E1000_RESETTING, &adapter->flags))
484                 cancel_work_sync(&adapter->reset_task);
485 }
486
487 void e1000_down(struct e1000_adapter *adapter)
488 {
489         struct e1000_hw *hw = &adapter->hw;
490         struct net_device *netdev = adapter->netdev;
491         u32 rctl, tctl;
492
493         /* disable receives in the hardware */
494         rctl = er32(RCTL);
495         ew32(RCTL, rctl & ~E1000_RCTL_EN);
496         /* flush and sleep below */
497
498         netif_tx_disable(netdev);
499
500         /* disable transmits in the hardware */
501         tctl = er32(TCTL);
502         tctl &= ~E1000_TCTL_EN;
503         ew32(TCTL, tctl);
504         /* flush both disables and wait for them to finish */
505         E1000_WRITE_FLUSH();
506         msleep(10);
507
508         /* Set the carrier off after transmits have been disabled in the
509          * hardware, to avoid race conditions with e1000_watchdog() (which
510          * may be running concurrently to us, checking for the carrier
511          * bit to decide whether it should enable transmits again). Such
512          * a race condition would result into transmission being disabled
513          * in the hardware until the next IFF_DOWN+IFF_UP cycle.
514          */
515         netif_carrier_off(netdev);
516
517         napi_disable(&adapter->napi);
518
519         e1000_irq_disable(adapter);
520
521         /* Setting DOWN must be after irq_disable to prevent
522          * a screaming interrupt.  Setting DOWN also prevents
523          * tasks from rescheduling.
524          */
525         e1000_down_and_stop(adapter);
526
527         adapter->link_speed = 0;
528         adapter->link_duplex = 0;
529
530         e1000_reset(adapter);
531         e1000_clean_all_tx_rings(adapter);
532         e1000_clean_all_rx_rings(adapter);
533 }
534
535 void e1000_reinit_locked(struct e1000_adapter *adapter)
536 {
537         WARN_ON(in_interrupt());
538         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
539                 msleep(1);
540
541         /* only run the task if not already down */
542         if (!test_bit(__E1000_DOWN, &adapter->flags)) {
543                 e1000_down(adapter);
544                 e1000_up(adapter);
545         }
546
547         clear_bit(__E1000_RESETTING, &adapter->flags);
548 }
549
550 void e1000_reset(struct e1000_adapter *adapter)
551 {
552         struct e1000_hw *hw = &adapter->hw;
553         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
554         bool legacy_pba_adjust = false;
555         u16 hwm;
556
557         /* Repartition Pba for greater than 9k mtu
558          * To take effect CTRL.RST is required.
559          */
560
561         switch (hw->mac_type) {
562         case e1000_82542_rev2_0:
563         case e1000_82542_rev2_1:
564         case e1000_82543:
565         case e1000_82544:
566         case e1000_82540:
567         case e1000_82541:
568         case e1000_82541_rev_2:
569                 legacy_pba_adjust = true;
570                 pba = E1000_PBA_48K;
571                 break;
572         case e1000_82545:
573         case e1000_82545_rev_3:
574         case e1000_82546:
575         case e1000_ce4100:
576         case e1000_82546_rev_3:
577                 pba = E1000_PBA_48K;
578                 break;
579         case e1000_82547:
580         case e1000_82547_rev_2:
581                 legacy_pba_adjust = true;
582                 pba = E1000_PBA_30K;
583                 break;
584         case e1000_undefined:
585         case e1000_num_macs:
586                 break;
587         }
588
589         if (legacy_pba_adjust) {
590                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
591                         pba -= 8; /* allocate more FIFO for Tx */
592
593                 if (hw->mac_type == e1000_82547) {
594                         adapter->tx_fifo_head = 0;
595                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
596                         adapter->tx_fifo_size =
597                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
598                         atomic_set(&adapter->tx_fifo_stall, 0);
599                 }
600         } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
601                 /* adjust PBA for jumbo frames */
602                 ew32(PBA, pba);
603
604                 /* To maintain wire speed transmits, the Tx FIFO should be
605                  * large enough to accommodate two full transmit packets,
606                  * rounded up to the next 1KB and expressed in KB.  Likewise,
607                  * the Rx FIFO should be large enough to accommodate at least
608                  * one full receive packet and is similarly rounded up and
609                  * expressed in KB.
610                  */
611                 pba = er32(PBA);
612                 /* upper 16 bits has Tx packet buffer allocation size in KB */
613                 tx_space = pba >> 16;
614                 /* lower 16 bits has Rx packet buffer allocation size in KB */
615                 pba &= 0xffff;
616                 /* the Tx fifo also stores 16 bytes of information about the Tx
617                  * but don't include ethernet FCS because hardware appends it
618                  */
619                 min_tx_space = (hw->max_frame_size +
620                                 sizeof(struct e1000_tx_desc) -
621                                 ETH_FCS_LEN) * 2;
622                 min_tx_space = ALIGN(min_tx_space, 1024);
623                 min_tx_space >>= 10;
624                 /* software strips receive CRC, so leave room for it */
625                 min_rx_space = hw->max_frame_size;
626                 min_rx_space = ALIGN(min_rx_space, 1024);
627                 min_rx_space >>= 10;
628
629                 /* If current Tx allocation is less than the min Tx FIFO size,
630                  * and the min Tx FIFO size is less than the current Rx FIFO
631                  * allocation, take space away from current Rx allocation
632                  */
633                 if (tx_space < min_tx_space &&
634                     ((min_tx_space - tx_space) < pba)) {
635                         pba = pba - (min_tx_space - tx_space);
636
637                         /* PCI/PCIx hardware has PBA alignment constraints */
638                         switch (hw->mac_type) {
639                         case e1000_82545 ... e1000_82546_rev_3:
640                                 pba &= ~(E1000_PBA_8K - 1);
641                                 break;
642                         default:
643                                 break;
644                         }
645
646                         /* if short on Rx space, Rx wins and must trump Tx
647                          * adjustment or use Early Receive if available
648                          */
649                         if (pba < min_rx_space)
650                                 pba = min_rx_space;
651                 }
652         }
653
654         ew32(PBA, pba);
655
656         /* flow control settings:
657          * The high water mark must be low enough to fit one full frame
658          * (or the size used for early receive) above it in the Rx FIFO.
659          * Set it to the lower of:
660          * - 90% of the Rx FIFO size, and
661          * - the full Rx FIFO size minus the early receive size (for parts
662          *   with ERT support assuming ERT set to E1000_ERT_2048), or
663          * - the full Rx FIFO size minus one full frame
664          */
665         hwm = min(((pba << 10) * 9 / 10),
666                   ((pba << 10) - hw->max_frame_size));
667
668         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
669         hw->fc_low_water = hw->fc_high_water - 8;
670         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
671         hw->fc_send_xon = 1;
672         hw->fc = hw->original_fc;
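        /* Worked example (assuming the default PBA of 48KB and a 1518-byte
         * max frame): pba << 10 = 49152, 90% of that is 44236 and
         * 49152 - 1518 = 47634, so hwm = 44236; masking to 8-byte
         * granularity gives fc_high_water = 44232 and fc_low_water = 44224.
         */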
673
674         /* Allow time for pending master requests to run */
675         e1000_reset_hw(hw);
676         if (hw->mac_type >= e1000_82544)
677                 ew32(WUC, 0);
678
679         if (e1000_init_hw(hw))
680                 e_dev_err("Hardware Error\n");
681         e1000_update_mng_vlan(adapter);
682
683         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
684         if (hw->mac_type >= e1000_82544 &&
685             hw->autoneg == 1 &&
686             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
687                 u32 ctrl = er32(CTRL);
688                 /* clear phy power management bit if we are in gig only mode,
689                  * which if enabled will attempt negotiation to 100Mb, which
690                  * can cause a loss of link at power off or driver unload
691                  */
692                 ctrl &= ~E1000_CTRL_SWDPIN3;
693                 ew32(CTRL, ctrl);
694         }
695
696         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
697         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
698
699         e1000_reset_adaptive(hw);
700         e1000_phy_get_info(hw, &adapter->phy_info);
701
702         e1000_release_manageability(adapter);
703 }
704
705 /* Dump the eeprom for users having checksum issues */
706 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
707 {
708         struct net_device *netdev = adapter->netdev;
709         struct ethtool_eeprom eeprom;
710         const struct ethtool_ops *ops = netdev->ethtool_ops;
711         u8 *data;
712         int i;
713         u16 csum_old, csum_new = 0;
714
715         eeprom.len = ops->get_eeprom_len(netdev);
716         eeprom.offset = 0;
717
718         data = kmalloc(eeprom.len, GFP_KERNEL);
719         if (!data)
720                 return;
721
722         ops->get_eeprom(netdev, &eeprom, data);
723
724         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
725                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
726         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
727                 csum_new += data[i] + (data[i + 1] << 8);
728         csum_new = EEPROM_SUM - csum_new;
729
730         pr_err("/*********************/\n");
731         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
732         pr_err("Calculated              : 0x%04x\n", csum_new);
733
734         pr_err("Offset    Values\n");
735         pr_err("========  ======\n");
736         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
737
738         pr_err("Include this output when contacting your support provider.\n");
739         pr_err("This is not a software error! Something bad happened to\n");
740         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
741         pr_err("result in further problems, possibly loss of data,\n");
742         pr_err("corruption or system hangs!\n");
743         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
744         pr_err("which is invalid and requires you to set the proper MAC\n");
745         pr_err("address manually before continuing to enable this network\n");
746         pr_err("device. Please inspect the EEPROM dump and report the\n");
747         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
748         pr_err("/*********************/\n");
749
750         kfree(data);
751 }
752
753 /**
754  * e1000_is_need_ioport - determine whether an adapter needs ioport resources
755  * @pdev: PCI device information struct
756  *
757  * Return true if an adapter needs ioport resources
758  **/
759 static int e1000_is_need_ioport(struct pci_dev *pdev)
760 {
761         switch (pdev->device) {
762         case E1000_DEV_ID_82540EM:
763         case E1000_DEV_ID_82540EM_LOM:
764         case E1000_DEV_ID_82540EP:
765         case E1000_DEV_ID_82540EP_LOM:
766         case E1000_DEV_ID_82540EP_LP:
767         case E1000_DEV_ID_82541EI:
768         case E1000_DEV_ID_82541EI_MOBILE:
769         case E1000_DEV_ID_82541ER:
770         case E1000_DEV_ID_82541ER_LOM:
771         case E1000_DEV_ID_82541GI:
772         case E1000_DEV_ID_82541GI_LF:
773         case E1000_DEV_ID_82541GI_MOBILE:
774         case E1000_DEV_ID_82544EI_COPPER:
775         case E1000_DEV_ID_82544EI_FIBER:
776         case E1000_DEV_ID_82544GC_COPPER:
777         case E1000_DEV_ID_82544GC_LOM:
778         case E1000_DEV_ID_82545EM_COPPER:
779         case E1000_DEV_ID_82545EM_FIBER:
780         case E1000_DEV_ID_82546EB_COPPER:
781         case E1000_DEV_ID_82546EB_FIBER:
782         case E1000_DEV_ID_82546EB_QUAD_COPPER:
783                 return true;
784         default:
785                 return false;
786         }
787 }
788
789 static netdev_features_t e1000_fix_features(struct net_device *netdev,
790         netdev_features_t features)
791 {
792         /* Since there is no support for separate Rx/Tx vlan accel
793          * enable/disable make sure Tx flag is always in same state as Rx.
794          */
795         if (features & NETIF_F_HW_VLAN_CTAG_RX)
796                 features |= NETIF_F_HW_VLAN_CTAG_TX;
797         else
798                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
799
800         return features;
801 }
802
803 static int e1000_set_features(struct net_device *netdev,
804         netdev_features_t features)
805 {
806         struct e1000_adapter *adapter = netdev_priv(netdev);
807         netdev_features_t changed = features ^ netdev->features;
808
809         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
810                 e1000_vlan_mode(netdev, features);
811
812         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
813                 return 0;
814
815         netdev->features = features;
816         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
817
818         if (netif_running(netdev))
819                 e1000_reinit_locked(adapter);
820         else
821                 e1000_reset(adapter);
822
823         return 1;
824 }
825
826 static const struct net_device_ops e1000_netdev_ops = {
827         .ndo_open               = e1000_open,
828         .ndo_stop               = e1000_close,
829         .ndo_start_xmit         = e1000_xmit_frame,
830         .ndo_set_rx_mode        = e1000_set_rx_mode,
831         .ndo_set_mac_address    = e1000_set_mac,
832         .ndo_tx_timeout         = e1000_tx_timeout,
833         .ndo_change_mtu         = e1000_change_mtu,
834         .ndo_do_ioctl           = e1000_ioctl,
835         .ndo_validate_addr      = eth_validate_addr,
836         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
837         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
838 #ifdef CONFIG_NET_POLL_CONTROLLER
839         .ndo_poll_controller    = e1000_netpoll,
840 #endif
841         .ndo_fix_features       = e1000_fix_features,
842         .ndo_set_features       = e1000_set_features,
843 };
844
845 /**
846  * e1000_init_hw_struct - initialize members of hw struct
847  * @adapter: board private struct
848  * @hw: structure used by e1000_hw.c
849  *
850  * Factors out initialization of the e1000_hw struct to its own function
851  * that can be called very early at init (just after struct allocation).
852  * Fields are initialized based on PCI device information and
853  * OS network device settings (MTU size).
854  * Returns negative error codes if MAC type setup fails.
855  */
856 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
857                                 struct e1000_hw *hw)
858 {
859         struct pci_dev *pdev = adapter->pdev;
860
861         /* PCI config space info */
862         hw->vendor_id = pdev->vendor;
863         hw->device_id = pdev->device;
864         hw->subsystem_vendor_id = pdev->subsystem_vendor;
865         hw->subsystem_id = pdev->subsystem_device;
866         hw->revision_id = pdev->revision;
867
868         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
869
870         hw->max_frame_size = adapter->netdev->mtu +
871                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
872         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
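        /* For the default MTU of 1500 this works out to 1500 + 14 (Ethernet
         * header) + 4 (FCS) = 1518 bytes.
         */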
873
874         /* identify the MAC */
875         if (e1000_set_mac_type(hw)) {
876                 e_err(probe, "Unknown MAC Type\n");
877                 return -EIO;
878         }
879
880         switch (hw->mac_type) {
881         default:
882                 break;
883         case e1000_82541:
884         case e1000_82547:
885         case e1000_82541_rev_2:
886         case e1000_82547_rev_2:
887                 hw->phy_init_script = 1;
888                 break;
889         }
890
891         e1000_set_media_type(hw);
892         e1000_get_bus_info(hw);
893
894         hw->wait_autoneg_complete = false;
895         hw->tbi_compatibility_en = true;
896         hw->adaptive_ifs = true;
897
898         /* Copper options */
899
900         if (hw->media_type == e1000_media_type_copper) {
901                 hw->mdix = AUTO_ALL_MODES;
902                 hw->disable_polarity_correction = false;
903                 hw->master_slave = E1000_MASTER_SLAVE;
904         }
905
906         return 0;
907 }
908
909 /**
910  * e1000_probe - Device Initialization Routine
911  * @pdev: PCI device information struct
912  * @ent: entry in e1000_pci_tbl
913  *
914  * Returns 0 on success, negative on failure
915  *
916  * e1000_probe initializes an adapter identified by a pci_dev structure.
917  * The OS initialization, configuring of the adapter private structure,
918  * and a hardware reset occur.
919  **/
920 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
921 {
922         struct net_device *netdev;
923         struct e1000_adapter *adapter = NULL;
924         struct e1000_hw *hw;
925
926         static int cards_found;
927         static int global_quad_port_a; /* global ksp3 port a indication */
928         int i, err, pci_using_dac;
929         u16 eeprom_data = 0;
930         u16 tmp = 0;
931         u16 eeprom_apme_mask = E1000_EEPROM_APME;
932         int bars, need_ioport;
933         bool disable_dev = false;
934
935         /* do not allocate ioport bars when not needed */
936         need_ioport = e1000_is_need_ioport(pdev);
937         if (need_ioport) {
938                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
939                 err = pci_enable_device(pdev);
940         } else {
941                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
942                 err = pci_enable_device_mem(pdev);
943         }
944         if (err)
945                 return err;
946
947         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
948         if (err)
949                 goto err_pci_reg;
950
951         pci_set_master(pdev);
952         err = pci_save_state(pdev);
953         if (err)
954                 goto err_alloc_etherdev;
955
956         err = -ENOMEM;
957         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
958         if (!netdev)
959                 goto err_alloc_etherdev;
960
961         SET_NETDEV_DEV(netdev, &pdev->dev);
962
963         pci_set_drvdata(pdev, netdev);
964         adapter = netdev_priv(netdev);
965         adapter->netdev = netdev;
966         adapter->pdev = pdev;
967         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
968         adapter->bars = bars;
969         adapter->need_ioport = need_ioport;
970
971         hw = &adapter->hw;
972         hw->back = adapter;
973
974         err = -EIO;
975         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
976         if (!hw->hw_addr)
977                 goto err_ioremap;
978
979         if (adapter->need_ioport) {
980                 for (i = BAR_1; i < PCI_STD_NUM_BARS; i++) {
981                         if (pci_resource_len(pdev, i) == 0)
982                                 continue;
983                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
984                                 hw->io_base = pci_resource_start(pdev, i);
985                                 break;
986                         }
987                 }
988         }
989
990         /* make ready for any if (hw->...) below */
991         err = e1000_init_hw_struct(adapter, hw);
992         if (err)
993                 goto err_sw_init;
994
995         /* there is a workaround being applied below that limits
996          * 64-bit DMA addresses to 64-bit hardware.  There are some
997          * 32-bit adapters that Tx hang when given 64-bit DMA addresses
998          */
999         pci_using_dac = 0;
1000         if ((hw->bus_type == e1000_bus_type_pcix) &&
1001             !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
1002                 pci_using_dac = 1;
1003         } else {
1004                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1005                 if (err) {
1006                         pr_err("No usable DMA config, aborting\n");
1007                         goto err_dma;
1008                 }
1009         }
1010
1011         netdev->netdev_ops = &e1000_netdev_ops;
1012         e1000_set_ethtool_ops(netdev);
1013         netdev->watchdog_timeo = 5 * HZ;
1014         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1015
1016         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1017
1018         adapter->bd_number = cards_found;
1019
1020         /* setup the private structure */
1021
1022         err = e1000_sw_init(adapter);
1023         if (err)
1024                 goto err_sw_init;
1025
1026         err = -EIO;
1027         if (hw->mac_type == e1000_ce4100) {
1028                 hw->ce4100_gbe_mdio_base_virt =
1029                                         ioremap(pci_resource_start(pdev, BAR_1),
1030                                                 pci_resource_len(pdev, BAR_1));
1031
1032                 if (!hw->ce4100_gbe_mdio_base_virt)
1033                         goto err_mdio_ioremap;
1034         }
1035
1036         if (hw->mac_type >= e1000_82543) {
1037                 netdev->hw_features = NETIF_F_SG |
1038                                    NETIF_F_HW_CSUM |
1039                                    NETIF_F_HW_VLAN_CTAG_RX;
1040                 netdev->features = NETIF_F_HW_VLAN_CTAG_TX |
1041                                    NETIF_F_HW_VLAN_CTAG_FILTER;
1042         }
1043
1044         if ((hw->mac_type >= e1000_82544) &&
1045            (hw->mac_type != e1000_82547))
1046                 netdev->hw_features |= NETIF_F_TSO;
1047
1048         netdev->priv_flags |= IFF_SUPP_NOFCS;
1049
1050         netdev->features |= netdev->hw_features;
1051         netdev->hw_features |= (NETIF_F_RXCSUM |
1052                                 NETIF_F_RXALL |
1053                                 NETIF_F_RXFCS);
1054
1055         if (pci_using_dac) {
1056                 netdev->features |= NETIF_F_HIGHDMA;
1057                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1058         }
1059
1060         netdev->vlan_features |= (NETIF_F_TSO |
1061                                   NETIF_F_HW_CSUM |
1062                                   NETIF_F_SG);
1063
1064         /* Do not set IFF_UNICAST_FLT for VMware's 82545EM */
1065         if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1066             hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1067                 netdev->priv_flags |= IFF_UNICAST_FLT;
1068
1069         /* MTU range: 46 - 16110 */
1070         netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
1071         netdev->max_mtu = MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
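        /* i.e. ETH_ZLEN - ETH_HLEN = 60 - 14 = 46 for the minimum and, with
         * MAX_JUMBO_FRAME_SIZE of 0x3F00 (16128) bytes,
         * 16128 - (14 + 4) = 16110 for the maximum, matching the range
         * quoted above.
         */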
1072
1073         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1074
1075         /* initialize eeprom parameters */
1076         if (e1000_init_eeprom_params(hw)) {
1077                 e_err(probe, "EEPROM initialization failed\n");
1078                 goto err_eeprom;
1079         }
1080
1081         /* before reading the EEPROM, reset the controller to
1082          * put the device in a known good starting state
1083          */
1084
1085         e1000_reset_hw(hw);
1086
1087         /* make sure the EEPROM is good */
1088         if (e1000_validate_eeprom_checksum(hw) < 0) {
1089                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1090                 e1000_dump_eeprom(adapter);
1091                 /* set MAC address to all zeroes to invalidate and temporarily
1092                  * disable this device for the user. This blocks regular
1093                  * traffic while still permitting ethtool ioctls from reaching
1094                  * the hardware as well as allowing the user to run the
1095                  * interface after manually setting a hw addr using
1096                  * `ip link set address`
1097                  */
1098                 memset(hw->mac_addr, 0, netdev->addr_len);
1099         } else {
1100                 /* copy the MAC address out of the EEPROM */
1101                 if (e1000_read_mac_addr(hw))
1102                         e_err(probe, "EEPROM Read Error\n");
1103         }
1104         /* don't block initialization here due to bad MAC address */
1105         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1106
1107         if (!is_valid_ether_addr(netdev->dev_addr))
1108                 e_err(probe, "Invalid MAC Address\n");
1109
1110
1111         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1112         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1113                           e1000_82547_tx_fifo_stall_task);
1114         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1115         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1116
1117         e1000_check_options(adapter);
1118
1119         /* Initial Wake on LAN setting
1120          * If APM wake is enabled in the EEPROM,
1121          * enable the ACPI Magic Packet filter
1122          */
1123
1124         switch (hw->mac_type) {
1125         case e1000_82542_rev2_0:
1126         case e1000_82542_rev2_1:
1127         case e1000_82543:
1128                 break;
1129         case e1000_82544:
1130                 e1000_read_eeprom(hw,
1131                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1132                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1133                 break;
1134         case e1000_82546:
1135         case e1000_82546_rev_3:
1136                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1137                         e1000_read_eeprom(hw,
1138                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1139                         break;
1140                 }
1141                 fallthrough;
1142         default:
1143                 e1000_read_eeprom(hw,
1144                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1145                 break;
1146         }
1147         if (eeprom_data & eeprom_apme_mask)
1148                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1149
1150         /* now that we have the eeprom settings, apply the special cases
1151          * where the eeprom may be wrong or the board simply won't support
1152          * wake on lan on a particular port
1153          */
1154         switch (pdev->device) {
1155         case E1000_DEV_ID_82546GB_PCIE:
1156                 adapter->eeprom_wol = 0;
1157                 break;
1158         case E1000_DEV_ID_82546EB_FIBER:
1159         case E1000_DEV_ID_82546GB_FIBER:
1160                 /* Wake events only supported on port A for dual fiber
1161                  * regardless of eeprom setting
1162                  */
1163                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1164                         adapter->eeprom_wol = 0;
1165                 break;
1166         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1167                 /* if quad port adapter, disable WoL on all but port A */
1168                 if (global_quad_port_a != 0)
1169                         adapter->eeprom_wol = 0;
1170                 else
1171                         adapter->quad_port_a = true;
1172                 /* Reset for multiple quad port adapters */
1173                 if (++global_quad_port_a == 4)
1174                         global_quad_port_a = 0;
1175                 break;
1176         }
1177
1178         /* initialize the wol settings based on the eeprom settings */
1179         adapter->wol = adapter->eeprom_wol;
1180         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1181
1182         /* Auto detect PHY address */
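        /* (CE4100 only) the loop below probes MDIO addresses 0-31 and stops
         * at the first PHY whose PHY_ID2 register reads back neither 0x00
         * nor 0xFF; if nothing responds, probing bails out via err_eeprom.
         */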
1183         if (hw->mac_type == e1000_ce4100) {
1184                 for (i = 0; i < 32; i++) {
1185                         hw->phy_addr = i;
1186                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1187
1188                         if (tmp != 0 && tmp != 0xFF)
1189                                 break;
1190                 }
1191
1192                 if (i >= 32)
1193                         goto err_eeprom;
1194         }
1195
1196         /* reset the hardware with the new settings */
1197         e1000_reset(adapter);
1198
1199         strcpy(netdev->name, "eth%d");
1200         err = register_netdev(netdev);
1201         if (err)
1202                 goto err_register;
1203
1204         e1000_vlan_filter_on_off(adapter, false);
1205
1206         /* print bus type/speed/width info */
1207         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1208                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1209                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1210                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1211                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1212                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1213                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1214                netdev->dev_addr);
1215
1216         /* carrier off reporting is important to ethtool even BEFORE open */
1217         netif_carrier_off(netdev);
1218
1219         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1220
1221         cards_found++;
1222         return 0;
1223
1224 err_register:
1225 err_eeprom:
1226         e1000_phy_hw_reset(hw);
1227
1228         if (hw->flash_address)
1229                 iounmap(hw->flash_address);
1230         kfree(adapter->tx_ring);
1231         kfree(adapter->rx_ring);
1232 err_dma:
1233 err_sw_init:
1234 err_mdio_ioremap:
1235         iounmap(hw->ce4100_gbe_mdio_base_virt);
1236         iounmap(hw->hw_addr);
1237 err_ioremap:
1238         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1239         free_netdev(netdev);
1240 err_alloc_etherdev:
1241         pci_release_selected_regions(pdev, bars);
1242 err_pci_reg:
1243         if (!adapter || disable_dev)
1244                 pci_disable_device(pdev);
1245         return err;
1246 }
1247
1248 /**
1249  * e1000_remove - Device Removal Routine
1250  * @pdev: PCI device information struct
1251  *
1252  * e1000_remove is called by the PCI subsystem to alert the driver
1253  * that it should release a PCI device. That could be caused by a
1254  * Hot-Plug event, or because the driver is going to be removed from
1255  * memory.
1256  **/
1257 static void e1000_remove(struct pci_dev *pdev)
1258 {
1259         struct net_device *netdev = pci_get_drvdata(pdev);
1260         struct e1000_adapter *adapter = netdev_priv(netdev);
1261         struct e1000_hw *hw = &adapter->hw;
1262         bool disable_dev;
1263
1264         e1000_down_and_stop(adapter);
1265         e1000_release_manageability(adapter);
1266
1267         unregister_netdev(netdev);
1268
1269         e1000_phy_hw_reset(hw);
1270
1271         kfree(adapter->tx_ring);
1272         kfree(adapter->rx_ring);
1273
1274         if (hw->mac_type == e1000_ce4100)
1275                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1276         iounmap(hw->hw_addr);
1277         if (hw->flash_address)
1278                 iounmap(hw->flash_address);
1279         pci_release_selected_regions(pdev, adapter->bars);
1280
1281         disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
1282         free_netdev(netdev);
1283
1284         if (disable_dev)
1285                 pci_disable_device(pdev);
1286 }
1287
1288 /**
1289  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1290  * @adapter: board private structure to initialize
1291  *
1292  * e1000_sw_init initializes the Adapter private data structure.
1293  * e1000_init_hw_struct MUST be called before this function
1294  **/
1295 static int e1000_sw_init(struct e1000_adapter *adapter)
1296 {
1297         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1298
1299         adapter->num_tx_queues = 1;
1300         adapter->num_rx_queues = 1;
1301
1302         if (e1000_alloc_queues(adapter)) {
1303                 e_err(probe, "Unable to allocate memory for queues\n");
1304                 return -ENOMEM;
1305         }
1306
1307         /* Explicitly disable IRQ since the NIC can be in any state. */
1308         e1000_irq_disable(adapter);
1309
1310         spin_lock_init(&adapter->stats_lock);
1311
1312         set_bit(__E1000_DOWN, &adapter->flags);
1313
1314         return 0;
1315 }
1316
1317 /**
1318  * e1000_alloc_queues - Allocate memory for all rings
1319  * @adapter: board private structure to initialize
1320  *
1321  * We allocate one ring per queue at run-time since we don't know the
1322  * number of queues at compile-time.
1323  **/
1324 static int e1000_alloc_queues(struct e1000_adapter *adapter)
1325 {
1326         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1327                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1328         if (!adapter->tx_ring)
1329                 return -ENOMEM;
1330
1331         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1332                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1333         if (!adapter->rx_ring) {
1334                 kfree(adapter->tx_ring);
1335                 return -ENOMEM;
1336         }
1337
1338         return E1000_SUCCESS;
1339 }
1340
1341 /**
1342  * e1000_open - Called when a network interface is made active
1343  * @netdev: network interface device structure
1344  *
1345  * Returns 0 on success, negative value on failure
1346  *
1347  * The open entry point is called when a network interface is made
1348  * active by the system (IFF_UP).  At this point all resources needed
1349  * for transmit and receive operations are allocated, the interrupt
1350  * handler is registered with the OS, the watchdog task is started,
1351  * and the stack is notified that the interface is ready.
1352  **/
1353 int e1000_open(struct net_device *netdev)
1354 {
1355         struct e1000_adapter *adapter = netdev_priv(netdev);
1356         struct e1000_hw *hw = &adapter->hw;
1357         int err;
1358
1359         /* disallow open during test */
1360         if (test_bit(__E1000_TESTING, &adapter->flags))
1361                 return -EBUSY;
1362
1363         netif_carrier_off(netdev);
1364
1365         /* allocate transmit descriptors */
1366         err = e1000_setup_all_tx_resources(adapter);
1367         if (err)
1368                 goto err_setup_tx;
1369
1370         /* allocate receive descriptors */
1371         err = e1000_setup_all_rx_resources(adapter);
1372         if (err)
1373                 goto err_setup_rx;
1374
1375         e1000_power_up_phy(adapter);
1376
1377         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1378         if ((hw->mng_cookie.status &
1379                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1380                 e1000_update_mng_vlan(adapter);
1381         }
1382
1383         /* before we allocate an interrupt, we must be ready to handle it.
1384          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1385          * as soon as we call pci_request_irq, so we have to setup our
1386          * clean_rx handler before we do so.
1387          */
1388         e1000_configure(adapter);
1389
1390         err = e1000_request_irq(adapter);
1391         if (err)
1392                 goto err_req_irq;
1393
1394         /* From here on the code is the same as e1000_up() */
1395         clear_bit(__E1000_DOWN, &adapter->flags);
1396
1397         napi_enable(&adapter->napi);
1398
1399         e1000_irq_enable(adapter);
1400
1401         netif_start_queue(netdev);
1402
1403         /* fire a link status change interrupt to start the watchdog */
1404         ew32(ICS, E1000_ICS_LSC);
1405
1406         return E1000_SUCCESS;
1407
1408 err_req_irq:
1409         e1000_power_down_phy(adapter);
1410         e1000_free_all_rx_resources(adapter);
1411 err_setup_rx:
1412         e1000_free_all_tx_resources(adapter);
1413 err_setup_tx:
1414         e1000_reset(adapter);
1415
1416         return err;
1417 }
1418
1419 /**
1420  * e1000_close - Disables a network interface
1421  * @netdev: network interface device structure
1422  *
1423  * Returns 0, this is not allowed to fail
1424  *
1425  * The close entry point is called when an interface is de-activated
1426  * by the OS.  The hardware is still under the drivers control, but
1427  * needs to be disabled.  A global MAC reset is issued to stop the
1428  * hardware, and all transmit and receive resources are freed.
1429  **/
1430 int e1000_close(struct net_device *netdev)
1431 {
1432         struct e1000_adapter *adapter = netdev_priv(netdev);
1433         struct e1000_hw *hw = &adapter->hw;
1434         int count = E1000_CHECK_RESET_COUNT;
1435
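        /* Give any in-progress reset a bounded number of 10-20 ms sleeps to
         * release the __E1000_RESETTING bit before tearing the interface down.
         */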
1436         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
1437                 usleep_range(10000, 20000);
1438
1439         WARN_ON(count < 0);
1440
1441         /* signal that we're down so that the reset task will no longer run */
1442         set_bit(__E1000_DOWN, &adapter->flags);
1443         clear_bit(__E1000_RESETTING, &adapter->flags);
1444
1445         e1000_down(adapter);
1446         e1000_power_down_phy(adapter);
1447         e1000_free_irq(adapter);
1448
1449         e1000_free_all_tx_resources(adapter);
1450         e1000_free_all_rx_resources(adapter);
1451
1452         /* kill manageability vlan ID if supported, but not if a vlan with
1453          * the same ID is registered on the host OS (let 8021q kill it)
1454          */
1455         if ((hw->mng_cookie.status &
1456              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1457             !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1458                 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
1459                                        adapter->mng_vlan_id);
1460         }
1461
1462         return 0;
1463 }
1464
1465 /**
1466  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1467  * @adapter: address of board private structure
1468  * @start: address of beginning of memory
1469  * @len: length of memory
1470  **/
1471 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1472                                   unsigned long len)
1473 {
1474         struct e1000_hw *hw = &adapter->hw;
1475         unsigned long begin = (unsigned long)start;
1476         unsigned long end = begin + len;
1477
1478         /* First rev 82545 and 82546 need to not allow any memory
1479          * write location to cross 64k boundary due to errata 23
1480          */
1481         if (hw->mac_type == e1000_82545 ||
1482             hw->mac_type == e1000_ce4100 ||
1483             hw->mac_type == e1000_82546) {
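                /* begin and (end - 1) sit in the same 64 KiB block exactly
                 * when their upper address bits match, i.e. when XOR-ing
                 * them leaves nothing above bit 15
                 */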
1484                 return ((begin ^ (end - 1)) >> 16) == 0;
1485         }
1486
1487         return true;
1488 }
1489
1490 /**
1491  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1492  * @adapter: board private structure
1493  * @txdr:    tx descriptor ring (for a specific queue) to setup
1494  *
1495  * Return 0 on success, negative on failure
1496  **/
1497 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1498                                     struct e1000_tx_ring *txdr)
1499 {
1500         struct pci_dev *pdev = adapter->pdev;
1501         int size;
1502
1503         size = sizeof(struct e1000_tx_buffer) * txdr->count;
1504         txdr->buffer_info = vzalloc(size);
1505         if (!txdr->buffer_info)
1506                 return -ENOMEM;
1507
1508         /* round up to nearest 4K */
1509
1510         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1511         txdr->size = ALIGN(txdr->size, 4096);
1512
1513         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1514                                         GFP_KERNEL);
1515         if (!txdr->desc) {
1516 setup_tx_desc_die:
1517                 vfree(txdr->buffer_info);
1518                 return -ENOMEM;
1519         }
1520
1521         /* Fix for errata 23, can't cross 64kB boundary */
1522         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1523                 void *olddesc = txdr->desc;
1524                 dma_addr_t olddma = txdr->dma;
1525                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1526                       txdr->size, txdr->desc);
1527                 /* Try again, without freeing the previous */
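                /* The old buffer is intentionally kept allocated, presumably
                 * so the retry is likely to land in a different region; it is
                 * freed again once the outcome of the retry is known.
                 */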
1528                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1529                                                 &txdr->dma, GFP_KERNEL);
1530                 /* Failed allocation, critical failure */
1531                 if (!txdr->desc) {
1532                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1533                                           olddma);
1534                         goto setup_tx_desc_die;
1535                 }
1536
1537                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1538                         /* give up */
1539                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1540                                           txdr->dma);
1541                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1542                                           olddma);
1543                         e_err(probe, "Unable to allocate aligned memory "
1544                               "for the transmit descriptor ring\n");
1545                         vfree(txdr->buffer_info);
1546                         return -ENOMEM;
1547                 } else {
1548                         /* Free old allocation, new allocation was successful */
1549                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1550                                           olddma);
1551                 }
1552         }
1553         memset(txdr->desc, 0, txdr->size);
1554
1555         txdr->next_to_use = 0;
1556         txdr->next_to_clean = 0;
1557
1558         return 0;
1559 }
1560
1561 /**
1562  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1563  *                                (Descriptors) for all queues
1564  * @adapter: board private structure
1565  *
1566  * Return 0 on success, negative on failure
1567  **/
1568 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1569 {
1570         int i, err = 0;
1571
1572         for (i = 0; i < adapter->num_tx_queues; i++) {
1573                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1574                 if (err) {
1575                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1576                         for (i-- ; i >= 0; i--)
1577                                 e1000_free_tx_resources(adapter,
1578                                                         &adapter->tx_ring[i]);
1579                         break;
1580                 }
1581         }
1582
1583         return err;
1584 }
1585
1586 /**
1587  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1588  * @adapter: board private structure
1589  *
1590  * Configure the Tx unit of the MAC after a reset.
1591  **/
1592 static void e1000_configure_tx(struct e1000_adapter *adapter)
1593 {
1594         u64 tdba;
1595         struct e1000_hw *hw = &adapter->hw;
1596         u32 tdlen, tctl, tipg;
1597         u32 ipgr1, ipgr2;
1598
1599         /* Setup the HW Tx Head and Tail descriptor pointers */
1600
1601         switch (adapter->num_tx_queues) {
1602         case 1:
1603         default:
1604                 tdba = adapter->tx_ring[0].dma;
1605                 tdlen = adapter->tx_ring[0].count *
1606                         sizeof(struct e1000_tx_desc);
1607                 ew32(TDLEN, tdlen);
1608                 ew32(TDBAH, (tdba >> 32));
1609                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1610                 ew32(TDT, 0);
1611                 ew32(TDH, 0);
1612                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
1613                                            E1000_TDH : E1000_82542_TDH);
1614                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
1615                                            E1000_TDT : E1000_82542_TDT);
1616                 break;
1617         }
1618
1619         /* Set the default values for the Tx Inter Packet Gap timer */
1620         if ((hw->media_type == e1000_media_type_fiber ||
1621              hw->media_type == e1000_media_type_internal_serdes))
1622                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1623         else
1624                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1625
1626         switch (hw->mac_type) {
1627         case e1000_82542_rev2_0:
1628         case e1000_82542_rev2_1:
1629                 tipg = DEFAULT_82542_TIPG_IPGT;
1630                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1631                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1632                 break;
1633         default:
1634                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1635                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1636                 break;
1637         }
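        /* IPGT sits in the low bits of TIPG; IPGR1 and IPGR2 are OR-ed in
         * above it at their respective register shifts
         */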
1638         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1639         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1640         ew32(TIPG, tipg);
1641
1642         /* Set the Tx Interrupt Delay register */
1643
1644         ew32(TIDV, adapter->tx_int_delay);
1645         if (hw->mac_type >= e1000_82540)
1646                 ew32(TADV, adapter->tx_abs_int_delay);
1647
1648         /* Program the Transmit Control Register */
1649
1650         tctl = er32(TCTL);
1651         tctl &= ~E1000_TCTL_CT;
1652         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1653                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1654
1655         e1000_config_collision_dist(hw);
1656
1657         /* Setup Transmit Descriptor Settings for eop descriptor */
1658         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1659
1660         /* only set IDE if we are delaying interrupts using the timers */
1661         if (adapter->tx_int_delay)
1662                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1663
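        /* Older (pre-82543) parts use the RPS (report packet sent) bit;
         * later ones request descriptor write-back with RS (report status)
         */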
1664         if (hw->mac_type < e1000_82543)
1665                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1666         else
1667                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1668
1669         /* Cache if we're 82544 running in PCI-X because we'll
1670          * need this to apply a workaround later in the send path.
1671          */
1672         if (hw->mac_type == e1000_82544 &&
1673             hw->bus_type == e1000_bus_type_pcix)
1674                 adapter->pcix_82544 = true;
1675
1676         ew32(TCTL, tctl);
1677
1678 }
1679
1680 /**
1681  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1682  * @adapter: board private structure
1683  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1684  *
1685  * Returns 0 on success, negative on failure
1686  **/
1687 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1688                                     struct e1000_rx_ring *rxdr)
1689 {
1690         struct pci_dev *pdev = adapter->pdev;
1691         int size, desc_len;
1692
1693         size = sizeof(struct e1000_rx_buffer) * rxdr->count;
1694         rxdr->buffer_info = vzalloc(size);
1695         if (!rxdr->buffer_info)
1696                 return -ENOMEM;
1697
1698         desc_len = sizeof(struct e1000_rx_desc);
1699
1700         /* Round up to nearest 4K */
1701
1702         rxdr->size = rxdr->count * desc_len;
1703         rxdr->size = ALIGN(rxdr->size, 4096);
1704
1705         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1706                                         GFP_KERNEL);
1707         if (!rxdr->desc) {
1708 setup_rx_desc_die:
1709                 vfree(rxdr->buffer_info);
1710                 return -ENOMEM;
1711         }
1712
1713         /* Fix for errata 23, can't cross 64kB boundary */
1714         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1715                 void *olddesc = rxdr->desc;
1716                 dma_addr_t olddma = rxdr->dma;
1717                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1718                       rxdr->size, rxdr->desc);
1719                 /* Try again, without freeing the previous */
1720                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1721                                                 &rxdr->dma, GFP_KERNEL);
1722                 /* Failed allocation, critical failure */
1723                 if (!rxdr->desc) {
1724                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1725                                           olddma);
1726                         goto setup_rx_desc_die;
1727                 }
1728
1729                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1730                         /* give up */
1731                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1732                                           rxdr->dma);
1733                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1734                                           olddma);
1735                         e_err(probe, "Unable to allocate aligned memory for "
1736                               "the Rx descriptor ring\n");
1737                         goto setup_rx_desc_die;
1738                 } else {
1739                         /* Free old allocation, new allocation was successful */
1740                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1741                                           olddma);
1742                 }
1743         }
1744         memset(rxdr->desc, 0, rxdr->size);
1745
1746         rxdr->next_to_clean = 0;
1747         rxdr->next_to_use = 0;
1748         rxdr->rx_skb_top = NULL;
1749
1750         return 0;
1751 }
1752
1753 /**
1754  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1755  *                                (Descriptors) for all queues
1756  * @adapter: board private structure
1757  *
1758  * Return 0 on success, negative on failure
1759  **/
1760 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1761 {
1762         int i, err = 0;
1763
1764         for (i = 0; i < adapter->num_rx_queues; i++) {
1765                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1766                 if (err) {
1767                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1768                         for (i-- ; i >= 0; i--)
1769                                 e1000_free_rx_resources(adapter,
1770                                                         &adapter->rx_ring[i]);
1771                         break;
1772                 }
1773         }
1774
1775         return err;
1776 }
1777
1778 /**
1779  * e1000_setup_rctl - configure the receive control registers
1780  * @adapter: Board private structure
1781  **/
1782 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1783 {
1784         struct e1000_hw *hw = &adapter->hw;
1785         u32 rctl;
1786
1787         rctl = er32(RCTL);
1788
1789         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1790
1791         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1792                 E1000_RCTL_RDMTS_HALF |
1793                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1794
1795         if (hw->tbi_compatibility_on == 1)
1796                 rctl |= E1000_RCTL_SBP;
1797         else
1798                 rctl &= ~E1000_RCTL_SBP;
1799
1800         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1801                 rctl &= ~E1000_RCTL_LPE;
1802         else
1803                 rctl |= E1000_RCTL_LPE;
1804
1805         /* Setup buffer sizes */
1806         rctl &= ~E1000_RCTL_SZ_4096;
1807         rctl |= E1000_RCTL_BSEX;
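        /* BSEX selects the extended (larger) buffer size encodings; it is set
         * by default here and cleared again only for the standard 2048-byte
         * buffers below
         */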
1808         switch (adapter->rx_buffer_len) {
1809         case E1000_RXBUFFER_2048:
1810         default:
1811                 rctl |= E1000_RCTL_SZ_2048;
1812                 rctl &= ~E1000_RCTL_BSEX;
1813                 break;
1814         case E1000_RXBUFFER_4096:
1815                 rctl |= E1000_RCTL_SZ_4096;
1816                 break;
1817         case E1000_RXBUFFER_8192:
1818                 rctl |= E1000_RCTL_SZ_8192;
1819                 break;
1820         case E1000_RXBUFFER_16384:
1821                 rctl |= E1000_RCTL_SZ_16384;
1822                 break;
1823         }
1824
1825         /* This is useful for sniffing bad packets. */
1826         if (adapter->netdev->features & NETIF_F_RXALL) {
1827                 /* UPE and MPE will be handled by normal PROMISC logic
1828                  * in e1000_set_rx_mode
1829                  */
1830                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1831                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1832                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1833
1834                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1835                           E1000_RCTL_DPF | /* Allow filtered pause */
1836                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1837                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1838                  * and that breaks VLANs.
1839                  */
1840         }
1841
1842         ew32(RCTL, rctl);
1843 }
1844
1845 /**
1846  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1847  * @adapter: board private structure
1848  *
1849  * Configure the Rx unit of the MAC after a reset.
1850  **/
1851 static void e1000_configure_rx(struct e1000_adapter *adapter)
1852 {
1853         u64 rdba;
1854         struct e1000_hw *hw = &adapter->hw;
1855         u32 rdlen, rctl, rxcsum;
1856
1857         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1858                 rdlen = adapter->rx_ring[0].count *
1859                         sizeof(struct e1000_rx_desc);
1860                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1861                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1862         } else {
1863                 rdlen = adapter->rx_ring[0].count *
1864                         sizeof(struct e1000_rx_desc);
1865                 adapter->clean_rx = e1000_clean_rx_irq;
1866                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1867         }
1868
1869         /* disable receives while setting up the descriptors */
1870         rctl = er32(RCTL);
1871         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1872
1873         /* set the Receive Delay Timer Register */
1874         ew32(RDTR, adapter->rx_int_delay);
1875
1876         if (hw->mac_type >= e1000_82540) {
1877                 ew32(RADV, adapter->rx_abs_int_delay);
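                /* adapter->itr is an interrupts/sec target; the ITR register
                 * takes an interval in 256 ns units, so 10^9 / (itr * 256)
                 * converts it (e.g. 8000 ints/s -> ~488, roughly 125 us)
                 */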
1878                 if (adapter->itr_setting != 0)
1879                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1880         }
1881
1882         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1883          * the Base and Length of the Rx Descriptor Ring
1884          */
1885         switch (adapter->num_rx_queues) {
1886         case 1:
1887         default:
1888                 rdba = adapter->rx_ring[0].dma;
1889                 ew32(RDLEN, rdlen);
1890                 ew32(RDBAH, (rdba >> 32));
1891                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1892                 ew32(RDT, 0);
1893                 ew32(RDH, 0);
1894                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
1895                                            E1000_RDH : E1000_82542_RDH);
1896                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
1897                                            E1000_RDT : E1000_82542_RDT);
1898                 break;
1899         }
1900
1901         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1902         if (hw->mac_type >= e1000_82543) {
1903                 rxcsum = er32(RXCSUM);
1904                 if (adapter->rx_csum)
1905                         rxcsum |= E1000_RXCSUM_TUOFL;
1906                 else
1907                         /* don't need to clear IPPCSE as it defaults to 0 */
1908                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1909                 ew32(RXCSUM, rxcsum);
1910         }
1911
1912         /* Enable Receives */
1913         ew32(RCTL, rctl | E1000_RCTL_EN);
1914 }
1915
1916 /**
1917  * e1000_free_tx_resources - Free Tx Resources per Queue
1918  * @adapter: board private structure
1919  * @tx_ring: Tx descriptor ring for a specific queue
1920  *
1921  * Free all transmit software resources
1922  **/
1923 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1924                                     struct e1000_tx_ring *tx_ring)
1925 {
1926         struct pci_dev *pdev = adapter->pdev;
1927
1928         e1000_clean_tx_ring(adapter, tx_ring);
1929
1930         vfree(tx_ring->buffer_info);
1931         tx_ring->buffer_info = NULL;
1932
1933         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1934                           tx_ring->dma);
1935
1936         tx_ring->desc = NULL;
1937 }
1938
1939 /**
1940  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1941  * @adapter: board private structure
1942  *
1943  * Free all transmit software resources
1944  **/
1945 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1946 {
1947         int i;
1948
1949         for (i = 0; i < adapter->num_tx_queues; i++)
1950                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1951 }
1952
1953 static void
1954 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1955                                  struct e1000_tx_buffer *buffer_info)
1956 {
1957         if (buffer_info->dma) {
1958                 if (buffer_info->mapped_as_page)
1959                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1960                                        buffer_info->length, DMA_TO_DEVICE);
1961                 else
1962                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1963                                          buffer_info->length,
1964                                          DMA_TO_DEVICE);
1965                 buffer_info->dma = 0;
1966         }
1967         if (buffer_info->skb) {
1968                 dev_kfree_skb_any(buffer_info->skb);
1969                 buffer_info->skb = NULL;
1970         }
1971         buffer_info->time_stamp = 0;
1972         /* buffer_info must be completely set up in the transmit path */
1973 }
1974
1975 /**
1976  * e1000_clean_tx_ring - Free Tx Buffers
1977  * @adapter: board private structure
1978  * @tx_ring: ring to be cleaned
1979  **/
1980 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1981                                 struct e1000_tx_ring *tx_ring)
1982 {
1983         struct e1000_hw *hw = &adapter->hw;
1984         struct e1000_tx_buffer *buffer_info;
1985         unsigned long size;
1986         unsigned int i;
1987
1988         /* Free all the Tx ring sk_buffs */
1989
1990         for (i = 0; i < tx_ring->count; i++) {
1991                 buffer_info = &tx_ring->buffer_info[i];
1992                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
1993         }
1994
1995         netdev_reset_queue(adapter->netdev);
1996         size = sizeof(struct e1000_tx_buffer) * tx_ring->count;
1997         memset(tx_ring->buffer_info, 0, size);
1998
1999         /* Zero out the descriptor ring */
2000
2001         memset(tx_ring->desc, 0, tx_ring->size);
2002
2003         tx_ring->next_to_use = 0;
2004         tx_ring->next_to_clean = 0;
2005         tx_ring->last_tx_tso = false;
2006
2007         writel(0, hw->hw_addr + tx_ring->tdh);
2008         writel(0, hw->hw_addr + tx_ring->tdt);
2009 }
2010
2011 /**
2012  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2013  * @adapter: board private structure
2014  **/
2015 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2016 {
2017         int i;
2018
2019         for (i = 0; i < adapter->num_tx_queues; i++)
2020                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2021 }
2022
2023 /**
2024  * e1000_free_rx_resources - Free Rx Resources
2025  * @adapter: board private structure
2026  * @rx_ring: ring to clean the resources from
2027  *
2028  * Free all receive software resources
2029  **/
2030 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2031                                     struct e1000_rx_ring *rx_ring)
2032 {
2033         struct pci_dev *pdev = adapter->pdev;
2034
2035         e1000_clean_rx_ring(adapter, rx_ring);
2036
2037         vfree(rx_ring->buffer_info);
2038         rx_ring->buffer_info = NULL;
2039
2040         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2041                           rx_ring->dma);
2042
2043         rx_ring->desc = NULL;
2044 }
2045
2046 /**
2047  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2048  * @adapter: board private structure
2049  *
2050  * Free all receive software resources
2051  **/
2052 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2053 {
2054         int i;
2055
2056         for (i = 0; i < adapter->num_rx_queues; i++)
2057                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2058 }
2059
2060 #define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
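/* A receive fragment carries E1000_HEADROOM of skb headroom, the Rx buffer
 * itself and a trailing struct skb_shared_info, each padded by
 * SKB_DATA_ALIGN; e1000_alloc_frag() returns a pointer advanced past the
 * headroom so the DMA buffer starts at the IP-aligned offset.
 */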
2061 static unsigned int e1000_frag_len(const struct e1000_adapter *a)
2062 {
2063         return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) +
2064                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2065 }
2066
2067 static void *e1000_alloc_frag(const struct e1000_adapter *a)
2068 {
2069         unsigned int len = e1000_frag_len(a);
2070         u8 *data = netdev_alloc_frag(len);
2071
2072         if (likely(data))
2073                 data += E1000_HEADROOM;
2074         return data;
2075 }
2076
2077 /**
2078  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2079  * @adapter: board private structure
2080  * @rx_ring: ring to free buffers from
2081  **/
2082 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2083                                 struct e1000_rx_ring *rx_ring)
2084 {
2085         struct e1000_hw *hw = &adapter->hw;
2086         struct e1000_rx_buffer *buffer_info;
2087         struct pci_dev *pdev = adapter->pdev;
2088         unsigned long size;
2089         unsigned int i;
2090
2091         /* Free all the Rx netfrags */
2092         for (i = 0; i < rx_ring->count; i++) {
2093                 buffer_info = &rx_ring->buffer_info[i];
2094                 if (adapter->clean_rx == e1000_clean_rx_irq) {
2095                         if (buffer_info->dma)
2096                                 dma_unmap_single(&pdev->dev, buffer_info->dma,
2097                                                  adapter->rx_buffer_len,
2098                                                  DMA_FROM_DEVICE);
2099                         if (buffer_info->rxbuf.data) {
2100                                 skb_free_frag(buffer_info->rxbuf.data);
2101                                 buffer_info->rxbuf.data = NULL;
2102                         }
2103                 } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2104                         if (buffer_info->dma)
2105                                 dma_unmap_page(&pdev->dev, buffer_info->dma,
2106                                                adapter->rx_buffer_len,
2107                                                DMA_FROM_DEVICE);
2108                         if (buffer_info->rxbuf.page) {
2109                                 put_page(buffer_info->rxbuf.page);
2110                                 buffer_info->rxbuf.page = NULL;
2111                         }
2112                 }
2113
2114                 buffer_info->dma = 0;
2115         }
2116
2117         /* there also may be some cached data from a chained receive */
2118         napi_free_frags(&adapter->napi);
2119         rx_ring->rx_skb_top = NULL;
2120
2121         size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
2122         memset(rx_ring->buffer_info, 0, size);
2123
2124         /* Zero out the descriptor ring */
2125         memset(rx_ring->desc, 0, rx_ring->size);
2126
2127         rx_ring->next_to_clean = 0;
2128         rx_ring->next_to_use = 0;
2129
2130         writel(0, hw->hw_addr + rx_ring->rdh);
2131         writel(0, hw->hw_addr + rx_ring->rdt);
2132 }
2133
2134 /**
2135  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2136  * @adapter: board private structure
2137  **/
2138 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2139 {
2140         int i;
2141
2142         for (i = 0; i < adapter->num_rx_queues; i++)
2143                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2144 }
2145
2146 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2147  * and memory write-and-invalidate (MWI) disabled for certain operations
2148  */
2149 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2150 {
2151         struct e1000_hw *hw = &adapter->hw;
2152         struct net_device *netdev = adapter->netdev;
2153         u32 rctl;
2154
2155         e1000_pci_clear_mwi(hw);
2156
2157         rctl = er32(RCTL);
2158         rctl |= E1000_RCTL_RST;
2159         ew32(RCTL, rctl);
2160         E1000_WRITE_FLUSH();
2161         mdelay(5);
2162
2163         if (netif_running(netdev))
2164                 e1000_clean_all_rx_rings(adapter);
2165 }
2166
2167 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2168 {
2169         struct e1000_hw *hw = &adapter->hw;
2170         struct net_device *netdev = adapter->netdev;
2171         u32 rctl;
2172
2173         rctl = er32(RCTL);
2174         rctl &= ~E1000_RCTL_RST;
2175         ew32(RCTL, rctl);
2176         E1000_WRITE_FLUSH();
2177         mdelay(5);
2178
2179         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2180                 e1000_pci_set_mwi(hw);
2181
2182         if (netif_running(netdev)) {
2183                 /* No need to loop, because 82542 supports only 1 queue */
2184                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2185                 e1000_configure_rx(adapter);
2186                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2187         }
2188 }
2189
2190 /**
2191  * e1000_set_mac - Change the Ethernet Address of the NIC
2192  * @netdev: network interface device structure
2193  * @p: pointer to an address structure
2194  *
2195  * Returns 0 on success, negative on failure
2196  **/
2197 static int e1000_set_mac(struct net_device *netdev, void *p)
2198 {
2199         struct e1000_adapter *adapter = netdev_priv(netdev);
2200         struct e1000_hw *hw = &adapter->hw;
2201         struct sockaddr *addr = p;
2202
2203         if (!is_valid_ether_addr(addr->sa_data))
2204                 return -EADDRNOTAVAIL;
2205
2206         /* 82542 2.0 needs to be in reset to write receive address registers */
2207
2208         if (hw->mac_type == e1000_82542_rev2_0)
2209                 e1000_enter_82542_rst(adapter);
2210
2211         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2212         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2213
2214         e1000_rar_set(hw, hw->mac_addr, 0);
2215
2216         if (hw->mac_type == e1000_82542_rev2_0)
2217                 e1000_leave_82542_rst(adapter);
2218
2219         return 0;
2220 }
2221
2222 /**
2223  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2224  * @netdev: network interface device structure
2225  *
2226  * The set_rx_mode entry point is called whenever the unicast or multicast
2227  * address lists or the network interface flags are updated. This routine is
2228  * responsible for configuring the hardware for proper unicast, multicast,
2229  * promiscuous mode, and all-multi behavior.
2230  **/
2231 static void e1000_set_rx_mode(struct net_device *netdev)
2232 {
2233         struct e1000_adapter *adapter = netdev_priv(netdev);
2234         struct e1000_hw *hw = &adapter->hw;
2235         struct netdev_hw_addr *ha;
2236         bool use_uc = false;
2237         u32 rctl;
2238         u32 hash_value;
2239         int i, rar_entries = E1000_RAR_ENTRIES;
2240         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2241         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2242
2243         if (!mcarray)
2244                 return;
2245
2246         /* Check for Promiscuous and All Multicast modes */
2247
2248         rctl = er32(RCTL);
2249
2250         if (netdev->flags & IFF_PROMISC) {
2251                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2252                 rctl &= ~E1000_RCTL_VFE;
2253         } else {
2254                 if (netdev->flags & IFF_ALLMULTI)
2255                         rctl |= E1000_RCTL_MPE;
2256                 else
2257                         rctl &= ~E1000_RCTL_MPE;
2258                 /* Enable VLAN filter if there is a VLAN */
2259                 if (e1000_vlan_used(adapter))
2260                         rctl |= E1000_RCTL_VFE;
2261         }
2262
2263         if (netdev_uc_count(netdev) > rar_entries - 1) {
2264                 rctl |= E1000_RCTL_UPE;
2265         } else if (!(netdev->flags & IFF_PROMISC)) {
2266                 rctl &= ~E1000_RCTL_UPE;
2267                 use_uc = true;
2268         }
2269
2270         ew32(RCTL, rctl);
2271
2272         /* 82542 2.0 needs to be in reset to write receive address registers */
2273
2274         if (hw->mac_type == e1000_82542_rev2_0)
2275                 e1000_enter_82542_rst(adapter);
2276
2277         /* load the first 14 addresses into the exact filters 1-14. Unicast
2278          * addresses take precedence to avoid disabling unicast filtering
2279          * when possible.
2280          *
2281          * RAR 0 is used for the station MAC address; if there are fewer
2282          * than 14 addresses, go ahead and clear the remaining filters
2283          */
2284         i = 1;
2285         if (use_uc)
2286                 netdev_for_each_uc_addr(ha, netdev) {
2287                         if (i == rar_entries)
2288                                 break;
2289                         e1000_rar_set(hw, ha->addr, i++);
2290                 }
2291
2292         netdev_for_each_mc_addr(ha, netdev) {
2293                 if (i == rar_entries) {
2294                         /* load any remaining addresses into the hash table */
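                        /* The hash indexes a 4096-bit table spread over 128
                         * 32-bit MTA registers: bits 11:5 pick the register,
                         * bits 4:0 the bit within it
                         */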
2295                         u32 hash_reg, hash_bit, mta;
2296                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2297                         hash_reg = (hash_value >> 5) & 0x7F;
2298                         hash_bit = hash_value & 0x1F;
2299                         mta = (1 << hash_bit);
2300                         mcarray[hash_reg] |= mta;
2301                 } else {
2302                         e1000_rar_set(hw, ha->addr, i++);
2303                 }
2304         }
2305
2306         for (; i < rar_entries; i++) {
2307                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2308                 E1000_WRITE_FLUSH();
2309                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2310                 E1000_WRITE_FLUSH();
2311         }
2312
2313         /* write the hash table completely, from the last register down, to
2314          * avoid both broken write-combining chipsets and flushing each write
2315          */
2316         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2317                 /* The 82544 has an erratum where writing an odd offset can
2318                  * overwrite the previous even offset; writing backwards over
2319                  * the range works around the issue by always writing the odd
2320                  * offset first
2321                  */
2322                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2323         }
2324         E1000_WRITE_FLUSH();
2325
2326         if (hw->mac_type == e1000_82542_rev2_0)
2327                 e1000_leave_82542_rst(adapter);
2328
2329         kfree(mcarray);
2330 }
2331
2332 /**
2333  * e1000_update_phy_info_task - get phy info
2334  * @work: work struct contained inside adapter struct
2335  *
2336  * Need to wait a few seconds after link up to get diagnostic information from
2337  * the phy
2338  */
2339 static void e1000_update_phy_info_task(struct work_struct *work)
2340 {
2341         struct e1000_adapter *adapter = container_of(work,
2342                                                      struct e1000_adapter,
2343                                                      phy_info_task.work);
2344
2345         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2346 }
2347
2348 /**
2349  * e1000_82547_tx_fifo_stall_task - task to complete work
2350  * @work: work struct contained inside adapter struct
2351  **/
2352 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2353 {
2354         struct e1000_adapter *adapter = container_of(work,
2355                                                      struct e1000_adapter,
2356                                                      fifo_stall_task.work);
2357         struct e1000_hw *hw = &adapter->hw;
2358         struct net_device *netdev = adapter->netdev;
2359         u32 tctl;
2360
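        /* Part of the 82547 Tx FIFO workaround: once both the descriptor
         * ring (TDT == TDH) and the on-chip FIFO (current vs. saved
         * head/tail) have drained, transmits are paused, the FIFO pointers
         * are rewound to tx_head_addr and the queue is woken back up.
         */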
2361         if (atomic_read(&adapter->tx_fifo_stall)) {
2362                 if ((er32(TDT) == er32(TDH)) &&
2363                    (er32(TDFT) == er32(TDFH)) &&
2364                    (er32(TDFTS) == er32(TDFHS))) {
2365                         tctl = er32(TCTL);
2366                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2367                         ew32(TDFT, adapter->tx_head_addr);
2368                         ew32(TDFH, adapter->tx_head_addr);
2369                         ew32(TDFTS, adapter->tx_head_addr);
2370                         ew32(TDFHS, adapter->tx_head_addr);
2371                         ew32(TCTL, tctl);
2372                         E1000_WRITE_FLUSH();
2373
2374                         adapter->tx_fifo_head = 0;
2375                         atomic_set(&adapter->tx_fifo_stall, 0);
2376                         netif_wake_queue(netdev);
2377                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2378                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2379                 }
2380         }
2381 }
2382
2383 bool e1000_has_link(struct e1000_adapter *adapter)
2384 {
2385         struct e1000_hw *hw = &adapter->hw;
2386         bool link_active = false;
2387
2388         /* get_link_status is set on LSC (link status) interrupt or rx
2389          * sequence error interrupt (except on intel ce4100).
2390          * get_link_status will stay false until the
2391          * e1000_check_for_link establishes link for copper adapters
2392          * ONLY
2393          */
2394         switch (hw->media_type) {
2395         case e1000_media_type_copper:
2396                 if (hw->mac_type == e1000_ce4100)
2397                         hw->get_link_status = 1;
2398                 if (hw->get_link_status) {
2399                         e1000_check_for_link(hw);
2400                         link_active = !hw->get_link_status;
2401                 } else {
2402                         link_active = true;
2403                 }
2404                 break;
2405         case e1000_media_type_fiber:
2406                 e1000_check_for_link(hw);
2407                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2408                 break;
2409         case e1000_media_type_internal_serdes:
2410                 e1000_check_for_link(hw);
2411                 link_active = hw->serdes_has_link;
2412                 break;
2413         default:
2414                 break;
2415         }
2416
2417         return link_active;
2418 }
2419
2420 /**
2421  * e1000_watchdog - work function
2422  * @work: work struct contained inside adapter struct
2423  **/
2424 static void e1000_watchdog(struct work_struct *work)
2425 {
2426         struct e1000_adapter *adapter = container_of(work,
2427                                                      struct e1000_adapter,
2428                                                      watchdog_task.work);
2429         struct e1000_hw *hw = &adapter->hw;
2430         struct net_device *netdev = adapter->netdev;
2431         struct e1000_tx_ring *txdr = adapter->tx_ring;
2432         u32 link, tctl;
2433
2434         link = e1000_has_link(adapter);
2435         if (netif_carrier_ok(netdev) && link)
2436                 goto link_up;
2437
2438         if (link) {
2439                 if (!netif_carrier_ok(netdev)) {
2440                         u32 ctrl;
2441                         /* update snapshot of PHY registers on LSC */
2442                         e1000_get_speed_and_duplex(hw,
2443                                                    &adapter->link_speed,
2444                                                    &adapter->link_duplex);
2445
2446                         ctrl = er32(CTRL);
2447                         pr_info("%s NIC Link is Up %d Mbps %s, "
2448                                 "Flow Control: %s\n",
2449                                 netdev->name,
2450                                 adapter->link_speed,
2451                                 adapter->link_duplex == FULL_DUPLEX ?
2452                                 "Full Duplex" : "Half Duplex",
2453                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2454                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2455                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2456                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2457
2458                         /* adjust timeout factor according to speed/duplex */
2459                         adapter->tx_timeout_factor = 1;
2460                         switch (adapter->link_speed) {
2461                         case SPEED_10:
2462                                 adapter->tx_timeout_factor = 16;
2463                                 break;
2464                         case SPEED_100:
2465                                 /* maybe add some timeout factor ? */
2466                                 break;
2467                         }
2468
2469                         /* enable transmits in the hardware */
2470                         tctl = er32(TCTL);
2471                         tctl |= E1000_TCTL_EN;
2472                         ew32(TCTL, tctl);
2473
2474                         netif_carrier_on(netdev);
2475                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2476                                 schedule_delayed_work(&adapter->phy_info_task,
2477                                                       2 * HZ);
2478                         adapter->smartspeed = 0;
2479                 }
2480         } else {
2481                 if (netif_carrier_ok(netdev)) {
2482                         adapter->link_speed = 0;
2483                         adapter->link_duplex = 0;
2484                         pr_info("%s NIC Link is Down\n",
2485                                 netdev->name);
2486                         netif_carrier_off(netdev);
2487
2488                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2489                                 schedule_delayed_work(&adapter->phy_info_task,
2490                                                       2 * HZ);
2491                 }
2492
2493                 e1000_smartspeed(adapter);
2494         }
2495
2496 link_up:
2497         e1000_update_stats(adapter);
2498
2499         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2500         adapter->tpt_old = adapter->stats.tpt;
2501         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2502         adapter->colc_old = adapter->stats.colc;
2503
2504         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2505         adapter->gorcl_old = adapter->stats.gorcl;
2506         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2507         adapter->gotcl_old = adapter->stats.gotcl;
2508
2509         e1000_update_adaptive(hw);
2510
2511         if (!netif_carrier_ok(netdev)) {
2512                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2513                         /* We've lost link, so the controller stops DMA,
2514                          * but we've got queued Tx work that's never going
2515                          * to get done, so reset controller to flush Tx.
2516                          * (Do the reset outside of interrupt context).
2517                          */
2518                         adapter->tx_timeout_count++;
2519                         schedule_work(&adapter->reset_task);
2520                         /* exit immediately since reset is imminent */
2521                         return;
2522                 }
2523         }
2524
2525         /* Simple mode for Interrupt Throttle Rate (ITR) */
2526         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2527                 /* Symmetric Tx/Rx gets a reduced ITR=2000;
2528                  * Total asymmetrical Tx or Rx gets ITR=8000;
2529                  * everyone else is between 2000-8000.
2530                  */
2531                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2532                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2533                             adapter->gotcl - adapter->gorcl :
2534                             adapter->gorcl - adapter->gotcl) / 10000;
2535                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
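                /* e.g. perfectly symmetric traffic (dif == 0) yields 2000
                 * ints/s, completely one-sided traffic (dif == goc) yields
                 * the full 8000
                 */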
2536
2537                 ew32(ITR, 1000000000 / (itr * 256));
2538         }
2539
2540         /* Cause software interrupt to ensure rx ring is cleaned */
2541         ew32(ICS, E1000_ICS_RXDMT0);
2542
2543         /* Force detection of hung controller every watchdog period */
2544         adapter->detect_tx_hung = true;
2545
2546         /* Reschedule the task */
2547         if (!test_bit(__E1000_DOWN, &adapter->flags))
2548                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2549 }
2550
2551 enum latency_range {
2552         lowest_latency = 0,
2553         low_latency = 1,
2554         bulk_latency = 2,
2555         latency_invalid = 255
2556 };
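/* These ranges map onto the interrupt rates programmed by e1000_set_itr():
 * roughly 70000, 20000 and 4000 interrupts/sec respectively.
 */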
2557
2558 /**
2559  * e1000_update_itr - update the dynamic ITR value based on statistics
2560  * @adapter: pointer to adapter
2561  * @itr_setting: current adapter->itr
2562  * @packets: the number of packets during this measurement interval
2563  * @bytes: the number of bytes during this measurement interval
2564  *
2565  *      Returns a new ITR value based on packets and byte
2566  *      counts during the last interrupt.  The advantage of per interrupt
2567  *      computation is faster updates and more accurate ITR for the current
2568  *      traffic pattern.  Constants in this function were computed
2569  *      based on theoretical maximum wire speed and thresholds were set based
2570  *      on testing data as well as attempting to minimize response time
2571  *      while increasing bulk throughput.
2572  *      This functionality is controlled by the InterruptThrottleRate module
2573  *      parameter (see e1000_param.c)
2574  **/
2575 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2576                                      u16 itr_setting, int packets, int bytes)
2577 {
2578         unsigned int retval = itr_setting;
2579         struct e1000_hw *hw = &adapter->hw;
2580
2581         if (unlikely(hw->mac_type < e1000_82540))
2582                 goto update_itr_done;
2583
2584         if (packets == 0)
2585                 goto update_itr_done;
2586
2587         switch (itr_setting) {
2588         case lowest_latency:
2589                 /* jumbo frames get bulk treatment */
2590                 if (bytes/packets > 8000)
2591                         retval = bulk_latency;
2592                 else if ((packets < 5) && (bytes > 512))
2593                         retval = low_latency;
2594                 break;
2595         case low_latency:  /* 50 usec aka 20000 ints/s */
2596                 if (bytes > 10000) {
2597                         /* jumbo frames need bulk latency setting */
2598                         if (bytes/packets > 8000)
2599                                 retval = bulk_latency;
2600                         else if ((packets < 10) || ((bytes/packets) > 1200))
2601                                 retval = bulk_latency;
2602                         else if (packets > 35)
2603                                 retval = lowest_latency;
2604                 } else if (bytes/packets > 2000)
2605                         retval = bulk_latency;
2606                 else if (packets <= 2 && bytes < 512)
2607                         retval = lowest_latency;
2608                 break;
2609         case bulk_latency: /* 250 usec aka 4000 ints/s */
2610                 if (bytes > 25000) {
2611                         if (packets > 35)
2612                                 retval = low_latency;
2613                 } else if (bytes < 6000) {
2614                         retval = low_latency;
2615                 }
2616                 break;
2617         }
2618
2619 update_itr_done:
2620         return retval;
2621 }
2622
2623 static void e1000_set_itr(struct e1000_adapter *adapter)
2624 {
2625         struct e1000_hw *hw = &adapter->hw;
2626         u16 current_itr;
2627         u32 new_itr = adapter->itr;
2628
2629         if (unlikely(hw->mac_type < e1000_82540))
2630                 return;
2631
2632         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2633         if (unlikely(adapter->link_speed != SPEED_1000)) {
2634                 current_itr = 0;
2635                 new_itr = 4000;
2636                 goto set_itr_now;
2637         }
2638
2639         adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
2640                                            adapter->total_tx_packets,
2641                                            adapter->total_tx_bytes);
2642         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2643         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2644                 adapter->tx_itr = low_latency;
2645
2646         adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
2647                                            adapter->total_rx_packets,
2648                                            adapter->total_rx_bytes);
2649         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2650         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2651                 adapter->rx_itr = low_latency;
2652
2653         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2654
2655         switch (current_itr) {
2656         /* counts and packets in update_itr are dependent on these numbers */
2657         case lowest_latency:
2658                 new_itr = 70000;
2659                 break;
2660         case low_latency:
2661                 new_itr = 20000; /* aka hwitr = ~200 */
2662                 break;
2663         case bulk_latency:
2664                 new_itr = 4000;
2665                 break;
2666         default:
2667                 break;
2668         }
2669
2670 set_itr_now:
2671         if (new_itr != adapter->itr) {
2672                 /* this attempts to bias the interrupt rate towards Bulk
2673                  * by adding intermediate steps when interrupt rate is
2674                  * increasing
2675                  */
2676                 new_itr = new_itr > adapter->itr ?
2677                           min(adapter->itr + (new_itr >> 2), new_itr) :
2678                           new_itr;
2679                 adapter->itr = new_itr;
2680                 ew32(ITR, 1000000000 / (new_itr * 256));
2681         }
2682 }
2683
2684 #define E1000_TX_FLAGS_CSUM             0x00000001
2685 #define E1000_TX_FLAGS_VLAN             0x00000002
2686 #define E1000_TX_FLAGS_TSO              0x00000004
2687 #define E1000_TX_FLAGS_IPV4             0x00000008
2688 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2689 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2690 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2691
2692 static int e1000_tso(struct e1000_adapter *adapter,
2693                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2694                      __be16 protocol)
2695 {
2696         struct e1000_context_desc *context_desc;
2697         struct e1000_tx_buffer *buffer_info;
2698         unsigned int i;
2699         u32 cmd_length = 0;
2700         u16 ipcse = 0, tucse, mss;
2701         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2702
2703         if (skb_is_gso(skb)) {
2704                 int err;
2705
2706                 err = skb_cow_head(skb, 0);
2707                 if (err < 0)
2708                         return err;
2709
2710                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2711                 mss = skb_shinfo(skb)->gso_size;
2712                 if (protocol == htons(ETH_P_IP)) {
2713                         struct iphdr *iph = ip_hdr(skb);
2714                         iph->tot_len = 0;
2715                         iph->check = 0;
2716                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2717                                                                  iph->daddr, 0,
2718                                                                  IPPROTO_TCP,
2719                                                                  0);
2720                         cmd_length = E1000_TXD_CMD_IP;
2721                         ipcse = skb_transport_offset(skb) - 1;
2722                 } else if (skb_is_gso_v6(skb)) {
2723                         tcp_v6_gso_csum_prep(skb);
2724                         ipcse = 0;
2725                 }
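                /* The context descriptor fields name the checksum start (css),
                 * insert offset (cso) and end (cse) for the IP (ipc*) and
                 * TCP/UDP (tuc*) headers that the hardware uses during TSO.
                 */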
2726                 ipcss = skb_network_offset(skb);
2727                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2728                 tucss = skb_transport_offset(skb);
2729                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2730                 tucse = 0;
2731
2732                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2733                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2734
2735                 i = tx_ring->next_to_use;
2736                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2737                 buffer_info = &tx_ring->buffer_info[i];
2738
2739                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2740                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2741                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2742                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2743                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2744                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2745                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2746                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2747                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2748
2749                 buffer_info->time_stamp = jiffies;
2750                 buffer_info->next_to_watch = i;
2751
2752                 if (++i == tx_ring->count)
2753                         i = 0;
2754
2755                 tx_ring->next_to_use = i;
2756
2757                 return true;
2758         }
2759         return false;
2760 }
2761
2762 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2763                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb,
2764                           __be16 protocol)
2765 {
2766         struct e1000_context_desc *context_desc;
2767         struct e1000_tx_buffer *buffer_info;
2768         unsigned int i;
2769         u8 css;
2770         u32 cmd_len = E1000_TXD_CMD_DEXT;
2771
2772         if (skb->ip_summed != CHECKSUM_PARTIAL)
2773                 return false;
2774
2775         switch (protocol) {
2776         case cpu_to_be16(ETH_P_IP):
2777                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2778                         cmd_len |= E1000_TXD_CMD_TCP;
2779                 break;
2780         case cpu_to_be16(ETH_P_IPV6):
2781                 /* XXX not handling all IPV6 headers */
2782                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2783                         cmd_len |= E1000_TXD_CMD_TCP;
2784                 break;
2785         default:
2786                 if (unlikely(net_ratelimit()))
2787                         e_warn(drv, "checksum_partial proto=%x!\n",
2788                                skb->protocol);
2789                 break;
2790         }
2791
2792         css = skb_checksum_start_offset(skb);
2793
2794         i = tx_ring->next_to_use;
2795         buffer_info = &tx_ring->buffer_info[i];
2796         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2797
2798         context_desc->lower_setup.ip_config = 0;
2799         context_desc->upper_setup.tcp_fields.tucss = css;
2800         context_desc->upper_setup.tcp_fields.tucso =
2801                 css + skb->csum_offset;
2802         context_desc->upper_setup.tcp_fields.tucse = 0;
2803         context_desc->tcp_seg_setup.data = 0;
2804         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2805
2806         buffer_info->time_stamp = jiffies;
2807         buffer_info->next_to_watch = i;
2808
2809         if (unlikely(++i == tx_ring->count))
2810                 i = 0;
2811
2812         tx_ring->next_to_use = i;
2813
2814         return true;
2815 }
2816
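/* Each Tx data descriptor carries at most 1 << E1000_MAX_TXD_PWR (4096) bytes;
 * e1000_tx_map() splits larger buffers into max_per_txd sized chunks.
 */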
2817 #define E1000_MAX_TXD_PWR       12
2818 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2819
2820 static int e1000_tx_map(struct e1000_adapter *adapter,
2821                         struct e1000_tx_ring *tx_ring,
2822                         struct sk_buff *skb, unsigned int first,
2823                         unsigned int max_per_txd, unsigned int nr_frags,
2824                         unsigned int mss)
2825 {
2826         struct e1000_hw *hw = &adapter->hw;
2827         struct pci_dev *pdev = adapter->pdev;
2828         struct e1000_tx_buffer *buffer_info;
2829         unsigned int len = skb_headlen(skb);
2830         unsigned int offset = 0, size, count = 0, i;
2831         unsigned int f, bytecount, segs;
2832
2833         i = tx_ring->next_to_use;
2834
2835         while (len) {
2836                 buffer_info = &tx_ring->buffer_info[i];
2837                 size = min(len, max_per_txd);
2838                 /* Workaround for Controller erratum --
2839                  * the descriptor for a non-TSO packet in a linear skb that
2840                  * follows a TSO packet gets written back prematurely, before
2841                  * the data is fully DMA'd to the controller
2842                  */
2843                 if (!skb->data_len && tx_ring->last_tx_tso &&
2844                     !skb_is_gso(skb)) {
2845                         tx_ring->last_tx_tso = false;
2846                         size -= 4;
2847                 }
2848
2849                 /* Workaround for premature desc write-backs
2850                  * in TSO mode.  Append 4-byte sentinel desc
2851                  */
2852                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2853                         size -= 4;
2854                 /* Workaround for erratum 10, which applies
2855                  * to all controllers in PCI-X mode.
2856                  * The fix is to make sure that the first descriptor of a
2857                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2858                  */
2859                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2860                              (size > 2015) && count == 0))
2861                         size = 2015;
2862
2863                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2864                  * terminating buffers within evenly-aligned dwords.
2865                  */
2866                 if (unlikely(adapter->pcix_82544 &&
2867                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2868                    size > 4))
2869                         size -= 4;
2870
2871                 buffer_info->length = size;
2872                 /* set time_stamp *before* dma to help avoid a possible race */
2873                 buffer_info->time_stamp = jiffies;
2874                 buffer_info->mapped_as_page = false;
2875                 buffer_info->dma = dma_map_single(&pdev->dev,
2876                                                   skb->data + offset,
2877                                                   size, DMA_TO_DEVICE);
2878                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2879                         goto dma_error;
2880                 buffer_info->next_to_watch = i;
2881
2882                 len -= size;
2883                 offset += size;
2884                 count++;
2885                 if (len) {
2886                         i++;
2887                         if (unlikely(i == tx_ring->count))
2888                                 i = 0;
2889                 }
2890         }
2891
2892         for (f = 0; f < nr_frags; f++) {
2893                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2894
2895                 len = skb_frag_size(frag);
2896                 offset = 0;
2897
2898                 while (len) {
2899                         unsigned long bufend;
2900                         i++;
2901                         if (unlikely(i == tx_ring->count))
2902                                 i = 0;
2903
2904                         buffer_info = &tx_ring->buffer_info[i];
2905                         size = min(len, max_per_txd);
2906                         /* Workaround for premature desc write-backs
2907                          * in TSO mode.  Append 4-byte sentinel desc
2908                          */
2909                         if (unlikely(mss && f == (nr_frags-1) &&
2910                             size == len && size > 8))
2911                                 size -= 4;
2912                         /* Workaround for potential 82544 hang in PCI-X.
2913                          * Avoid terminating buffers within evenly-aligned
2914                          * dwords.
2915                          */
2916                         bufend = (unsigned long)
2917                                 page_to_phys(skb_frag_page(frag));
2918                         bufend += offset + size - 1;
2919                         if (unlikely(adapter->pcix_82544 &&
2920                                      !(bufend & 4) &&
2921                                      size > 4))
2922                                 size -= 4;
2923
2924                         buffer_info->length = size;
2925                         buffer_info->time_stamp = jiffies;
2926                         buffer_info->mapped_as_page = true;
2927                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2928                                                 offset, size, DMA_TO_DEVICE);
2929                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2930                                 goto dma_error;
2931                         buffer_info->next_to_watch = i;
2932
2933                         len -= size;
2934                         offset += size;
2935                         count++;
2936                 }
2937         }
2938
2939         segs = skb_shinfo(skb)->gso_segs ?: 1;
2940         /* multiply data chunks by size of headers */
2941         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2942
2943         tx_ring->buffer_info[i].skb = skb;
2944         tx_ring->buffer_info[i].segs = segs;
2945         tx_ring->buffer_info[i].bytecount = bytecount;
2946         tx_ring->buffer_info[first].next_to_watch = i;
2947
2948         return count;
2949
2950 dma_error:
2951         dev_err(&pdev->dev, "TX DMA map failed\n");
2952         buffer_info->dma = 0;
2953         if (count)
2954                 count--;
2955
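        /* Walk back over everything mapped so far, unmapping and freeing each
         * buffer; wrap back to the end of the ring whenever i reaches 0.
         */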
2956         while (count--) {
2957                 if (i == 0)
2958                         i += tx_ring->count;
2959                 i--;
2960                 buffer_info = &tx_ring->buffer_info[i];
2961                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2962         }
2963
2964         return 0;
2965 }
2966
2967 static void e1000_tx_queue(struct e1000_adapter *adapter,
2968                            struct e1000_tx_ring *tx_ring, int tx_flags,
2969                            int count)
2970 {
2971         struct e1000_tx_desc *tx_desc = NULL;
2972         struct e1000_tx_buffer *buffer_info;
2973         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2974         unsigned int i;
2975
2976         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2977                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2978                              E1000_TXD_CMD_TSE;
2979                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2980
2981                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2982                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2983         }
2984
2985         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
2986                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
2987                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2988         }
2989
2990         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
2991                 txd_lower |= E1000_TXD_CMD_VLE;
2992                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
2993         }
2994
2995         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
2996                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
2997
2998         i = tx_ring->next_to_use;
2999
3000         while (count--) {
3001                 buffer_info = &tx_ring->buffer_info[i];
3002                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3003                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3004                 tx_desc->lower.data =
3005                         cpu_to_le32(txd_lower | buffer_info->length);
3006                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3007                 if (unlikely(++i == tx_ring->count))
3008                         i = 0;
3009         }
3010
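        /* adapter->txd_cmd adds the final per-packet command bits (EOP, IFCS
         * and any report-status/delay bits configured elsewhere) to the last
         * descriptor only.
         */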
3011         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3012
3013         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3014         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3015                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3016
3017         /* Force memory writes to complete before letting h/w
3018          * know there are new descriptors to fetch.  (Only
3019          * applicable for weak-ordered memory model archs,
3020          * such as IA-64).
3021          */
3022         dma_wmb();
3023
3024         tx_ring->next_to_use = i;
3025 }
3026
3027 /* 82547 workaround to avoid controller hang in half-duplex environment.
3028  * The workaround is to avoid queuing a large packet that would span
3029  * the internal Tx FIFO ring boundary by notifying the stack to resend
3030  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3031  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3032  * to the beginning of the Tx FIFO.
3033  */
3034
3035 #define E1000_FIFO_HDR                  0x10
3036 #define E1000_82547_PAD_LEN             0x3E0
3037
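/* skb_fifo_len below is the frame length plus the FIFO header, rounded up to
 * the next E1000_FIFO_HDR (16-byte) boundary; a stall is requested when that
 * length would run at least E1000_82547_PAD_LEN bytes past the space remaining
 * before the FIFO wraps.
 */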
3038 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3039                                        struct sk_buff *skb)
3040 {
3041         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3042         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3043
3044         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3045
3046         if (adapter->link_duplex != HALF_DUPLEX)
3047                 goto no_fifo_stall_required;
3048
3049         if (atomic_read(&adapter->tx_fifo_stall))
3050                 return 1;
3051
3052         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3053                 atomic_set(&adapter->tx_fifo_stall, 1);
3054                 return 1;
3055         }
3056
3057 no_fifo_stall_required:
3058         adapter->tx_fifo_head += skb_fifo_len;
3059         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3060                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3061         return 0;
3062 }
3063
3064 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3065 {
3066         struct e1000_adapter *adapter = netdev_priv(netdev);
3067         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3068
3069         netif_stop_queue(netdev);
3070         /* Herbert's original patch had:
3071          *  smp_mb__after_netif_stop_queue();
3072          * but since that doesn't exist yet, just open code it.
3073          */
3074         smp_mb();
3075
3076         /* We need to check again in a case another CPU has just
3077          * made room available.
3078          */
3079         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3080                 return -EBUSY;
3081
3082         /* A reprieve! */
3083         netif_start_queue(netdev);
3084         ++adapter->restart_queue;
3085         return 0;
3086 }
3087
3088 static int e1000_maybe_stop_tx(struct net_device *netdev,
3089                                struct e1000_tx_ring *tx_ring, int size)
3090 {
3091         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3092                 return 0;
3093         return __e1000_maybe_stop_tx(netdev, size);
3094 }
3095
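/* TXD_USE_COUNT(S, X) = ceil(S / 2^X): the number of descriptors needed for an
 * S-byte buffer when each descriptor carries at most 2^X bytes,
 * e.g. TXD_USE_COUNT(6000, 12) = (6000 + 4095) >> 12 = 2.
 */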
3096 #define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X))
3097 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3098                                     struct net_device *netdev)
3099 {
3100         struct e1000_adapter *adapter = netdev_priv(netdev);
3101         struct e1000_hw *hw = &adapter->hw;
3102         struct e1000_tx_ring *tx_ring;
3103         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3104         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3105         unsigned int tx_flags = 0;
3106         unsigned int len = skb_headlen(skb);
3107         unsigned int nr_frags;
3108         unsigned int mss;
3109         int count = 0;
3110         int tso;
3111         unsigned int f;
3112         __be16 protocol = vlan_get_protocol(skb);
3113
3114         /* This goes back to the question of how to logically map a Tx queue
3115          * to a flow.  Right now, performance is impacted slightly negatively
3116          * if using multiple Tx queues.  If the stack breaks away from a
3117          * single qdisc implementation, we can look at this again.
3118          */
3119         tx_ring = adapter->tx_ring;
3120
3121         /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
3122          * packets may get corrupted during padding by HW.
3123          * To WA this issue, pad all small packets manually.
3124          * To work around this issue, pad all small packets manually.
3125         if (eth_skb_pad(skb))
3126                 return NETDEV_TX_OK;
3127
3128         mss = skb_shinfo(skb)->gso_size;
3129         /* The controller does a simple calculation to
3130          * make sure there is enough room in the FIFO before
3131          * initiating the DMA for each buffer.  It requires that
3132          * ceil(buffer len / mss) <= 4.  To make sure we don't
3133          * overrun the FIFO, adjust the max buffer len if mss
3134          * drops.
3135          */
3136         if (mss) {
3137                 u8 hdr_len;
3138                 max_per_txd = min(mss << 2, max_per_txd);
3139                 max_txd_pwr = fls(max_per_txd) - 1;
3140
3141                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
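                /* skb->data_len && hdr_len == len means the linear area holds
                 * exactly the protocol headers and all payload lives in frags
                 */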
3142                 if (skb->data_len && hdr_len == len) {
3143                         switch (hw->mac_type) {
3144                         case e1000_82544: {
3145                                 unsigned int pull_size;
3146
3147                                 /* Make sure we have room to chop off 4 bytes,
3148                                  * and that the end alignment will work out to
3149                                  * this hardware's requirements.
3150                                  * NOTE: this is a TSO-only workaround; if the
3151                                  * end byte alignment is not correct, move us
3152                                  * into the next dword.
3153                                  */
3154                                 if ((unsigned long)(skb_tail_pointer(skb) - 1)
3155                                     & 4)
3156                                         break;
3157                                 pull_size = min((unsigned int)4, skb->data_len);
3158                                 if (!__pskb_pull_tail(skb, pull_size)) {
3159                                         e_err(drv, "__pskb_pull_tail failed.\n");
3161                                         dev_kfree_skb_any(skb);
3162                                         return NETDEV_TX_OK;
3163                                 }
3164                                 len = skb_headlen(skb);
3165                                 break;
3166                         }
3167                         default:
3168                                 /* do nothing */
3169                                 break;
3170                         }
3171                 }
3172         }
3173
3174         /* reserve a descriptor for the offload context */
3175         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3176                 count++;
3177         count++;
3178
3179         /* Controller Erratum workaround */
3180         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3181                 count++;
3182
3183         count += TXD_USE_COUNT(len, max_txd_pwr);
3184
3185         if (adapter->pcix_82544)
3186                 count++;
3187
3188         /* Workaround for erratum 10: it applies to all controllers
3189          * in PCI-X mode, so add one more descriptor to the count.
3190          */
3191         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3192                         (len > 2015)))
3193                 count++;
3194
3195         nr_frags = skb_shinfo(skb)->nr_frags;
3196         for (f = 0; f < nr_frags; f++)
3197                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3198                                        max_txd_pwr);
3199         if (adapter->pcix_82544)
3200                 count += nr_frags;
3201
3202         /* need: count + 2 desc gap to keep tail from touching
3203          * head, otherwise try next time
3204          */
3205         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3206                 return NETDEV_TX_BUSY;
3207
3208         if (unlikely((hw->mac_type == e1000_82547) &&
3209                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3210                 netif_stop_queue(netdev);
3211                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3212                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3213                 return NETDEV_TX_BUSY;
3214         }
3215
3216         if (skb_vlan_tag_present(skb)) {
3217                 tx_flags |= E1000_TX_FLAGS_VLAN;
3218                 tx_flags |= (skb_vlan_tag_get(skb) <<
3219                              E1000_TX_FLAGS_VLAN_SHIFT);
3220         }
3221
3222         first = tx_ring->next_to_use;
3223
3224         tso = e1000_tso(adapter, tx_ring, skb, protocol);
3225         if (tso < 0) {
3226                 dev_kfree_skb_any(skb);
3227                 return NETDEV_TX_OK;
3228         }
3229
3230         if (likely(tso)) {
3231                 if (likely(hw->mac_type != e1000_82544))
3232                         tx_ring->last_tx_tso = true;
3233                 tx_flags |= E1000_TX_FLAGS_TSO;
3234         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol)))
3235                 tx_flags |= E1000_TX_FLAGS_CSUM;
3236
3237         if (protocol == htons(ETH_P_IP))
3238                 tx_flags |= E1000_TX_FLAGS_IPV4;
3239
3240         if (unlikely(skb->no_fcs))
3241                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3242
3243         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3244                              nr_frags, mss);
3245
3246         if (count) {
3247                 /* The number of descriptors needed is higher than in other
3248                  * Intel drivers due to a number of workarounds.  The breakdown:
3249                  * Data descriptors: MAX_SKB_FRAGS + 1
3250                  * Context Descriptor: 1
3251                  * Keep head from touching tail: 2
3252                  * Workarounds: 3
3253                  */
3254                 int desc_needed = MAX_SKB_FRAGS + 7;
3255
3256                 netdev_sent_queue(netdev, skb->len);
3257                 skb_tx_timestamp(skb);
3258
3259                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3260
3261                 /* 82544 potentially requires twice as many data descriptors
3262                  * in order to guarantee buffers don't end on evenly-aligned
3263                  * dwords
3264                  */
3265                 if (adapter->pcix_82544)
3266                         desc_needed += MAX_SKB_FRAGS + 1;
3267
3268                 /* Make sure there is space in the ring for the next send. */
3269                 e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
3270
3271                 if (!netdev_xmit_more() ||
3272                     netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
3273                         writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
3274                 }
3275         } else {
3276                 dev_kfree_skb_any(skb);
3277                 tx_ring->buffer_info[first].time_stamp = 0;
3278                 tx_ring->next_to_use = first;
3279         }
3280
3281         return NETDEV_TX_OK;
3282 }
3283
3284 #define NUM_REGS 38 /* 1 based count */
3285 static void e1000_regdump(struct e1000_adapter *adapter)
3286 {
3287         struct e1000_hw *hw = &adapter->hw;
3288         u32 regs[NUM_REGS];
3289         u32 *regs_buff = regs;
3290         int i = 0;
3291
3292         static const char * const reg_name[] = {
3293                 "CTRL",  "STATUS",
3294                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3295                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3296                 "TIDV", "TXDCTL", "TADV", "TARC0",
3297                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3298                 "TXDCTL1", "TARC1",
3299                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3300                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3301                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3302         };
3303
3304         regs_buff[0]  = er32(CTRL);
3305         regs_buff[1]  = er32(STATUS);
3306
3307         regs_buff[2]  = er32(RCTL);
3308         regs_buff[3]  = er32(RDLEN);
3309         regs_buff[4]  = er32(RDH);
3310         regs_buff[5]  = er32(RDT);
3311         regs_buff[6]  = er32(RDTR);
3312
3313         regs_buff[7]  = er32(TCTL);
3314         regs_buff[8]  = er32(TDBAL);
3315         regs_buff[9]  = er32(TDBAH);
3316         regs_buff[10] = er32(TDLEN);
3317         regs_buff[11] = er32(TDH);
3318         regs_buff[12] = er32(TDT);
3319         regs_buff[13] = er32(TIDV);
3320         regs_buff[14] = er32(TXDCTL);
3321         regs_buff[15] = er32(TADV);
3322         regs_buff[16] = er32(TARC0);
3323
3324         regs_buff[17] = er32(TDBAL1);
3325         regs_buff[18] = er32(TDBAH1);
3326         regs_buff[19] = er32(TDLEN1);
3327         regs_buff[20] = er32(TDH1);
3328         regs_buff[21] = er32(TDT1);
3329         regs_buff[22] = er32(TXDCTL1);
3330         regs_buff[23] = er32(TARC1);
3331         regs_buff[24] = er32(CTRL_EXT);
3332         regs_buff[25] = er32(ERT);
3333         regs_buff[26] = er32(RDBAL0);
3334         regs_buff[27] = er32(RDBAH0);
3335         regs_buff[28] = er32(TDFH);
3336         regs_buff[29] = er32(TDFT);
3337         regs_buff[30] = er32(TDFHS);
3338         regs_buff[31] = er32(TDFTS);
3339         regs_buff[32] = er32(TDFPC);
3340         regs_buff[33] = er32(RDFH);
3341         regs_buff[34] = er32(RDFT);
3342         regs_buff[35] = er32(RDFHS);
3343         regs_buff[36] = er32(RDFTS);
3344         regs_buff[37] = er32(RDFPC);
3345
3346         pr_info("Register dump\n");
3347         for (i = 0; i < NUM_REGS; i++)
3348                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3349 }
3350
3351 /**
3352  * e1000_dump - Print registers, Tx ring and Rx ring
 * @adapter: board private structure
3353  */
3354 static void e1000_dump(struct e1000_adapter *adapter)
3355 {
3356         /* this code doesn't handle multiple rings */
3357         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3358         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3359         int i;
3360
3361         if (!netif_msg_hw(adapter))
3362                 return;
3363
3364         /* Print Registers */
3365         e1000_regdump(adapter);
3366
3367         /* transmit dump */
3368         pr_info("TX Desc ring0 dump\n");
3369
3370         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3371          *
3372          * Legacy Transmit Descriptor
3373          *   +--------------------------------------------------------------+
3374          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3375          *   +--------------------------------------------------------------+
3376          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3377          *   +--------------------------------------------------------------+
3378          *   63       48 47        36 35    32 31     24 23    16 15        0
3379          *
3380          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3381          *   63      48 47    40 39       32 31             16 15    8 7      0
3382          *   +----------------------------------------------------------------+
3383          * 0 |  TUCSE  | TUCSO  |   TUCSS   |     IPCSE       | IPCSO | IPCSS |
3384          *   +----------------------------------------------------------------+
3385          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3386          *   +----------------------------------------------------------------+
3387          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3388          *
3389          * Extended Data Descriptor (DTYP=0x1)
3390          *   +----------------------------------------------------------------+
3391          * 0 |                     Buffer Address [63:0]                      |
3392          *   +----------------------------------------------------------------+
3393          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3394          *   +----------------------------------------------------------------+
3395          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3396          */
3397         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3398         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399
3400         if (!netif_msg_tx_done(adapter))
3401                 goto rx_ring_summary;
3402
3403         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3404                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3405                 struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i];
3406                 struct my_u { __le64 a; __le64 b; };
3407                 struct my_u *u = (struct my_u *)tx_desc;
3408                 const char *type;
3409
3410                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3411                         type = "NTC/U";
3412                 else if (i == tx_ring->next_to_use)
3413                         type = "NTU";
3414                 else if (i == tx_ring->next_to_clean)
3415                         type = "NTC";
3416                 else
3417                         type = "";
3418
3419                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3420                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3421                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3422                         (u64)buffer_info->dma, buffer_info->length,
3423                         buffer_info->next_to_watch,
3424                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3425         }
3426
3427 rx_ring_summary:
3428         /* receive dump */
3429         pr_info("\nRX Desc ring dump\n");
3430
3431         /* Legacy Receive Descriptor Format
3432          *
3433          * +-----------------------------------------------------+
3434          * |                Buffer Address [63:0]                |
3435          * +-----------------------------------------------------+
3436          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3437          * +-----------------------------------------------------+
3438          * 63       48 47    40 39      32 31         16 15      0
3439          */
3440         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3441
3442         if (!netif_msg_rx_status(adapter))
3443                 goto exit;
3444
3445         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3446                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3447                 struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i];
3448                 struct my_u { __le64 a; __le64 b; };
3449                 struct my_u *u = (struct my_u *)rx_desc;
3450                 const char *type;
3451
3452                 if (i == rx_ring->next_to_use)
3453                         type = "NTU";
3454                 else if (i == rx_ring->next_to_clean)
3455                         type = "NTC";
3456                 else
3457                         type = "";
3458
3459                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3460                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3461                         (u64)buffer_info->dma, buffer_info->rxbuf.data, type);
3462         } /* for */
3463
3464         /* dump the descriptor caches */
3465         /* rx */
3466         pr_info("Rx descriptor cache in 64bit format\n");
3467         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3468                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3469                         i,
3470                         readl(adapter->hw.hw_addr + i+4),
3471                         readl(adapter->hw.hw_addr + i),
3472                         readl(adapter->hw.hw_addr + i+12),
3473                         readl(adapter->hw.hw_addr + i+8));
3474         }
3475         /* tx */
3476         pr_info("Tx descriptor cache in 64bit format\n");
3477         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3478                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3479                         i,
3480                         readl(adapter->hw.hw_addr + i+4),
3481                         readl(adapter->hw.hw_addr + i),
3482                         readl(adapter->hw.hw_addr + i+12),
3483                         readl(adapter->hw.hw_addr + i+8));
3484         }
3485 exit:
3486         return;
3487 }
3488
3489 /**
3490  * e1000_tx_timeout - Respond to a Tx Hang
3491  * @netdev: network interface device structure
 * @txqueue: number of the Tx queue that hung (unused)
3492  **/
3493 static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
3494 {
3495         struct e1000_adapter *adapter = netdev_priv(netdev);
3496
3497         /* Do the reset outside of interrupt context */
3498         adapter->tx_timeout_count++;
3499         schedule_work(&adapter->reset_task);
3500 }
3501
3502 static void e1000_reset_task(struct work_struct *work)
3503 {
3504         struct e1000_adapter *adapter =
3505                 container_of(work, struct e1000_adapter, reset_task);
3506
3507         e_err(drv, "Reset adapter\n");
3508         e1000_reinit_locked(adapter);
3509 }
3510
3511 /**
3512  * e1000_change_mtu - Change the Maximum Transfer Unit
3513  * @netdev: network interface device structure
3514  * @new_mtu: new value for maximum frame size
3515  *
3516  * Returns 0 on success, negative on failure
3517  **/
3518 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3519 {
3520         struct e1000_adapter *adapter = netdev_priv(netdev);
3521         struct e1000_hw *hw = &adapter->hw;
3522         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3523
3524         /* Adapter-specific max frame size limits. */
3525         switch (hw->mac_type) {
3526         case e1000_undefined ... e1000_82542_rev2_1:
3527                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3528                         e_err(probe, "Jumbo Frames not supported.\n");
3529                         return -EINVAL;
3530                 }
3531                 break;
3532         default:
3533                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3534                 break;
3535         }
3536
3537         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3538                 msleep(1);
3539         /* e1000_down has a dependency on max_frame_size */
3540         hw->max_frame_size = max_frame;
3541         if (netif_running(netdev)) {
3542                 /* prevent buffers from being reallocated */
3543                 adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
3544                 e1000_down(adapter);
3545         }
3546
3547         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3548          * means we reserve 2 more; this pushes us to allocate from the next
3549          * larger slab size.
3550          * i.e. RXBUFFER_2048 --> size-4096 slab
3551          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3552          * fragmented skbs.
3553          */
3554
3555         if (max_frame <= E1000_RXBUFFER_2048)
3556                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3557         else
3558 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3559                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3560 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3561                 adapter->rx_buffer_len = PAGE_SIZE;
3562 #endif
3563
3564         /* adjust allocation if LPE protects us, and we aren't using SBP */
3565         if (!hw->tbi_compatibility_on &&
3566             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3567              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3568                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3569
3570         netdev_dbg(netdev, "changing MTU from %d to %d\n",
3571                    netdev->mtu, new_mtu);
3572         netdev->mtu = new_mtu;
3573
3574         if (netif_running(netdev))
3575                 e1000_up(adapter);
3576         else
3577                 e1000_reset(adapter);
3578
3579         clear_bit(__E1000_RESETTING, &adapter->flags);
3580
3581         return 0;
3582 }
3583
3584 /**
3585  * e1000_update_stats - Update the board statistics counters
3586  * @adapter: board private structure
3587  **/
3588 void e1000_update_stats(struct e1000_adapter *adapter)
3589 {
3590         struct net_device *netdev = adapter->netdev;
3591         struct e1000_hw *hw = &adapter->hw;
3592         struct pci_dev *pdev = adapter->pdev;
3593         unsigned long flags;
3594         u16 phy_tmp;
3595
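/* bits 7:0 of PHY_1000T_STATUS hold the 1000BASE-T idle error count */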
3596 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3597
3598         /* Prevent stats update while adapter is being reset, or if the pci
3599          * connection is down.
3600          */
3601         if (adapter->link_speed == 0)
3602                 return;
3603         if (pci_channel_offline(pdev))
3604                 return;
3605
3606         spin_lock_irqsave(&adapter->stats_lock, flags);
3607
3608         /* these counters are modified from e1000_tbi_adjust_stats,
3609          * called from the interrupt context, so they must only
3610          * be written while holding adapter->stats_lock
3611          */
3612
3613         adapter->stats.crcerrs += er32(CRCERRS);
3614         adapter->stats.gprc += er32(GPRC);
3615         adapter->stats.gorcl += er32(GORCL);
3616         adapter->stats.gorch += er32(GORCH);
3617         adapter->stats.bprc += er32(BPRC);
3618         adapter->stats.mprc += er32(MPRC);
3619         adapter->stats.roc += er32(ROC);
3620
3621         adapter->stats.prc64 += er32(PRC64);
3622         adapter->stats.prc127 += er32(PRC127);
3623         adapter->stats.prc255 += er32(PRC255);
3624         adapter->stats.prc511 += er32(PRC511);
3625         adapter->stats.prc1023 += er32(PRC1023);
3626         adapter->stats.prc1522 += er32(PRC1522);
3627
3628         adapter->stats.symerrs += er32(SYMERRS);
3629         adapter->stats.mpc += er32(MPC);
3630         adapter->stats.scc += er32(SCC);
3631         adapter->stats.ecol += er32(ECOL);
3632         adapter->stats.mcc += er32(MCC);
3633         adapter->stats.latecol += er32(LATECOL);
3634         adapter->stats.dc += er32(DC);
3635         adapter->stats.sec += er32(SEC);
3636         adapter->stats.rlec += er32(RLEC);
3637         adapter->stats.xonrxc += er32(XONRXC);
3638         adapter->stats.xontxc += er32(XONTXC);
3639         adapter->stats.xoffrxc += er32(XOFFRXC);
3640         adapter->stats.xofftxc += er32(XOFFTXC);
3641         adapter->stats.fcruc += er32(FCRUC);
3642         adapter->stats.gptc += er32(GPTC);
3643         adapter->stats.gotcl += er32(GOTCL);
3644         adapter->stats.gotch += er32(GOTCH);
3645         adapter->stats.rnbc += er32(RNBC);
3646         adapter->stats.ruc += er32(RUC);
3647         adapter->stats.rfc += er32(RFC);
3648         adapter->stats.rjc += er32(RJC);
3649         adapter->stats.torl += er32(TORL);
3650         adapter->stats.torh += er32(TORH);
3651         adapter->stats.totl += er32(TOTL);
3652         adapter->stats.toth += er32(TOTH);
3653         adapter->stats.tpr += er32(TPR);
3654
3655         adapter->stats.ptc64 += er32(PTC64);
3656         adapter->stats.ptc127 += er32(PTC127);
3657         adapter->stats.ptc255 += er32(PTC255);
3658         adapter->stats.ptc511 += er32(PTC511);
3659         adapter->stats.ptc1023 += er32(PTC1023);
3660         adapter->stats.ptc1522 += er32(PTC1522);
3661
3662         adapter->stats.mptc += er32(MPTC);
3663         adapter->stats.bptc += er32(BPTC);
3664
3665         /* used for adaptive IFS */
3666
3667         hw->tx_packet_delta = er32(TPT);
3668         adapter->stats.tpt += hw->tx_packet_delta;
3669         hw->collision_delta = er32(COLC);
3670         adapter->stats.colc += hw->collision_delta;
3671
3672         if (hw->mac_type >= e1000_82543) {
3673                 adapter->stats.algnerrc += er32(ALGNERRC);
3674                 adapter->stats.rxerrc += er32(RXERRC);
3675                 adapter->stats.tncrs += er32(TNCRS);
3676                 adapter->stats.cexterr += er32(CEXTERR);
3677                 adapter->stats.tsctc += er32(TSCTC);
3678                 adapter->stats.tsctfc += er32(TSCTFC);
3679         }
3680
3681         /* Fill out the OS statistics structure */
3682         netdev->stats.multicast = adapter->stats.mprc;
3683         netdev->stats.collisions = adapter->stats.colc;
3684
3685         /* Rx Errors */
3686
3687         /* RLEC on some newer hardware can be incorrect so build
3688          * our own version based on RUC and ROC
3689          */
3690         netdev->stats.rx_errors = adapter->stats.rxerrc +
3691                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3692                 adapter->stats.ruc + adapter->stats.roc +
3693                 adapter->stats.cexterr;
3694         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3695         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3696         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3697         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3698         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3699
3700         /* Tx Errors */
3701         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3702         netdev->stats.tx_errors = adapter->stats.txerrc;
3703         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3704         netdev->stats.tx_window_errors = adapter->stats.latecol;
3705         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3706         if (hw->bad_tx_carr_stats_fd &&
3707             adapter->link_duplex == FULL_DUPLEX) {
3708                 netdev->stats.tx_carrier_errors = 0;
3709                 adapter->stats.tncrs = 0;
3710         }
3711
3712         /* Tx Dropped needs to be maintained elsewhere */
3713
3714         /* Phy Stats */
3715         if (hw->media_type == e1000_media_type_copper) {
3716                 if ((adapter->link_speed == SPEED_1000) &&
3717                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3718                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3719                         adapter->phy_stats.idle_errors += phy_tmp;
3720                 }
3721
3722                 if ((hw->mac_type <= e1000_82546) &&
3723                    (hw->phy_type == e1000_phy_m88) &&
3724                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3725                         adapter->phy_stats.receive_errors += phy_tmp;
3726         }
3727
3728         /* Management Stats */
3729         if (hw->has_smbus) {
3730                 adapter->stats.mgptc += er32(MGTPTC);
3731                 adapter->stats.mgprc += er32(MGTPRC);
3732                 adapter->stats.mgpdc += er32(MGTPDC);
3733         }
3734
3735         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3736 }
3737
3738 /**
3739  * e1000_intr - Interrupt Handler
3740  * @irq: interrupt number
3741  * @data: pointer to a network interface device structure
3742  **/
3743 static irqreturn_t e1000_intr(int irq, void *data)
3744 {
3745         struct net_device *netdev = data;
3746         struct e1000_adapter *adapter = netdev_priv(netdev);
3747         struct e1000_hw *hw = &adapter->hw;
3748         u32 icr = er32(ICR);
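        /* the ICR read above also clears the asserted interrupt causes */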
3749
3750         if (unlikely((!icr)))
3751                 return IRQ_NONE;  /* Not our interrupt */
3752
3753         /* we might have caused the interrupt, but the above
3754          * read cleared it, and just in case the driver is
3755          * down there is nothing to do so return handled
3756          */
3757         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3758                 return IRQ_HANDLED;
3759
3760         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3761                 hw->get_link_status = 1;
3762                 /* guard against interrupt when we're going down */
3763                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3764                         schedule_delayed_work(&adapter->watchdog_task, 1);
3765         }
3766
3767         /* disable interrupts, without the synchronize_irq bit */
3768         ew32(IMC, ~0);
3769         E1000_WRITE_FLUSH();
3770
3771         if (likely(napi_schedule_prep(&adapter->napi))) {
3772                 adapter->total_tx_bytes = 0;
3773                 adapter->total_tx_packets = 0;
3774                 adapter->total_rx_bytes = 0;
3775                 adapter->total_rx_packets = 0;
3776                 __napi_schedule(&adapter->napi);
3777         } else {
3778                 /* this really should not happen! if it does it is basically a
3779                  * bug, but not a hard error, so enable ints and continue
3780                  */
3781                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3782                         e1000_irq_enable(adapter);
3783         }
3784
3785         return IRQ_HANDLED;
3786 }
3787
3788 /**
3789  * e1000_clean - NAPI Rx polling callback
3790  * @napi: napi struct containing our adapter structure
 * @budget: amount of Rx work the driver is allowed to do in this poll
3791  **/
3792 static int e1000_clean(struct napi_struct *napi, int budget)
3793 {
3794         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
3795                                                      napi);
3796         int tx_clean_complete = 0, work_done = 0;
3797
3798         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3799
3800         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3801
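        /* Returning the full budget keeps us in NAPI polling mode; we only try
         * to complete when Tx cleaning finished and Rx stayed under budget.
         */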
3802         if (!tx_clean_complete || work_done == budget)
3803                 return budget;
3804
3805         /* Exit the polling mode, but don't re-enable interrupts if stack might
3806          * poll us due to busy-polling
3807          */
3808         if (likely(napi_complete_done(napi, work_done))) {
3809                 if (likely(adapter->itr_setting & 3))
3810                         e1000_set_itr(adapter);
3811                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3812                         e1000_irq_enable(adapter);
3813         }
3814
3815         return work_done;
3816 }
3817
3818 /**
3819  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3820  * @adapter: board private structure
 * @tx_ring: ring to clean
3821  **/
3822 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3823                                struct e1000_tx_ring *tx_ring)
3824 {
3825         struct e1000_hw *hw = &adapter->hw;
3826         struct net_device *netdev = adapter->netdev;
3827         struct e1000_tx_desc *tx_desc, *eop_desc;
3828         struct e1000_tx_buffer *buffer_info;
3829         unsigned int i, eop;
3830         unsigned int count = 0;
3831         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3832         unsigned int bytes_compl = 0, pkts_compl = 0;
3833
3834         i = tx_ring->next_to_clean;
3835         eop = tx_ring->buffer_info[i].next_to_watch;
3836         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3837
3838         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3839                (count < tx_ring->count)) {
3840                 bool cleaned = false;
3841                 dma_rmb();      /* read buffer_info after eop_desc */
3842                 for ( ; !cleaned; count++) {
3843                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3844                         buffer_info = &tx_ring->buffer_info[i];
3845                         cleaned = (i == eop);
3846
3847                         if (cleaned) {
3848                                 total_tx_packets += buffer_info->segs;
3849                                 total_tx_bytes += buffer_info->bytecount;
3850                                 if (buffer_info->skb) {
3851                                         bytes_compl += buffer_info->skb->len;
3852                                         pkts_compl++;
3853                                 }
3854
3855                         }
3856                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3857                         tx_desc->upper.data = 0;
3858
3859                         if (unlikely(++i == tx_ring->count))
3860                                 i = 0;
3861                 }
3862
3863                 eop = tx_ring->buffer_info[i].next_to_watch;
3864                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3865         }
3866
3867         /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame,
3868          * which will reuse the cleaned buffers.
3869          */
3870         smp_store_release(&tx_ring->next_to_clean, i);
3871
3872         netdev_completed_queue(netdev, pkts_compl, bytes_compl);
3873
3874 #define TX_WAKE_THRESHOLD 32
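        /* Wake the queue only once TX_WAKE_THRESHOLD descriptors are free
         * again, rather than on every cleaned descriptor.
         */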
3875         if (unlikely(count && netif_carrier_ok(netdev) &&
3876                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3877                 /* Make sure that anybody stopping the queue after this
3878                  * sees the new next_to_clean.
3879                  */
3880                 smp_mb();
3881
3882                 if (netif_queue_stopped(netdev) &&
3883                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3884                         netif_wake_queue(netdev);
3885                         ++adapter->restart_queue;
3886                 }
3887         }
3888
3889         if (adapter->detect_tx_hung) {
3890                 /* Detect a transmit hang in hardware, this serializes the
3891                  * check with the clearing of time_stamp and movement of i
3892                  */
3893                 adapter->detect_tx_hung = false;
3894                 if (tx_ring->buffer_info[eop].time_stamp &&
3895                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3896                                (adapter->tx_timeout_factor * HZ)) &&
3897                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3898
3899                         /* detected Tx unit hang */
3900                         e_err(drv, "Detected Tx Unit Hang\n"
3901                               "  Tx Queue             <%lu>\n"
3902                               "  TDH                  <%x>\n"
3903                               "  TDT                  <%x>\n"
3904                               "  next_to_use          <%x>\n"
3905                               "  next_to_clean        <%x>\n"
3906                               "buffer_info[next_to_clean]\n"
3907                               "  time_stamp           <%lx>\n"
3908                               "  next_to_watch        <%x>\n"
3909                               "  jiffies              <%lx>\n"
3910                               "  next_to_watch.status <%x>\n",
3911                                 (unsigned long)(tx_ring - adapter->tx_ring),
3912                                 readl(hw->hw_addr + tx_ring->tdh),
3913                                 readl(hw->hw_addr + tx_ring->tdt),
3914                                 tx_ring->next_to_use,
3915                                 tx_ring->next_to_clean,
3916                                 tx_ring->buffer_info[eop].time_stamp,
3917                                 eop,
3918                                 jiffies,
3919                                 eop_desc->upper.fields.status);
3920                         e1000_dump(adapter);
3921                         netif_stop_queue(netdev);
3922                 }
3923         }
3924         adapter->total_tx_bytes += total_tx_bytes;
3925         adapter->total_tx_packets += total_tx_packets;
3926         netdev->stats.tx_bytes += total_tx_bytes;
3927         netdev->stats.tx_packets += total_tx_packets;
3928         return count < tx_ring->count;
3929 }
3930
3931 /**
3932  * e1000_rx_checksum - Receive Checksum Offload for 82543
3933  * @adapter:     board private structure
3934  * @status_err:  receive descriptor status and error fields
3935  * @csum:        receive descriptor csum field
3936  * @skb:         socket buffer with received data
3937  **/
3938 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3939                               u32 csum, struct sk_buff *skb)
3940 {
3941         struct e1000_hw *hw = &adapter->hw;
3942         u16 status = (u16)status_err;
3943         u8 errors = (u8)(status_err >> 24);
3944
3945         skb_checksum_none_assert(skb);
3946
3947         /* 82543 or newer only */
3948         if (unlikely(hw->mac_type < e1000_82543))
3949                 return;
3950         /* Ignore Checksum bit is set */
3951         if (unlikely(status & E1000_RXD_STAT_IXSM))
3952                 return;
3953         /* TCP/UDP checksum error bit is set */
3954         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3955                 /* let the stack verify checksum errors */
3956                 adapter->hw_csum_err++;
3957                 return;
3958         }
3959         /* TCP/UDP Checksum has not been calculated */
3960         if (!(status & E1000_RXD_STAT_TCPCS))
3961                 return;
3962
3963         /* It must be a TCP or UDP packet with a valid checksum */
3964         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3965                 /* TCP checksum is good */
3966                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3967         }
3968         adapter->hw_csum_good++;
3969 }
3970
3971 /**
3972  * e1000_consume_page - helper function for jumbo Rx path
 * @bi: software descriptor shadow data
 * @skb: skb being modified
 * @length: length of data being added
3973  **/
3974 static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
3975                                u16 length)
3976 {
3977         bi->rxbuf.page = NULL;
3978         skb->len += length;
3979         skb->data_len += length;
3980         skb->truesize += PAGE_SIZE;
3981 }
3982
3983 /**
3984  * e1000_receive_skb - helper function to handle rx indications
3985  * @adapter: board private structure
3986  * @status: descriptor status field as written by hardware
3987  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3988  * @skb: pointer to sk_buff to be indicated to stack
3989  */
3990 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3991                               __le16 vlan, struct sk_buff *skb)
3992 {
3993         skb->protocol = eth_type_trans(skb, adapter->netdev);
3994
3995         if (status & E1000_RXD_STAT_VP) {
3996                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3997
3998                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
3999         }
4000         napi_gro_receive(&adapter->napi, skb);
4001 }
4002
4003 /**
4004  * e1000_tbi_adjust_stats
4005  * @hw: Struct containing variables accessed by shared code
 * @stats: Struct containing the per-adapter statistic counters to adjust
4006  * @frame_len: The length of the frame in question
4007  * @mac_addr: The Ethernet destination address of the frame in question
4008  *
4009  * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
4010  */
4011 static void e1000_tbi_adjust_stats(struct e1000_hw *hw,
4012                                    struct e1000_hw_stats *stats,
4013                                    u32 frame_len, const u8 *mac_addr)
4014 {
4015         u64 carry_bit;
4016
4017         /* First adjust the frame length. */
4018         frame_len--;
4019         /* We need to adjust the statistics counters, since the hardware
4020          * counters overcount this packet as a CRC error and undercount
4021          * the packet as a good packet
4022          */
4023         /* This packet should not be counted as a CRC error. */
4024         stats->crcerrs--;
4025         /* This packet does count as a Good Packet Received. */
4026         stats->gprc++;
4027
4028         /* Adjust the Good Octets received counters */
4029         carry_bit = 0x80000000 & stats->gorcl;
4030         stats->gorcl += frame_len;
4031         /* If the high bit of Gorcl (the low 32 bits of the Good Octets
4032          * Received Count) was one before the addition,
4033          * AND it is zero after, then we lost the carry out,
4034          * need to add one to Gorch (Good Octets Received Count High).
4035          * This could be simplified if all environments supported
4036          * 64-bit integers.
4037          */
4038         if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
4039                 stats->gorch++;
4040         /* Is this a broadcast or multicast?  Check broadcast first,
4041          * since the test for a multicast frame will test positive on
4042          * a broadcast frame.
4043          */
4044         if (is_broadcast_ether_addr(mac_addr))
4045                 stats->bprc++;
4046         else if (is_multicast_ether_addr(mac_addr))
4047                 stats->mprc++;
4048
4049         if (frame_len == hw->max_frame_size) {
4050                 /* In this case, the hardware has overcounted the number of
4051                  * oversize frames.
4052                  */
4053                 if (stats->roc > 0)
4054                         stats->roc--;
4055         }
4056
4057         /* Adjust the bin counters when the extra byte put the frame in the
4058          * wrong bin. Remember that the frame_len was adjusted above.
4059          */
4060         if (frame_len == 64) {
4061                 stats->prc64++;
4062                 stats->prc127--;
4063         } else if (frame_len == 127) {
4064                 stats->prc127++;
4065                 stats->prc255--;
4066         } else if (frame_len == 255) {
4067                 stats->prc255++;
4068                 stats->prc511--;
4069         } else if (frame_len == 511) {
4070                 stats->prc511++;
4071                 stats->prc1023--;
4072         } else if (frame_len == 1023) {
4073                 stats->prc1023++;
4074                 stats->prc1522--;
4075         } else if (frame_len == 1522) {
4076                 stats->prc1522++;
4077         }
4078 }
4079
4080 static bool e1000_tbi_should_accept(struct e1000_adapter *adapter,
4081                                     u8 status, u8 errors,
4082                                     u32 length, const u8 *data)
4083 {
4084         struct e1000_hw *hw = &adapter->hw;
4085         u8 last_byte = *(data + length - 1);
4086
4087         if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
4088                 unsigned long irq_flags;
4089
4090                 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
4091                 e1000_tbi_adjust_stats(hw, &adapter->stats, length, data);
4092                 spin_unlock_irqrestore(&adapter->stats_lock, irq_flags);
4093
4094                 return true;
4095         }
4096
4097         return false;
4098 }
4099
4100 static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter,
4101                                           unsigned int bufsz)
4102 {
4103         struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz);
4104
4105         if (unlikely(!skb))
4106                 adapter->alloc_rx_buff_failed++;
4107         return skb;
4108 }
4109
4110 /**
4111  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4112  * @adapter: board private structure
4113  * @rx_ring: ring to clean
4114  * @work_done: amount of napi work completed this call
4115  * @work_to_do: max amount of work allowed for this call to do
4116  *
4117  * The return value indicates whether actual cleaning was done; there
4118  * is no guarantee that everything was cleaned.
4119  */
4120 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4121                                      struct e1000_rx_ring *rx_ring,
4122                                      int *work_done, int work_to_do)
4123 {
4124         struct net_device *netdev = adapter->netdev;
4125         struct pci_dev *pdev = adapter->pdev;
4126         struct e1000_rx_desc *rx_desc, *next_rxd;
4127         struct e1000_rx_buffer *buffer_info, *next_buffer;
4128         u32 length;
4129         unsigned int i;
4130         int cleaned_count = 0;
4131         bool cleaned = false;
4132         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4133
4134         i = rx_ring->next_to_clean;
4135         rx_desc = E1000_RX_DESC(*rx_ring, i);
4136         buffer_info = &rx_ring->buffer_info[i];
4137
4138         while (rx_desc->status & E1000_RXD_STAT_DD) {
4139                 struct sk_buff *skb;
4140                 u8 status;
4141
4142                 if (*work_done >= work_to_do)
4143                         break;
4144                 (*work_done)++;
4145                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4146
4147                 status = rx_desc->status;
4148
4149                 if (++i == rx_ring->count)
4150                         i = 0;
4151
4152                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4153                 prefetch(next_rxd);
4154
4155                 next_buffer = &rx_ring->buffer_info[i];
4156
4157                 cleaned = true;
4158                 cleaned_count++;
4159                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4160                                adapter->rx_buffer_len, DMA_FROM_DEVICE);
4161                 buffer_info->dma = 0;
4162
4163                 length = le16_to_cpu(rx_desc->length);
4164
4165                 /* errors is only valid for DD + EOP descriptors */
4166                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4167                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4168                         u8 *mapped = page_address(buffer_info->rxbuf.page);
4169
4170                         if (e1000_tbi_should_accept(adapter, status,
4171                                                     rx_desc->errors,
4172                                                     length, mapped)) {
4173                                 length--;
4174                         } else if (netdev->features & NETIF_F_RXALL) {
4175                                 goto process_skb;
4176                         } else {
4177                                 /* an error means any chain goes out the window
4178                                  * too
4179                                  */
4180                                 dev_kfree_skb(rx_ring->rx_skb_top);
4181                                 rx_ring->rx_skb_top = NULL;
4182                                 goto next_desc;
4183                         }
4184                 }
4185
4186 #define rxtop rx_ring->rx_skb_top
4187 process_skb:
4188                 if (!(status & E1000_RXD_STAT_EOP)) {
4189                         /* this descriptor is only the beginning (or middle) */
4190                         if (!rxtop) {
4191                                 /* this is the beginning of a chain */
4192                                 rxtop = napi_get_frags(&adapter->napi);
4193                                 if (!rxtop)
4194                                         break;
4195
4196                                 skb_fill_page_desc(rxtop, 0,
4197                                                    buffer_info->rxbuf.page,
4198                                                    0, length);
4199                         } else {
4200                                 /* this is the middle of a chain */
4201                                 skb_fill_page_desc(rxtop,
4202                                     skb_shinfo(rxtop)->nr_frags,
4203                                     buffer_info->rxbuf.page, 0, length);
4204                         }
4205                         e1000_consume_page(buffer_info, rxtop, length);
4206                         goto next_desc;
4207                 } else {
4208                         if (rxtop) {
4209                                 /* end of the chain */
4210                                 skb_fill_page_desc(rxtop,
4211                                     skb_shinfo(rxtop)->nr_frags,
4212                                     buffer_info->rxbuf.page, 0, length);
4213                                 skb = rxtop;
4214                                 rxtop = NULL;
4215                                 e1000_consume_page(buffer_info, skb, length);
4216                         } else {
4217                                 struct page *p;
4218                                 /* no chain, got EOP, this buf is the whole packet;
4219                                  * copybreak to save the put_page/alloc_page
4220                                  */
4221                                 p = buffer_info->rxbuf.page;
4222                                 if (length <= copybreak) {
4223                                         u8 *vaddr;
4224
4225                                         if (likely(!(netdev->features & NETIF_F_RXFCS)))
4226                                                 length -= 4;
4227                                         skb = e1000_alloc_rx_skb(adapter,
4228                                                                  length);
4229                                         if (!skb)
4230                                                 break;
4231
4232                                         vaddr = kmap_atomic(p);
4233                                         memcpy(skb_tail_pointer(skb), vaddr,
4234                                                length);
4235                                         kunmap_atomic(vaddr);
4236                                         /* re-use the page, so don't erase
4237                                          * buffer_info->rxbuf.page
4238                                          */
4239                                         skb_put(skb, length);
4240                                         e1000_rx_checksum(adapter,
4241                                                           status | rx_desc->errors << 24,
4242                                                           le16_to_cpu(rx_desc->csum), skb);
4243
4244                                         total_rx_bytes += skb->len;
4245                                         total_rx_packets++;
4246
4247                                         e1000_receive_skb(adapter, status,
4248                                                           rx_desc->special, skb);
4249                                         goto next_desc;
4250                                 } else {
4251                                         skb = napi_get_frags(&adapter->napi);
4252                                         if (!skb) {
4253                                                 adapter->alloc_rx_buff_failed++;
4254                                                 break;
4255                                         }
4256                                         skb_fill_page_desc(skb, 0, p, 0,
4257                                                            length);
4258                                         e1000_consume_page(buffer_info, skb,
4259                                                            length);
4260                                 }
4261                         }
4262                 }
4263
4264                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4265                 e1000_rx_checksum(adapter,
4266                                   (u32)(status) |
4267                                   ((u32)(rx_desc->errors) << 24),
4268                                   le16_to_cpu(rx_desc->csum), skb);
4269
4270                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4271                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4272                         pskb_trim(skb, skb->len - 4);
4273                 total_rx_packets++;
4274
4275                 if (status & E1000_RXD_STAT_VP) {
4276                         __le16 vlan = rx_desc->special;
4277                         u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4278
4279                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
4280                 }
4281
4282                 napi_gro_frags(&adapter->napi);
4283
4284 next_desc:
4285                 rx_desc->status = 0;
4286
4287                 /* return some buffers to hardware, one at a time is too slow */
4288                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4289                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4290                         cleaned_count = 0;
4291                 }
4292
4293                 /* use prefetched values */
4294                 rx_desc = next_rxd;
4295                 buffer_info = next_buffer;
4296         }
4297         rx_ring->next_to_clean = i;
4298
4299         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4300         if (cleaned_count)
4301                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4302
4303         adapter->total_rx_packets += total_rx_packets;
4304         adapter->total_rx_bytes += total_rx_bytes;
4305         netdev->stats.rx_bytes += total_rx_bytes;
4306         netdev->stats.rx_packets += total_rx_packets;
4307         return cleaned;
4308 }
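/* Editor's note: multi-descriptor jumbo frames are assembled above as page
 * fragments on rx_ring->rx_skb_top via napi_get_frags() and handed to GRO
 * with napi_gro_frags(); single-descriptor frames at or below copybreak are
 * instead copied into a small skb and passed up with e1000_receive_skb().
 */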
4309
4310 /* this should improve performance for small packets with large amounts
4311  * of reassembly being done in the stack
4312  */
4313 static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter,
4314                                        struct e1000_rx_buffer *buffer_info,
4315                                        u32 length, const void *data)
4316 {
4317         struct sk_buff *skb;
4318
4319         if (length > copybreak)
4320                 return NULL;
4321
4322         skb = e1000_alloc_rx_skb(adapter, length);
4323         if (!skb)
4324                 return NULL;
4325
4326         dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma,
4327                                 length, DMA_FROM_DEVICE);
4328
4329         skb_put_data(skb, data, length);
4330
4331         return skb;
4332 }
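/* Editor's note: on the copybreak path the receive buffer is only synced for
 * CPU access and its payload copied into a fresh skb; buffer_info->rxbuf.data
 * and the DMA mapping are left in place, so e1000_alloc_rx_buffers() can hand
 * the same buffer straight back to the hardware (the "skip" path there).
 */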
4333
4334 /**
4335  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4336  * @adapter: board private structure
4337  * @rx_ring: ring to clean
4338  * @work_done: amount of napi work completed this call
4339  * @work_to_do: max amount of work allowed for this call to do
4340  */
4341 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4342                                struct e1000_rx_ring *rx_ring,
4343                                int *work_done, int work_to_do)
4344 {
4345         struct net_device *netdev = adapter->netdev;
4346         struct pci_dev *pdev = adapter->pdev;
4347         struct e1000_rx_desc *rx_desc, *next_rxd;
4348         struct e1000_rx_buffer *buffer_info, *next_buffer;
4349         u32 length;
4350         unsigned int i;
4351         int cleaned_count = 0;
4352         bool cleaned = false;
4353         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4354
4355         i = rx_ring->next_to_clean;
4356         rx_desc = E1000_RX_DESC(*rx_ring, i);
4357         buffer_info = &rx_ring->buffer_info[i];
4358
4359         while (rx_desc->status & E1000_RXD_STAT_DD) {
4360                 struct sk_buff *skb;
4361                 u8 *data;
4362                 u8 status;
4363
4364                 if (*work_done >= work_to_do)
4365                         break;
4366                 (*work_done)++;
4367                 dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
4368
4369                 status = rx_desc->status;
4370                 length = le16_to_cpu(rx_desc->length);
4371
4372                 data = buffer_info->rxbuf.data;
4373                 prefetch(data);
4374                 skb = e1000_copybreak(adapter, buffer_info, length, data);
4375                 if (!skb) {
4376                         unsigned int frag_len = e1000_frag_len(adapter);
4377
4378                         skb = build_skb(data - E1000_HEADROOM, frag_len);
4379                         if (!skb) {
4380                                 adapter->alloc_rx_buff_failed++;
4381                                 break;
4382                         }
4383
4384                         skb_reserve(skb, E1000_HEADROOM);
4385                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4386                                          adapter->rx_buffer_len,
4387                                          DMA_FROM_DEVICE);
4388                         buffer_info->dma = 0;
4389                         buffer_info->rxbuf.data = NULL;
4390                 }
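                /* Editor's note: build_skb() is given the start of the frag
                 * allocation (data - E1000_HEADROOM) so the skb owns the
                 * whole buffer; skb_reserve() then steps past the headroom so
                 * skb->data points at the DMA'd packet again.
                 */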
4391
4392                 if (++i == rx_ring->count)
4393                         i = 0;
4394
4395                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4396                 prefetch(next_rxd);
4397
4398                 next_buffer = &rx_ring->buffer_info[i];
4399
4400                 cleaned = true;
4401                 cleaned_count++;
4402
4403                 /* !EOP means multiple descriptors were used to store a single
4404                  * packet, if that's the case we need to toss it.  In fact, we
4405                  * need to toss every packet with the EOP bit clear and the next
4406                  * frame that _does_ have the EOP bit set, as it is by
4407                  * definition only a frame fragment
4408                  */
4409                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4410                         adapter->discarding = true;
4411
4412                 if (adapter->discarding) {
4413                         /* All receives must fit into a single buffer */
4414                         netdev_dbg(netdev, "Receive packet consumed multiple buffers\n");
4415                         dev_kfree_skb(skb);
4416                         if (status & E1000_RXD_STAT_EOP)
4417                                 adapter->discarding = false;
4418                         goto next_desc;
4419                 }
4420
4421                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4422                         if (e1000_tbi_should_accept(adapter, status,
4423                                                     rx_desc->errors,
4424                                                     length, data)) {
4425                                 length--;
4426                         } else if (netdev->features & NETIF_F_RXALL) {
4427                                 goto process_skb;
4428                         } else {
4429                                 dev_kfree_skb(skb);
4430                                 goto next_desc;
4431                         }
4432                 }
4433
4434 process_skb:
4435                 total_rx_bytes += (length - 4); /* don't count FCS */
4436                 total_rx_packets++;
4437
4438                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4439                         /* adjust length to remove Ethernet CRC, this must be
4440                          * done after the TBI_ACCEPT workaround above
4441                          */
4442                         length -= 4;
4443
4444                 if (buffer_info->rxbuf.data == NULL)
4445                         skb_put(skb, length);
4446                 else /* copybreak skb */
4447                         skb_trim(skb, length);
4448
4449                 /* Receive Checksum Offload */
4450                 e1000_rx_checksum(adapter,
4451                                   (u32)(status) |
4452                                   ((u32)(rx_desc->errors) << 24),
4453                                   le16_to_cpu(rx_desc->csum), skb);
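                /* Editor's note: the descriptor's status byte is passed in
                 * bits 7:0 and its errors byte in bits 31:24 of the combined
                 * status/error word consumed by e1000_rx_checksum().
                 */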
4454
4455                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4456
4457 next_desc:
4458                 rx_desc->status = 0;
4459
4460                 /* return some buffers to hardware, one at a time is too slow */
4461                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4462                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4463                         cleaned_count = 0;
4464                 }
4465
4466                 /* use prefetched values */
4467                 rx_desc = next_rxd;
4468                 buffer_info = next_buffer;
4469         }
4470         rx_ring->next_to_clean = i;
4471
4472         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4473         if (cleaned_count)
4474                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4475
4476         adapter->total_rx_packets += total_rx_packets;
4477         adapter->total_rx_bytes += total_rx_bytes;
4478         netdev->stats.rx_bytes += total_rx_bytes;
4479         netdev->stats.rx_packets += total_rx_packets;
4480         return cleaned;
4481 }
4482
4483 /**
4484  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4485  * @adapter: address of board private structure
4486  * @rx_ring: pointer to receive ring structure
4487  * @cleaned_count: number of buffers to allocate this pass
4488  **/
4489 static void
4490 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4491                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4492 {
4493         struct pci_dev *pdev = adapter->pdev;
4494         struct e1000_rx_desc *rx_desc;
4495         struct e1000_rx_buffer *buffer_info;
4496         unsigned int i;
4497
4498         i = rx_ring->next_to_use;
4499         buffer_info = &rx_ring->buffer_info[i];
4500
4501         while (cleaned_count--) {
4502                 /* allocate a new page if necessary */
4503                 if (!buffer_info->rxbuf.page) {
4504                         buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC);
4505                         if (unlikely(!buffer_info->rxbuf.page)) {
4506                                 adapter->alloc_rx_buff_failed++;
4507                                 break;
4508                         }
4509                 }
4510
4511                 if (!buffer_info->dma) {
4512                         buffer_info->dma = dma_map_page(&pdev->dev,
4513                                                         buffer_info->rxbuf.page, 0,
4514                                                         adapter->rx_buffer_len,
4515                                                         DMA_FROM_DEVICE);
4516                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4517                                 put_page(buffer_info->rxbuf.page);
4518                                 buffer_info->rxbuf.page = NULL;
4519                                 buffer_info->dma = 0;
4520                                 adapter->alloc_rx_buff_failed++;
4521                                 break;
4522                         }
4523                 }
4524
4525                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4526                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4527
4528                 if (unlikely(++i == rx_ring->count))
4529                         i = 0;
4530                 buffer_info = &rx_ring->buffer_info[i];
4531         }
4532
4533         if (likely(rx_ring->next_to_use != i)) {
4534                 rx_ring->next_to_use = i;
4535                 if (unlikely(i-- == 0))
4536                         i = (rx_ring->count - 1);
4537
4538                 /* Force memory writes to complete before letting h/w
4539                  * know there are new descriptors to fetch.  (Only
4540                  * applicable for weak-ordered memory model archs,
4541                  * such as IA-64).
4542                  */
4543                 dma_wmb();
4544                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4545         }
4546 }
4547
4548 /**
4549  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4550  * @adapter: address of board private structure
 * @rx_ring: pointer to receive ring structure
 * @cleaned_count: number of buffers to allocate this pass
4551  **/
4552 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4553                                    struct e1000_rx_ring *rx_ring,
4554                                    int cleaned_count)
4555 {
4556         struct e1000_hw *hw = &adapter->hw;
4557         struct pci_dev *pdev = adapter->pdev;
4558         struct e1000_rx_desc *rx_desc;
4559         struct e1000_rx_buffer *buffer_info;
4560         unsigned int i;
4561         unsigned int bufsz = adapter->rx_buffer_len;
4562
4563         i = rx_ring->next_to_use;
4564         buffer_info = &rx_ring->buffer_info[i];
4565
4566         while (cleaned_count--) {
4567                 void *data;
4568
4569                 if (buffer_info->rxbuf.data)
4570                         goto skip;
4571
4572                 data = e1000_alloc_frag(adapter);
4573                 if (!data) {
4574                         /* Better luck next round */
4575                         adapter->alloc_rx_buff_failed++;
4576                         break;
4577                 }
4578
4579                 /* Fix for errata 23, can't cross 64kB boundary */
4580                 if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4581                         void *olddata = data;
4582                         e_err(rx_err, "skb align check failed: %u bytes at "
4583                               "%p\n", bufsz, data);
4584                         /* Try again, without freeing the previous */
4585                         data = e1000_alloc_frag(adapter);
4586                         /* Failed allocation, critical failure */
4587                         if (!data) {
4588                                 skb_free_frag(olddata);
4589                                 adapter->alloc_rx_buff_failed++;
4590                                 break;
4591                         }
4592
4593                         if (!e1000_check_64k_bound(adapter, data, bufsz)) {
4594                                 /* give up */
4595                                 skb_free_frag(data);
4596                                 skb_free_frag(olddata);
4597                                 adapter->alloc_rx_buff_failed++;
4598                                 break;
4599                         }
4600
4601                         /* Use new allocation */
4602                         skb_free_frag(olddata);
4603                 }
4604                 buffer_info->dma = dma_map_single(&pdev->dev,
4605                                                   data,
4606                                                   adapter->rx_buffer_len,
4607                                                   DMA_FROM_DEVICE);
4608                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4609                         skb_free_frag(data);
4610                         buffer_info->dma = 0;
4611                         adapter->alloc_rx_buff_failed++;
4612                         break;
4613                 }
4614
4615                 /* XXX if it was allocated cleanly it will never map to a
4616                  * boundary crossing
4617                  */
4618
4619                 /* Fix for errata 23, can't cross 64kB boundary */
4620                 if (!e1000_check_64k_bound(adapter,
4621                                         (void *)(unsigned long)buffer_info->dma,
4622                                         adapter->rx_buffer_len)) {
4623                         e_err(rx_err, "dma align check failed: %u bytes at "
4624                               "%p\n", adapter->rx_buffer_len,
4625                               (void *)(unsigned long)buffer_info->dma);
4626
4627                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4628                                          adapter->rx_buffer_len,
4629                                          DMA_FROM_DEVICE);
4630
4631                         skb_free_frag(data);
4632                         buffer_info->rxbuf.data = NULL;
4633                         buffer_info->dma = 0;
4634
4635                         adapter->alloc_rx_buff_failed++;
4636                         break;
4637                 }
4638                 buffer_info->rxbuf.data = data;
4639  skip:
4640                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4641                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4642
4643                 if (unlikely(++i == rx_ring->count))
4644                         i = 0;
4645                 buffer_info = &rx_ring->buffer_info[i];
4646         }
4647
4648         if (likely(rx_ring->next_to_use != i)) {
4649                 rx_ring->next_to_use = i;
4650                 if (unlikely(i-- == 0))
4651                         i = (rx_ring->count - 1);
4652
4653                 /* Force memory writes to complete before letting h/w
4654                  * know there are new descriptors to fetch.  (Only
4655                  * applicable for weak-ordered memory model archs,
4656                  * such as IA-64).
4657                  */
4658                 dma_wmb();
4659                 writel(i, hw->hw_addr + rx_ring->rdt);
4660         }
4661 }
4662
4663 /**
4664  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4665  * @adapter: board private structure
4666  **/
4667 static void e1000_smartspeed(struct e1000_adapter *adapter)
4668 {
4669         struct e1000_hw *hw = &adapter->hw;
4670         u16 phy_status;
4671         u16 phy_ctrl;
4672
4673         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4674            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4675                 return;
4676
4677         if (adapter->smartspeed == 0) {
4678                 /* If Master/Slave config fault is asserted twice,
4679                  * we assume back-to-back
4680                  */
4681                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4682                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4683                         return;
4684                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4685                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
4686                         return;
4687                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4688                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4689                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4690                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4691                                             phy_ctrl);
4692                         adapter->smartspeed++;
4693                         if (!e1000_phy_setup_autoneg(hw) &&
4694                            !e1000_read_phy_reg(hw, PHY_CTRL,
4695                                                &phy_ctrl)) {
4696                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4697                                              MII_CR_RESTART_AUTO_NEG);
4698                                 e1000_write_phy_reg(hw, PHY_CTRL,
4699                                                     phy_ctrl);
4700                         }
4701                 }
4702                 return;
4703         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4704                 /* If still no link, perhaps using 2/3 pair cable */
4705                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4706                 phy_ctrl |= CR_1000T_MS_ENABLE;
4707                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4708                 if (!e1000_phy_setup_autoneg(hw) &&
4709                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4710                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4711                                      MII_CR_RESTART_AUTO_NEG);
4712                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4713                 }
4714         }
4715         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4716         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4717                 adapter->smartspeed = 0;
4718 }
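/* Editor's note: the workaround above reacts to back-to-back 1000BASE-T
 * master/slave configuration faults by first clearing the forced master/slave
 * setting and restarting autonegotiation; if that has not helped by the time
 * the counter reaches E1000_SMARTSPEED_DOWNSHIFT, it forces master/slave mode
 * back on in case only two cable pairs are usable.
 */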
4719
4720 /**
4721  * e1000_ioctl - handle ioctl calls
4722  * @netdev: pointer to the network interface
4723  * @ifr: pointer to interface request data
4724  * @cmd: ioctl command to execute
4725  **/
4726 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4727 {
4728         switch (cmd) {
4729         case SIOCGMIIPHY:
4730         case SIOCGMIIREG:
4731         case SIOCSMIIREG:
4732                 return e1000_mii_ioctl(netdev, ifr, cmd);
4733         default:
4734                 return -EOPNOTSUPP;
4735         }
4736 }
4737
4738 /**
4739  * e1000_mii_ioctl - handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG)
4740  * @netdev: pointer to the network interface
4741  * @ifr: pointer to interface request data
4742  * @cmd: ioctl command to execute
4743  **/
4744 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4745                            int cmd)
4746 {
4747         struct e1000_adapter *adapter = netdev_priv(netdev);
4748         struct e1000_hw *hw = &adapter->hw;
4749         struct mii_ioctl_data *data = if_mii(ifr);
4750         int retval;
4751         u16 mii_reg;
4752         unsigned long flags;
4753
4754         if (hw->media_type != e1000_media_type_copper)
4755                 return -EOPNOTSUPP;
4756
4757         switch (cmd) {
4758         case SIOCGMIIPHY:
4759                 data->phy_id = hw->phy_addr;
4760                 break;
4761         case SIOCGMIIREG:
4762                 spin_lock_irqsave(&adapter->stats_lock, flags);
4763                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4764                                    &data->val_out)) {
4765                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4766                         return -EIO;
4767                 }
4768                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4769                 break;
4770         case SIOCSMIIREG:
4771                 if (data->reg_num & ~(0x1F))
4772                         return -EFAULT;
4773                 mii_reg = data->val_in;
4774                 spin_lock_irqsave(&adapter->stats_lock, flags);
4775                 if (e1000_write_phy_reg(hw, data->reg_num,
4776                                         mii_reg)) {
4777                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4778                         return -EIO;
4779                 }
4780                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4781                 if (hw->media_type == e1000_media_type_copper) {
4782                         switch (data->reg_num) {
4783                         case PHY_CTRL:
4784                                 if (mii_reg & MII_CR_POWER_DOWN)
4785                                         break;
4786                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4787                                         hw->autoneg = 1;
4788                                         hw->autoneg_advertised = 0x2F;
4789                                 } else {
4790                                         u32 speed;
4791                                         if (mii_reg & 0x40)
4792                                                 speed = SPEED_1000;
4793                                         else if (mii_reg & 0x2000)
4794                                                 speed = SPEED_100;
4795                                         else
4796                                                 speed = SPEED_10;
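                                        /* 0x40, 0x2000 and 0x100 are the
                                         * standard BMCR speed/duplex bits
                                         * (BMCR_SPEED1000, BMCR_SPEED100,
                                         * BMCR_FULLDPLX)
                                         */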
4797                                         retval = e1000_set_spd_dplx(
4798                                                 adapter, speed,
4799                                                 ((mii_reg & 0x100)
4800                                                  ? DUPLEX_FULL :
4801                                                  DUPLEX_HALF));
4802                                         if (retval)
4803                                                 return retval;
4804                                 }
4805                                 if (netif_running(adapter->netdev))
4806                                         e1000_reinit_locked(adapter);
4807                                 else
4808                                         e1000_reset(adapter);
4809                                 break;
4810                         case M88E1000_PHY_SPEC_CTRL:
4811                         case M88E1000_EXT_PHY_SPEC_CTRL:
4812                                 if (e1000_phy_reset(hw))
4813                                         return -EIO;
4814                                 break;
4815                         }
4816                 } else {
4817                         switch (data->reg_num) {
4818                         case PHY_CTRL:
4819                                 if (mii_reg & MII_CR_POWER_DOWN)
4820                                         break;
4821                                 if (netif_running(adapter->netdev))
4822                                         e1000_reinit_locked(adapter);
4823                                 else
4824                                         e1000_reset(adapter);
4825                                 break;
4826                         }
4827                 }
4828                 break;
4829         default:
4830                 return -EOPNOTSUPP;
4831         }
4832         return E1000_SUCCESS;
4833 }
4834
4835 void e1000_pci_set_mwi(struct e1000_hw *hw)
4836 {
4837         struct e1000_adapter *adapter = hw->back;
4838         int ret_val = pci_set_mwi(adapter->pdev);
4839
4840         if (ret_val)
4841                 e_err(probe, "Error in setting MWI\n");
4842 }
4843
4844 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4845 {
4846         struct e1000_adapter *adapter = hw->back;
4847
4848         pci_clear_mwi(adapter->pdev);
4849 }
4850
4851 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4852 {
4853         struct e1000_adapter *adapter = hw->back;
4854         return pcix_get_mmrbc(adapter->pdev);
4855 }
4856
4857 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4858 {
4859         struct e1000_adapter *adapter = hw->back;
4860         pcix_set_mmrbc(adapter->pdev, mmrbc);
4861 }
4862
4863 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4864 {
4865         outl(value, port);
4866 }
4867
4868 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4869 {
4870         u16 vid;
4871
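        /* return true on the first VLAN id found in active_vlans; an empty
         * bitmap means VLAN is not in use
         */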
4872         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4873                 return true;
4874         return false;
4875 }
4876
4877 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4878                               netdev_features_t features)
4879 {
4880         struct e1000_hw *hw = &adapter->hw;
4881         u32 ctrl;
4882
4883         ctrl = er32(CTRL);
4884         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
4885                 /* enable VLAN tag insert/strip */
4886                 ctrl |= E1000_CTRL_VME;
4887         } else {
4888                 /* disable VLAN tag insert/strip */
4889                 ctrl &= ~E1000_CTRL_VME;
4890         }
4891         ew32(CTRL, ctrl);
4892 }
4893 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4894                                      bool filter_on)
4895 {
4896         struct e1000_hw *hw = &adapter->hw;
4897         u32 rctl;
4898
4899         if (!test_bit(__E1000_DOWN, &adapter->flags))
4900                 e1000_irq_disable(adapter);
4901
4902         __e1000_vlan_mode(adapter, adapter->netdev->features);
4903         if (filter_on) {
4904                 /* enable VLAN receive filtering */
4905                 rctl = er32(RCTL);
4906                 rctl &= ~E1000_RCTL_CFIEN;
4907                 if (!(adapter->netdev->flags & IFF_PROMISC))
4908                         rctl |= E1000_RCTL_VFE;
4909                 ew32(RCTL, rctl);
4910                 e1000_update_mng_vlan(adapter);
4911         } else {
4912                 /* disable VLAN receive filtering */
4913                 rctl = er32(RCTL);
4914                 rctl &= ~E1000_RCTL_VFE;
4915                 ew32(RCTL, rctl);
4916         }
4917
4918         if (!test_bit(__E1000_DOWN, &adapter->flags))
4919                 e1000_irq_enable(adapter);
4920 }
4921
4922 static void e1000_vlan_mode(struct net_device *netdev,
4923                             netdev_features_t features)
4924 {
4925         struct e1000_adapter *adapter = netdev_priv(netdev);
4926
4927         if (!test_bit(__E1000_DOWN, &adapter->flags))
4928                 e1000_irq_disable(adapter);
4929
4930         __e1000_vlan_mode(adapter, features);
4931
4932         if (!test_bit(__E1000_DOWN, &adapter->flags))
4933                 e1000_irq_enable(adapter);
4934 }
4935
4936 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
4937                                  __be16 proto, u16 vid)
4938 {
4939         struct e1000_adapter *adapter = netdev_priv(netdev);
4940         struct e1000_hw *hw = &adapter->hw;
4941         u32 vfta, index;
4942
4943         if ((hw->mng_cookie.status &
4944              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4945             (vid == adapter->mng_vlan_id))
4946                 return 0;
4947
4948         if (!e1000_vlan_used(adapter))
4949                 e1000_vlan_filter_on_off(adapter, true);
4950
4951         /* add VID to filter table */
4952         index = (vid >> 5) & 0x7F;
4953         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4954         vfta |= (1 << (vid & 0x1F));
4955         e1000_write_vfta(hw, index, vfta);
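        /* Editor's note: the VFTA is a 128 x 32-bit bitmap covering all 4096
         * VLAN ids, e.g. vid 100 sets bit (100 & 0x1F) = 4 in register
         * (100 >> 5) = 3.
         */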
4956
4957         set_bit(vid, adapter->active_vlans);
4958
4959         return 0;
4960 }
4961
4962 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
4963                                   __be16 proto, u16 vid)
4964 {
4965         struct e1000_adapter *adapter = netdev_priv(netdev);
4966         struct e1000_hw *hw = &adapter->hw;
4967         u32 vfta, index;
4968
4969         if (!test_bit(__E1000_DOWN, &adapter->flags))
4970                 e1000_irq_disable(adapter);
4971         if (!test_bit(__E1000_DOWN, &adapter->flags))
4972                 e1000_irq_enable(adapter);
4973
4974         /* remove VID from filter table */
4975         index = (vid >> 5) & 0x7F;
4976         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4977         vfta &= ~(1 << (vid & 0x1F));
4978         e1000_write_vfta(hw, index, vfta);
4979
4980         clear_bit(vid, adapter->active_vlans);
4981
4982         if (!e1000_vlan_used(adapter))
4983                 e1000_vlan_filter_on_off(adapter, false);
4984
4985         return 0;
4986 }
4987
4988 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4989 {
4990         u16 vid;
4991
4992         if (!e1000_vlan_used(adapter))
4993                 return;
4994
4995         e1000_vlan_filter_on_off(adapter, true);
4996         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4997                 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
4998 }
4999
5000 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
5001 {
5002         struct e1000_hw *hw = &adapter->hw;
5003
5004         hw->autoneg = 0;
5005
5006         /* Make sure dplx is at most 1 bit and lsb of speed is not set
5007          * for the switch() below to work
5008          */
5009         if ((spd & 1) || (dplx & ~1))
5010                 goto err_inval;
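        /* Editor's note: DUPLEX_HALF/DUPLEX_FULL are 0/1 and the SPEED_*
         * values are all even, so the spd + dplx sums in the switch below are
         * unambiguous, e.g. SPEED_100 + DUPLEX_FULL == 101 can only mean
         * 100 Mbps full duplex.
         */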
5011
5012         /* Fiber NICs only allow 1000 Mbps full duplex */
5013         if ((hw->media_type == e1000_media_type_fiber) &&
5014             spd != SPEED_1000 &&
5015             dplx != DUPLEX_FULL)
5016                 goto err_inval;
5017
5018         switch (spd + dplx) {
5019         case SPEED_10 + DUPLEX_HALF:
5020                 hw->forced_speed_duplex = e1000_10_half;
5021                 break;
5022         case SPEED_10 + DUPLEX_FULL:
5023                 hw->forced_speed_duplex = e1000_10_full;
5024                 break;
5025         case SPEED_100 + DUPLEX_HALF:
5026                 hw->forced_speed_duplex = e1000_100_half;
5027                 break;
5028         case SPEED_100 + DUPLEX_FULL:
5029                 hw->forced_speed_duplex = e1000_100_full;
5030                 break;
5031         case SPEED_1000 + DUPLEX_FULL:
5032                 hw->autoneg = 1;
5033                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
5034                 break;
5035         case SPEED_1000 + DUPLEX_HALF: /* not supported */
5036         default:
5037                 goto err_inval;
5038         }
5039
5040         /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
5041         hw->mdix = AUTO_ALL_MODES;
5042
5043         return 0;
5044
5045 err_inval:
5046         e_err(probe, "Unsupported Speed/Duplex configuration\n");
5047         return -EINVAL;
5048 }
5049
5050 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
5051 {
5052         struct net_device *netdev = pci_get_drvdata(pdev);
5053         struct e1000_adapter *adapter = netdev_priv(netdev);
5054         struct e1000_hw *hw = &adapter->hw;
5055         u32 ctrl, ctrl_ext, rctl, status;
5056         u32 wufc = adapter->wol;
5057
5058         netif_device_detach(netdev);
5059
5060         if (netif_running(netdev)) {
5061                 int count = E1000_CHECK_RESET_COUNT;
5062
5063                 while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
5064                         usleep_range(10000, 20000);
5065
5066                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5067                 e1000_down(adapter);
5068         }
5069
5070         status = er32(STATUS);
5071         if (status & E1000_STATUS_LU)
5072                 wufc &= ~E1000_WUFC_LNKC;
5073
5074         if (wufc) {
5075                 e1000_setup_rctl(adapter);
5076                 e1000_set_rx_mode(netdev);
5077
5078                 rctl = er32(RCTL);
5079
5080                 /* turn on all-multi mode if wake on multicast is enabled */
5081                 if (wufc & E1000_WUFC_MC)
5082                         rctl |= E1000_RCTL_MPE;
5083
5084                 /* enable receives in the hardware */
5085                 ew32(RCTL, rctl | E1000_RCTL_EN);
5086
5087                 if (hw->mac_type >= e1000_82540) {
5088                         ctrl = er32(CTRL);
5089                         /* advertise wake from D3Cold */
5090                         #define E1000_CTRL_ADVD3WUC 0x00100000
5091                         /* phy power management enable */
5092                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5093                         ctrl |= E1000_CTRL_ADVD3WUC |
5094                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5095                         ew32(CTRL, ctrl);
5096                 }
5097
5098                 if (hw->media_type == e1000_media_type_fiber ||
5099                     hw->media_type == e1000_media_type_internal_serdes) {
5100                         /* keep the laser running in D3 */
5101                         ctrl_ext = er32(CTRL_EXT);
5102                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5103                         ew32(CTRL_EXT, ctrl_ext);
5104                 }
5105
5106                 ew32(WUC, E1000_WUC_PME_EN);
5107                 ew32(WUFC, wufc);
5108         } else {
5109                 ew32(WUC, 0);
5110                 ew32(WUFC, 0);
5111         }
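        /* Editor's note: wake-on-link-change was dropped above when the link
         * is already up; when any wake filters remain, receives stay enabled
         * and WUC_PME_EN arms PME generation for the selected wake events.
         */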
5112
5113         e1000_release_manageability(adapter);
5114
5115         *enable_wake = !!wufc;
5116
5117         /* make sure adapter isn't asleep if manageability is enabled */
5118         if (adapter->en_mng_pt)
5119                 *enable_wake = true;
5120
5121         if (netif_running(netdev))
5122                 e1000_free_irq(adapter);
5123
5124         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5125                 pci_disable_device(pdev);
5126
5127         return 0;
5128 }
5129
5130 static int __maybe_unused e1000_suspend(struct device *dev)
5131 {
5132         int retval;
5133         struct pci_dev *pdev = to_pci_dev(dev);
5134         bool wake;
5135
5136         retval = __e1000_shutdown(pdev, &wake);
5137         device_set_wakeup_enable(dev, wake);
5138
5139         return retval;
5140 }
5141
5142 static int __maybe_unused e1000_resume(struct device *dev)
5143 {
5144         struct pci_dev *pdev = to_pci_dev(dev);
5145         struct net_device *netdev = pci_get_drvdata(pdev);
5146         struct e1000_adapter *adapter = netdev_priv(netdev);
5147         struct e1000_hw *hw = &adapter->hw;
5148         u32 err;
5149
5150         if (adapter->need_ioport)
5151                 err = pci_enable_device(pdev);
5152         else
5153                 err = pci_enable_device_mem(pdev);
5154         if (err) {
5155                 pr_err("Cannot enable PCI device from suspend\n");
5156                 return err;
5157         }
5158
5159         /* flush memory to make sure state is correct */
5160         smp_mb__before_atomic();
5161         clear_bit(__E1000_DISABLED, &adapter->flags);
5162         pci_set_master(pdev);
5163
5164         pci_enable_wake(pdev, PCI_D3hot, 0);
5165         pci_enable_wake(pdev, PCI_D3cold, 0);
5166
5167         if (netif_running(netdev)) {
5168                 err = e1000_request_irq(adapter);
5169                 if (err)
5170                         return err;
5171         }
5172
5173         e1000_power_up_phy(adapter);
5174         e1000_reset(adapter);
5175         ew32(WUS, ~0);
5176
5177         e1000_init_manageability(adapter);
5178
5179         if (netif_running(netdev))
5180                 e1000_up(adapter);
5181
5182         netif_device_attach(netdev);
5183
5184         return 0;
5185 }
5186
5187 static void e1000_shutdown(struct pci_dev *pdev)
5188 {
5189         bool wake;
5190
5191         __e1000_shutdown(pdev, &wake);
5192
5193         if (system_state == SYSTEM_POWER_OFF) {
5194                 pci_wake_from_d3(pdev, wake);
5195                 pci_set_power_state(pdev, PCI_D3hot);
5196         }
5197 }
5198
5199 #ifdef CONFIG_NET_POLL_CONTROLLER
5200 /* Polling 'interrupt' - used by things like netconsole to send skbs
5201  * without having to re-enable interrupts. It's not called while
5202  * the interrupt routine is executing.
5203  */
5204 static void e1000_netpoll(struct net_device *netdev)
5205 {
5206         struct e1000_adapter *adapter = netdev_priv(netdev);
5207
5208         if (disable_hardirq(adapter->pdev->irq))
5209                 e1000_intr(adapter->pdev->irq, netdev);
5210         enable_irq(adapter->pdev->irq);
5211 }
5212 #endif
5213
5214 /**
5215  * e1000_io_error_detected - called when PCI error is detected
5216  * @pdev: Pointer to PCI device
5217  * @state: The current pci connection state
5218  *
5219  * This function is called after a PCI bus error affecting
5220  * this device has been detected.
5221  */
5222 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5223                                                 pci_channel_state_t state)
5224 {
5225         struct net_device *netdev = pci_get_drvdata(pdev);
5226         struct e1000_adapter *adapter = netdev_priv(netdev);
5227
5228         netif_device_detach(netdev);
5229
5230         if (state == pci_channel_io_perm_failure)
5231                 return PCI_ERS_RESULT_DISCONNECT;
5232
5233         if (netif_running(netdev))
5234                 e1000_down(adapter);
5235
5236         if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
5237                 pci_disable_device(pdev);
5238
5239         /* Request a slot reset. */
5240         return PCI_ERS_RESULT_NEED_RESET;
5241 }
5242
5243 /**
5244  * e1000_io_slot_reset - called after the pci bus has been reset.
5245  * @pdev: Pointer to PCI device
5246  *
5247  * Restart the card from scratch, as if from a cold-boot. Implementation
5248  * resembles the first-half of the e1000_resume routine.
5249  */
5250 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5251 {
5252         struct net_device *netdev = pci_get_drvdata(pdev);
5253         struct e1000_adapter *adapter = netdev_priv(netdev);
5254         struct e1000_hw *hw = &adapter->hw;
5255         int err;
5256
5257         if (adapter->need_ioport)
5258                 err = pci_enable_device(pdev);
5259         else
5260                 err = pci_enable_device_mem(pdev);
5261         if (err) {
5262                 pr_err("Cannot re-enable PCI device after reset.\n");
5263                 return PCI_ERS_RESULT_DISCONNECT;
5264         }
5265
5266         /* flush memory to make sure state is correct */
5267         smp_mb__before_atomic();
5268         clear_bit(__E1000_DISABLED, &adapter->flags);
5269         pci_set_master(pdev);
5270
5271         pci_enable_wake(pdev, PCI_D3hot, 0);
5272         pci_enable_wake(pdev, PCI_D3cold, 0);
5273
5274         e1000_reset(adapter);
5275         ew32(WUS, ~0);
5276
5277         return PCI_ERS_RESULT_RECOVERED;
5278 }
5279
5280 /**
5281  * e1000_io_resume - called when traffic can start flowing again.
5282  * @pdev: Pointer to PCI device
5283  *
5284  * This callback is called when the error recovery driver tells us that
5285  * it's OK to resume normal operation. Implementation resembles the
5286  * second-half of the e1000_resume routine.
5287  */
5288 static void e1000_io_resume(struct pci_dev *pdev)
5289 {
5290         struct net_device *netdev = pci_get_drvdata(pdev);
5291         struct e1000_adapter *adapter = netdev_priv(netdev);
5292
5293         e1000_init_manageability(adapter);
5294
5295         if (netif_running(netdev)) {
5296                 if (e1000_up(adapter)) {
5297                         pr_info("can't bring device back up after reset\n");
5298                         return;
5299                 }
5300         }
5301
5302         netif_device_attach(netdev);
5303 }
5304
5305 /* e1000_main.c */