1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved.
4  *
5  * Derived from Intel e1000 driver
6  * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
7  */
8
9 #include "atl1c.h"
10
11 char atl1c_driver_name[] = "atl1c";
12
13 /*
14  * atl1c_pci_tbl - PCI Device ID Table
15  *
16  * Wildcard entries (PCI_ANY_ID) should come last
17  * Last entry must be all 0s
18  *
19  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
20  *   Class, Class Mask, private data (not used) }
21  */
22 static const struct pci_device_id atl1c_pci_tbl[] = {
23         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
24         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
25         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
26         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
27         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
28         {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
29         /* required last entry */
30         { 0 }
31 };
32 MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
33
34 MODULE_AUTHOR("Jie Yang");
35 MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
36 MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
37 MODULE_LICENSE("GPL");
38
39 static int atl1c_stop_mac(struct atl1c_hw *hw);
40 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
41 static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
42 static void atl1c_start_mac(struct atl1c_adapter *adapter);
43 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
44                    int *work_done, int work_to_do);
45 static int atl1c_up(struct atl1c_adapter *adapter);
46 static void atl1c_down(struct atl1c_adapter *adapter);
47 static int atl1c_reset_mac(struct atl1c_hw *hw);
48 static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
49 static int atl1c_configure(struct atl1c_adapter *adapter);
50 static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode);
51
52
53 static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
54         NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
55 static void atl1c_pcie_patch(struct atl1c_hw *hw)
56 {
57         u32 mst_data, data;
58
59         /* pclk sel could switch to 25M */
60         AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
61         mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
62         AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
63
64         /* WoL/PCIE related settings */
65         if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
66                 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
67                 data |= PCIE_PHYMISC_FORCE_RCV_DET;
68                 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
69         } else { /* newer devices: set bit 5 (WAKEN_25M) of MASTER_CTRL */
70                 if (!(mst_data & MASTER_CTRL_WAKEN_25M))
71                         AT_WRITE_REG(hw, REG_MASTER_CTRL,
72                                 mst_data | MASTER_CTRL_WAKEN_25M);
73         }
74         /* aspm/PCIE setting only for l2cb 1.0 */
75         if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
76                 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
77                 data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
78                         L2CB1_PCIE_PHYMISC2_CDR_BW);
79                 data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
80                         L2CB1_PCIE_PHYMISC2_L0S_TH);
81                 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
82                 /* extend L1 sync timer */
83                 AT_READ_REG(hw, REG_LINK_CTRL, &data);
84                 data |= LINK_CTRL_EXT_SYNC;
85                 AT_WRITE_REG(hw, REG_LINK_CTRL, data);
86         }
87         /* l2cb 1.x & l1d 1.x */
88         if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
89                 AT_READ_REG(hw, REG_PM_CTRL, &data);
90                 data |= PM_CTRL_L0S_BUFSRX_EN;
91                 AT_WRITE_REG(hw, REG_PM_CTRL, data);
92                 /* clear vendor msg */
93                 AT_READ_REG(hw, REG_DMA_DBG, &data);
94                 AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
95         }
96 }
97
98 /* FIXME: is this still needed? */
99 /*
100  * atl1c_reset_pcie - reset the PCIe module
101  */
102 static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
103 {
104         u32 data;
105         u32 pci_cmd;
106         struct pci_dev *pdev = hw->adapter->pdev;
107         int pos;
108
109         AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
110         pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
111         pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
112                 PCI_COMMAND_IO);
113         AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
114
115         /*
116          * Clear any power-saving settings
117          */
118         pci_enable_wake(pdev, PCI_D3hot, 0);
119         pci_enable_wake(pdev, PCI_D3cold, 0);
120         /* wol sts read-clear */
121         AT_READ_REG(hw, REG_WOL_CTRL, &data);
122         AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
123
124         /*
125          * Mask some pcie error bits
126          */
127         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
128         if (pos) {
129                 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
130                 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
131                 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
132         }
133         /* clear error status */
134         pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
135                         PCI_EXP_DEVSTA_NFED |
136                         PCI_EXP_DEVSTA_FED |
137                         PCI_EXP_DEVSTA_CED |
138                         PCI_EXP_DEVSTA_URD);
139
140         AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
141         data &= ~LTSSM_ID_EN_WRO;
142         AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
143
144         atl1c_pcie_patch(hw);
145         if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
146                 atl1c_disable_l0s_l1(hw);
147
148         msleep(5);
149 }
150
151 /**
152  * atl1c_irq_enable - Enable default interrupt generation settings
153  * @adapter: board private structure
154  */
155 static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
156 {
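        /* irq_sem counts nested disables; interrupts are re-enabled
         * only when the count drops back to zero. */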
157         if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
158                 AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
159                 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
160                 AT_WRITE_FLUSH(&adapter->hw);
161         }
162 }
163
164 /**
165  * atl1c_irq_disable - Mask off interrupt generation on the NIC
166  * @adapter: board private structure
167  */
168 static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
169 {
170         atomic_inc(&adapter->irq_sem);
171         AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
172         AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
173         AT_WRITE_FLUSH(&adapter->hw);
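        /* wait for any interrupt handler still running on another CPU */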
174         synchronize_irq(adapter->pdev->irq);
175 }
176
177 /**
178  * atl1c_irq_reset - reset the interrupt configuration on the NIC
179  * @adapter: board private structure
180  */
181 static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
182 {
183         atomic_set(&adapter->irq_sem, 1);
184         atl1c_irq_enable(adapter);
185 }
186
187 /*
188  * atl1c_wait_until_idle - poll the idle status register up to
189  * AT_HW_MAX_IDLE_DELAY times; return 0 when idle, else the last status
190  */
191 static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
192 {
193         int timeout;
194         u32 data;
195
196         for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
197                 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
198                 if ((data & modu_ctrl) == 0)
199                         return 0;
200                 msleep(1);
201         }
202         return data;
203 }
204
205 /**
206  * atl1c_phy_config - Timer Call-back
207  * @t: pointer to the phy_config_timer embedded in the adapter structure
208  */
209 static void atl1c_phy_config(struct timer_list *t)
210 {
211         struct atl1c_adapter *adapter = from_timer(adapter, t,
212                                                    phy_config_timer);
213         struct atl1c_hw *hw = &adapter->hw;
214         unsigned long flags;
215
216         spin_lock_irqsave(&adapter->mdio_lock, flags);
217         atl1c_restart_autoneg(hw);
218         spin_unlock_irqrestore(&adapter->mdio_lock, flags);
219 }
220
221 void atl1c_reinit_locked(struct atl1c_adapter *adapter)
222 {
223         atl1c_down(adapter);
224         atl1c_up(adapter);
225         clear_bit(__AT_RESETTING, &adapter->flags);
226 }
227
228 static void atl1c_check_link_status(struct atl1c_adapter *adapter)
229 {
230         struct atl1c_hw *hw = &adapter->hw;
231         struct net_device *netdev = adapter->netdev;
232         struct pci_dev    *pdev   = adapter->pdev;
233         int err;
234         unsigned long flags;
235         u16 speed, duplex, phy_data;
236
237         spin_lock_irqsave(&adapter->mdio_lock, flags);
238         /* MII_BMSR must be read twice; the link status bit is latched */
239         atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
240         atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
241         spin_unlock_irqrestore(&adapter->mdio_lock, flags);
242
243         if ((phy_data & BMSR_LSTATUS) == 0) {
244                 /* link down */
245                 netif_carrier_off(netdev);
246                 hw->hibernate = true;
247                 if (atl1c_reset_mac(hw) != 0)
248                         if (netif_msg_hw(adapter))
249                                 dev_warn(&pdev->dev, "reset mac failed\n");
250                 atl1c_set_aspm(hw, SPEED_0);
251                 atl1c_post_phy_linkchg(hw, SPEED_0);
252                 atl1c_reset_dma_ring(adapter);
253                 atl1c_configure(adapter);
254         } else {
255                 /* Link Up */
256                 hw->hibernate = false;
257                 spin_lock_irqsave(&adapter->mdio_lock, flags);
258                 err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
259                 spin_unlock_irqrestore(&adapter->mdio_lock, flags);
260                 if (unlikely(err))
261                         return;
262                 /* adopt the negotiated speed/duplex as our link setting */
263                 if (adapter->link_speed != speed ||
264                     adapter->link_duplex != duplex) {
265                         adapter->link_speed  = speed;
266                         adapter->link_duplex = duplex;
267                         atl1c_set_aspm(hw, speed);
268                         atl1c_post_phy_linkchg(hw, speed);
269                         atl1c_start_mac(adapter);
270                         if (netif_msg_link(adapter))
271                                 dev_info(&pdev->dev,
272                                         "%s: %s NIC Link is Up<%d Mbps %s>\n",
273                                         atl1c_driver_name, netdev->name,
274                                         adapter->link_speed,
275                                         adapter->link_duplex == FULL_DUPLEX ?
276                                         "Full Duplex" : "Half Duplex");
277                 }
278                 if (!netif_carrier_ok(netdev))
279                         netif_carrier_on(netdev);
280         }
281 }
282
283 static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
284 {
285         struct net_device *netdev = adapter->netdev;
286         struct pci_dev    *pdev   = adapter->pdev;
287         u16 phy_data;
288         u16 link_up;
289
290         spin_lock(&adapter->mdio_lock);
291         atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
292         atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
293         spin_unlock(&adapter->mdio_lock);
294         link_up = phy_data & BMSR_LSTATUS;
295         /* notify upper layer link down ASAP */
296         if (!link_up) {
297                 if (netif_carrier_ok(netdev)) {
298                         /* old link state: Up */
299                         netif_carrier_off(netdev);
300                         if (netif_msg_link(adapter))
301                                 dev_info(&pdev->dev,
302                                         "%s: %s NIC Link is Down\n",
303                                         atl1c_driver_name, netdev->name);
304                         adapter->link_speed = SPEED_0;
305                 }
306         }
307
308         set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
309         schedule_work(&adapter->common_task);
310 }
311
312 static void atl1c_common_task(struct work_struct *work)
313 {
314         struct atl1c_adapter *adapter;
315         struct net_device *netdev;
316
317         adapter = container_of(work, struct atl1c_adapter, common_task);
318         netdev = adapter->netdev;
319
320         if (test_bit(__AT_DOWN, &adapter->flags))
321                 return;
322
323         if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
324                 netif_device_detach(netdev);
325                 atl1c_down(adapter);
326                 atl1c_up(adapter);
327                 netif_device_attach(netdev);
328         }
329
330         if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
331                 &adapter->work_event)) {
332                 atl1c_irq_disable(adapter);
333                 atl1c_check_link_status(adapter);
334                 atl1c_irq_enable(adapter);
335         }
336 }
337
338
339 static void atl1c_del_timer(struct atl1c_adapter *adapter)
340 {
341         del_timer_sync(&adapter->phy_config_timer);
342 }
343
344
345 /**
346  * atl1c_tx_timeout - Respond to a Tx Hang
347  * @netdev: network interface device structure
348  * @txqueue: index of hanging tx queue
349  */
350 static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
351 {
352         struct atl1c_adapter *adapter = netdev_priv(netdev);
353
354         /* Do the reset outside of interrupt context */
355         set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
356         schedule_work(&adapter->common_task);
357 }
358
359 /**
360  * atl1c_set_multi - Multicast and Promiscuous mode set
361  * @netdev: network interface device structure
362  *
363  * The set_multi entry point is called whenever the multicast address
364  * list or the network interface flags are updated.  This routine is
365  * responsible for configuring the hardware for proper multicast,
366  * promiscuous mode, and all-multi behavior.
367  */
368 static void atl1c_set_multi(struct net_device *netdev)
369 {
370         struct atl1c_adapter *adapter = netdev_priv(netdev);
371         struct atl1c_hw *hw = &adapter->hw;
372         struct netdev_hw_addr *ha;
373         u32 mac_ctrl_data;
374         u32 hash_value;
375
376         /* Check for Promiscuous and All Multicast modes */
377         AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
378
379         if (netdev->flags & IFF_PROMISC) {
380                 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
381         } else if (netdev->flags & IFF_ALLMULTI) {
382                 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
383                 mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
384         } else {
385                 mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
386         }
387
388         AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
389
390         /* clear the old settings from the multicast hash table */
391         AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
392         AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
393
394         /* compute each multicast address's hash value and set it in the hash table */
395         netdev_for_each_mc_addr(ha, netdev) {
396                 hash_value = atl1c_hash_mc_addr(hw, ha->addr);
397                 atl1c_hash_set(hw, hash_value);
398         }
399 }
400
401 static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
402 {
403         if (features & NETIF_F_HW_VLAN_CTAG_RX) {
404                 /* enable VLAN tag insert/strip */
405                 *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
406         } else {
407                 /* disable VLAN tag insert/strip */
408                 *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
409         }
410 }
411
412 static void atl1c_vlan_mode(struct net_device *netdev,
413         netdev_features_t features)
414 {
415         struct atl1c_adapter *adapter = netdev_priv(netdev);
416         struct pci_dev *pdev = adapter->pdev;
417         u32 mac_ctrl_data = 0;
418
419         if (netif_msg_pktdata(adapter))
420                 dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
421
422         atl1c_irq_disable(adapter);
423         AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
424         __atl1c_vlan_mode(features, &mac_ctrl_data);
425         AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
426         atl1c_irq_enable(adapter);
427 }
428
429 static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
430 {
431         struct pci_dev *pdev = adapter->pdev;
432
433         if (netif_msg_pktdata(adapter))
434                 dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
435         atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
436 }
437
438 /**
439  * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
440  * @netdev: network interface device structure
441  * @p: pointer to an address structure
442  *
443  * Returns 0 on success, negative on failure
444  */
445 static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
446 {
447         struct atl1c_adapter *adapter = netdev_priv(netdev);
448         struct sockaddr *addr = p;
449
450         if (!is_valid_ether_addr(addr->sa_data))
451                 return -EADDRNOTAVAIL;
452
453         if (netif_running(netdev))
454                 return -EBUSY;
455
456         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
457         memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
458
459         atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
460
461         return 0;
462 }
463
464 static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
465                                 struct net_device *dev)
466 {
467         unsigned int head_size;
468         int mtu = dev->mtu;
469
470         adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
471                 roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
472
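        /* the receive fragment must hold NET_SKB_PAD + NET_IP_ALIGN, the data
         * area and the trailing skb_shared_info; round it up to a power of two */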
473         head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
474                     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
475         adapter->rx_frag_size = roundup_pow_of_two(head_size);
476 }
477
478 static netdev_features_t atl1c_fix_features(struct net_device *netdev,
479         netdev_features_t features)
480 {
481         /*
482          * Since there is no support for separate rx/tx vlan accel
483          * enable/disable make sure tx flag is always in same state as rx.
484          */
485         if (features & NETIF_F_HW_VLAN_CTAG_RX)
486                 features |= NETIF_F_HW_VLAN_CTAG_TX;
487         else
488                 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
489
490         if (netdev->mtu > MAX_TSO_FRAME_SIZE)
491                 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
492
493         return features;
494 }
495
496 static int atl1c_set_features(struct net_device *netdev,
497         netdev_features_t features)
498 {
499         netdev_features_t changed = netdev->features ^ features;
500
501         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
502                 atl1c_vlan_mode(netdev, features);
503
504         return 0;
505 }
506
507 static void atl1c_set_max_mtu(struct net_device *netdev)
508 {
509         struct atl1c_adapter *adapter = netdev_priv(netdev);
510         struct atl1c_hw *hw = &adapter->hw;
511
512         switch (hw->nic_type) {
513         /* These (GbE) devices support jumbo packets, max_mtu 6122 */
514         case athr_l1c:
515         case athr_l1d:
516         case athr_l1d_2:
517                 netdev->max_mtu = MAX_JUMBO_FRAME_SIZE -
518                                   (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
519                 break;
520         /* The 10/100 devices don't support jumbo packets, max_mtu 1500 */
521         default:
522                 netdev->max_mtu = ETH_DATA_LEN;
523                 break;
524         }
525 }
526
527 /**
528  * atl1c_change_mtu - Change the Maximum Transfer Unit
529  * @netdev: network interface device structure
530  * @new_mtu: new value for maximum frame size
531  *
532  * Returns 0 on success, negative on failure
533  */
534 static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
535 {
536         struct atl1c_adapter *adapter = netdev_priv(netdev);
537
538         /* set MTU */
539         if (netif_running(netdev)) {
540                 while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
541                         msleep(1);
542                 netdev->mtu = new_mtu;
543                 adapter->hw.max_frame_size = new_mtu;
544                 atl1c_set_rxbufsize(adapter, netdev);
545                 atl1c_down(adapter);
546                 netdev_update_features(netdev);
547                 atl1c_up(adapter);
548                 clear_bit(__AT_RESETTING, &adapter->flags);
549         }
550         return 0;
551 }
552
553 /*
554  *  caller should hold mdio_lock
555  */
556 static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
557 {
558         struct atl1c_adapter *adapter = netdev_priv(netdev);
559         u16 result;
560
561         atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
562         return result;
563 }
564
565 static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
566                              int reg_num, int val)
567 {
568         struct atl1c_adapter *adapter = netdev_priv(netdev);
569
570         atl1c_write_phy_reg(&adapter->hw, reg_num, val);
571 }
572
573 static int atl1c_mii_ioctl(struct net_device *netdev,
574                            struct ifreq *ifr, int cmd)
575 {
576         struct atl1c_adapter *adapter = netdev_priv(netdev);
577         struct pci_dev *pdev = adapter->pdev;
578         struct mii_ioctl_data *data = if_mii(ifr);
579         unsigned long flags;
580         int retval = 0;
581
582         if (!netif_running(netdev))
583                 return -EINVAL;
584
585         spin_lock_irqsave(&adapter->mdio_lock, flags);
586         switch (cmd) {
587         case SIOCGMIIPHY:
588                 data->phy_id = 0;
589                 break;
590
591         case SIOCGMIIREG:
592                 if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
593                                     &data->val_out)) {
594                         retval = -EIO;
595                         goto out;
596                 }
597                 break;
598
599         case SIOCSMIIREG:
600                 if (data->reg_num & ~(0x1F)) {
601                         retval = -EFAULT;
602                         goto out;
603                 }
604
605                 dev_dbg(&pdev->dev, "<atl1c_mii_ioctl> write %x %x",
606                                 data->reg_num, data->val_in);
607                 if (atl1c_write_phy_reg(&adapter->hw,
608                                      data->reg_num, data->val_in)) {
609                         retval = -EIO;
610                         goto out;
611                 }
612                 break;
613
614         default:
615                 retval = -EOPNOTSUPP;
616                 break;
617         }
618 out:
619         spin_unlock_irqrestore(&adapter->mdio_lock, flags);
620         return retval;
621 }
622
623 static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
624 {
625         switch (cmd) {
626         case SIOCGMIIPHY:
627         case SIOCGMIIREG:
628         case SIOCSMIIREG:
629                 return atl1c_mii_ioctl(netdev, ifr, cmd);
630         default:
631                 return -EOPNOTSUPP;
632         }
633 }
634
635 /**
636  * atl1c_alloc_queues - Allocate memory for all rings
637  * @adapter: board private structure to initialize
638  *
639  */
640 static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
641 {
642         return 0;
643 }
644
645 static void atl1c_set_mac_type(struct atl1c_hw *hw)
646 {
647         switch (hw->device_id) {
648         case PCI_DEVICE_ID_ATTANSIC_L2C:
649                 hw->nic_type = athr_l2c;
650                 break;
651         case PCI_DEVICE_ID_ATTANSIC_L1C:
652                 hw->nic_type = athr_l1c;
653                 break;
654         case PCI_DEVICE_ID_ATHEROS_L2C_B:
655                 hw->nic_type = athr_l2c_b;
656                 break;
657         case PCI_DEVICE_ID_ATHEROS_L2C_B2:
658                 hw->nic_type = athr_l2c_b2;
659                 break;
660         case PCI_DEVICE_ID_ATHEROS_L1D:
661                 hw->nic_type = athr_l1d;
662                 break;
663         case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
664                 hw->nic_type = athr_l1d_2;
665                 break;
666         default:
667                 break;
668         }
669 }
670
671 static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
672 {
673         u32 link_ctrl_data;
674
675         atl1c_set_mac_type(hw);
676         AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
677
678         hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE  |
679                          ATL1C_TXQ_MODE_ENHANCE;
680         hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
681                           ATL1C_ASPM_L1_SUPPORT;
682         hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
683
684         if (hw->nic_type == athr_l1c ||
685             hw->nic_type == athr_l1d ||
686             hw->nic_type == athr_l1d_2)
687                 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
688         return 0;
689 }
690
691 struct atl1c_platform_patch {
692         u16 pci_did;
693         u8  pci_revid;
694         u16 subsystem_vid;
695         u16 subsystem_did;
696         u32 patch_flag;
697 #define ATL1C_LINK_PATCH        0x1
698 };
699 static const struct atl1c_platform_patch plats[] = {
700 {0x2060, 0xC1, 0x1019, 0x8152, 0x1},
701 {0x2060, 0xC1, 0x1019, 0x2060, 0x1},
702 {0x2060, 0xC1, 0x1019, 0xE000, 0x1},
703 {0x2062, 0xC0, 0x1019, 0x8152, 0x1},
704 {0x2062, 0xC0, 0x1019, 0x2062, 0x1},
705 {0x2062, 0xC0, 0x1458, 0xE000, 0x1},
706 {0x2062, 0xC1, 0x1019, 0x8152, 0x1},
707 {0x2062, 0xC1, 0x1019, 0x2062, 0x1},
708 {0x2062, 0xC1, 0x1458, 0xE000, 0x1},
709 {0x2062, 0xC1, 0x1565, 0x2802, 0x1},
710 {0x2062, 0xC1, 0x1565, 0x2801, 0x1},
711 {0x1073, 0xC0, 0x1019, 0x8151, 0x1},
712 {0x1073, 0xC0, 0x1019, 0x1073, 0x1},
713 {0x1073, 0xC0, 0x1458, 0xE000, 0x1},
714 {0x1083, 0xC0, 0x1458, 0xE000, 0x1},
715 {0x1083, 0xC0, 0x1019, 0x8151, 0x1},
716 {0x1083, 0xC0, 0x1019, 0x1083, 0x1},
717 {0x1083, 0xC0, 0x1462, 0x7680, 0x1},
718 {0x1083, 0xC0, 0x1565, 0x2803, 0x1},
719 {0},
720 };
721
722 static void atl1c_patch_assign(struct atl1c_hw *hw)
723 {
724         struct pci_dev  *pdev = hw->adapter->pdev;
725         u32 misc_ctrl;
726         int i = 0;
727
728         hw->msi_lnkpatch = false;
729
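        /* look for a board-specific entry that needs the MSI link patch */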
730         while (plats[i].pci_did != 0) {
731                 if (plats[i].pci_did == hw->device_id &&
732                     plats[i].pci_revid == hw->revision_id &&
733                     plats[i].subsystem_vid == hw->subsystem_vendor_id &&
734                     plats[i].subsystem_did == hw->subsystem_id) {
735                         if (plats[i].patch_flag & ATL1C_LINK_PATCH)
736                                 hw->msi_lnkpatch = true;
737                 }
738                 i++;
739         }
740
741         if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
742             hw->revision_id == L2CB_V21) {
743                 /* config access mode */
744                 pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
745                                        REG_PCIE_DEV_MISC_CTRL);
746                 pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
747                 misc_ctrl &= ~0x100;
748                 pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
749                                        REG_PCIE_DEV_MISC_CTRL);
750                 pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
751         }
752 }
753 /**
754  * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
755  * @adapter: board private structure to initialize
756  *
757  * atl1c_sw_init initializes the Adapter private data structure.
758  * Fields are initialized based on PCI device information and
759  * OS network device settings (MTU size).
760  */
761 static int atl1c_sw_init(struct atl1c_adapter *adapter)
762 {
763         struct atl1c_hw *hw   = &adapter->hw;
764         struct pci_dev  *pdev = adapter->pdev;
765         u32 revision;
766
767
768         adapter->wol = 0;
769         device_set_wakeup_enable(&pdev->dev, false);
770         adapter->link_speed = SPEED_0;
771         adapter->link_duplex = FULL_DUPLEX;
772         adapter->tpd_ring[0].count = 1024;
773         adapter->rfd_ring.count = 512;
774
775         hw->vendor_id = pdev->vendor;
776         hw->device_id = pdev->device;
777         hw->subsystem_vendor_id = pdev->subsystem_vendor;
778         hw->subsystem_id = pdev->subsystem_device;
779         pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
780         hw->revision_id = revision & 0xFF;
781         /* before link up, we assume hibernate is true */
782         hw->hibernate = true;
783         hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
784         if (atl1c_setup_mac_funcs(hw) != 0) {
785                 dev_err(&pdev->dev, "set mac function pointers failed\n");
786                 return -1;
787         }
788         atl1c_patch_assign(hw);
789
790         hw->intr_mask = IMR_NORMAL_MASK;
791         hw->phy_configured = false;
792         hw->preamble_len = 7;
793         hw->max_frame_size = adapter->netdev->mtu;
794         hw->autoneg_advertised = ADVERTISED_Autoneg;
795         hw->indirect_tab = 0xE4E4E4E4;
796         hw->base_cpu = 0;
797
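        /* hw->ict and hw->smb_timer appear to be in 2 us ticks
         * (50000 -> 100 ms, 200000 -> 400 ms, matching the notes below) */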
798         hw->ict = 50000;                /* 100ms */
799         hw->smb_timer = 200000;         /* 400ms */
800         hw->rx_imt = 200;
801         hw->tx_imt = 1000;
802
803         hw->tpd_burst = 5;
804         hw->rfd_burst = 8;
805         hw->dma_order = atl1c_dma_ord_out;
806         hw->dmar_block = atl1c_dma_req_1024;
807
808         if (atl1c_alloc_queues(adapter)) {
809                 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
810                 return -ENOMEM;
811         }
812         /* TODO */
813         atl1c_set_rxbufsize(adapter, adapter->netdev);
814         atomic_set(&adapter->irq_sem, 1);
815         spin_lock_init(&adapter->mdio_lock);
816         spin_lock_init(&adapter->hw.intr_mask_lock);
817         set_bit(__AT_DOWN, &adapter->flags);
818
819         return 0;
820 }
821
822 static inline void atl1c_clean_buffer(struct pci_dev *pdev,
823                                 struct atl1c_buffer *buffer_info)
824 {
825         u16 pci_direction;
826         if (buffer_info->flags & ATL1C_BUFFER_FREE)
827                 return;
828         if (buffer_info->dma) {
829                 if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
830                         pci_direction = DMA_FROM_DEVICE;
831                 else
832                         pci_direction = DMA_TO_DEVICE;
833
834                 if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
835                         dma_unmap_single(&pdev->dev, buffer_info->dma,
836                                          buffer_info->length, pci_direction);
837                 else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
838                         dma_unmap_page(&pdev->dev, buffer_info->dma,
839                                        buffer_info->length, pci_direction);
840         }
841         if (buffer_info->skb)
842                 dev_consume_skb_any(buffer_info->skb);
843         buffer_info->dma = 0;
844         buffer_info->skb = NULL;
845         ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
846 }
847 /**
848  * atl1c_clean_tx_ring - Free Tx-skb
849  * @adapter: board private structure
850  * @type: type of transmit queue
851  */
852 static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
853                                 enum atl1c_trans_queue type)
854 {
855         struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
856         struct atl1c_buffer *buffer_info;
857         struct pci_dev *pdev = adapter->pdev;
858         u16 index, ring_count;
859
860         ring_count = tpd_ring->count;
861         for (index = 0; index < ring_count; index++) {
862                 buffer_info = &tpd_ring->buffer_info[index];
863                 atl1c_clean_buffer(pdev, buffer_info);
864         }
865
866         netdev_reset_queue(adapter->netdev);
867
868         /* Zero out Tx-buffers */
869         memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
870                 ring_count);
871         atomic_set(&tpd_ring->next_to_clean, 0);
872         tpd_ring->next_to_use = 0;
873 }
874
875 /**
876  * atl1c_clean_rx_ring - Free rx-reservation skbs
877  * @adapter: board private structure
878  */
879 static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
880 {
881         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
882         struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
883         struct atl1c_buffer *buffer_info;
884         struct pci_dev *pdev = adapter->pdev;
885         int j;
886
887         for (j = 0; j < rfd_ring->count; j++) {
888                 buffer_info = &rfd_ring->buffer_info[j];
889                 atl1c_clean_buffer(pdev, buffer_info);
890         }
891         /* zero out the descriptor ring */
892         memset(rfd_ring->desc, 0, rfd_ring->size);
893         rfd_ring->next_to_clean = 0;
894         rfd_ring->next_to_use = 0;
895         rrd_ring->next_to_use = 0;
896         rrd_ring->next_to_clean = 0;
897 }
898
899 /*
900  * Initialize the read / write pointers of all descriptor rings
901  */
902 static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
903 {
904         struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
905         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
906         struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
907         struct atl1c_buffer *buffer_info;
908         int i, j;
909
910         for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
911                 tpd_ring[i].next_to_use = 0;
912                 atomic_set(&tpd_ring[i].next_to_clean, 0);
913                 buffer_info = tpd_ring[i].buffer_info;
914                 for (j = 0; j < tpd_ring->count; j++)
915                         ATL1C_SET_BUFFER_STATE(&buffer_info[j],
916                                         ATL1C_BUFFER_FREE);
917         }
918         rfd_ring->next_to_use = 0;
919         rfd_ring->next_to_clean = 0;
920         rrd_ring->next_to_use = 0;
921         rrd_ring->next_to_clean = 0;
922         for (j = 0; j < rfd_ring->count; j++) {
923                 buffer_info = &rfd_ring->buffer_info[j];
924                 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
925         }
926 }
927
928 /**
929  * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
930  * @adapter: board private structure
931  *
932  * Free all transmit and receive software resources
933  */
934 static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
935 {
936         struct pci_dev *pdev = adapter->pdev;
937
938         dma_free_coherent(&pdev->dev, adapter->ring_header.size,
939                           adapter->ring_header.desc, adapter->ring_header.dma);
940         adapter->ring_header.desc = NULL;
941
942         /* Note: only free tpd_ring.buffer_info;
943          * it also contains rfd_ring.buffer_info, so do not double free */
944         if (adapter->tpd_ring[0].buffer_info) {
945                 kfree(adapter->tpd_ring[0].buffer_info);
946                 adapter->tpd_ring[0].buffer_info = NULL;
947         }
948         if (adapter->rx_page) {
949                 put_page(adapter->rx_page);
950                 adapter->rx_page = NULL;
951         }
952 }
953
954 /**
955  * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
956  * @adapter: board private structure
957  *
958  * Return 0 on success, negative on failure
959  */
960 static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
961 {
962         struct pci_dev *pdev = adapter->pdev;
963         struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
964         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
965         struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
966         struct atl1c_ring_header *ring_header = &adapter->ring_header;
967         int size;
968         int i;
969         int count = 0;
970         int rx_desc_count = 0;
971         u32 offset = 0;
972
973         rrd_ring->count = rfd_ring->count;
974         for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
975                 tpd_ring[i].count = tpd_ring[0].count;
976
977         /* 2 TPD queues: one high-priority queue and
978          * one normal-priority queue */
979         size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
980                 rfd_ring->count);
981         tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
982         if (unlikely(!tpd_ring->buffer_info))
983                 goto err_nomem;
984
985         for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
986                 tpd_ring[i].buffer_info =
987                         (tpd_ring->buffer_info + count);
988                 count += tpd_ring[i].count;
989         }
990
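        /* the RFD buffer_info entries follow the two TPD queues inside the
         * single allocation made above */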
991         rfd_ring->buffer_info =
992                 (tpd_ring->buffer_info + count);
993         count += rfd_ring->count;
994         rx_desc_count += rfd_ring->count;
995
996         /*
997          * real ring DMA buffer
998          * each ring/block may need up to 8 bytes for alignment, hence the
999          * additional bytes tacked onto the end.
1000          */
1001         ring_header->size = size =
1002                 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
1003                 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
1004                 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
1005                 8 * 4;
1006
1007         ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
1008                                                &ring_header->dma, GFP_KERNEL);
1009         if (unlikely(!ring_header->desc)) {
1010                 dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
1011                 goto err_nomem;
1012         }
1013         /* init TPD ring */
1014
1015         tpd_ring[0].dma = roundup(ring_header->dma, 8);
1016         offset = tpd_ring[0].dma - ring_header->dma;
1017         for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
1018                 tpd_ring[i].dma = ring_header->dma + offset;
1019                 tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
1020                 tpd_ring[i].size =
1021                         sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
1022                 offset += roundup(tpd_ring[i].size, 8);
1023         }
1024         /* init RFD ring */
1025         rfd_ring->dma = ring_header->dma + offset;
1026         rfd_ring->desc = (u8 *) ring_header->desc + offset;
1027         rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
1028         offset += roundup(rfd_ring->size, 8);
1029
1030         /* init RRD ring */
1031         rrd_ring->dma = ring_header->dma + offset;
1032         rrd_ring->desc = (u8 *) ring_header->desc + offset;
1033         rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
1034                 rrd_ring->count;
1035         offset += roundup(rrd_ring->size, 8);
1036
1037         return 0;
1038
1039 err_nomem:
1040         kfree(tpd_ring->buffer_info);
1041         return -ENOMEM;
1042 }
1043
1044 static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1045 {
1046         struct atl1c_hw *hw = &adapter->hw;
1047         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1048         struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1049         struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1050                                 adapter->tpd_ring;
1051
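        /* program the base address and size of each descriptor ring, then
         * latch them into the MAC with the REG_LOAD_PTR write at the end */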
1052         /* TPD */
1053         AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
1054                         (u32)((tpd_ring[atl1c_trans_normal].dma &
1055                                 AT_DMA_HI_ADDR_MASK) >> 32));
1056         /* just enable normal priority TX queue */
1057         AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
1058                         (u32)(tpd_ring[atl1c_trans_normal].dma &
1059                                 AT_DMA_LO_ADDR_MASK));
1060         AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
1061                         (u32)(tpd_ring[atl1c_trans_high].dma &
1062                                 AT_DMA_LO_ADDR_MASK));
1063         AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
1064                         (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
1065
1066
1067         /* RFD */
1068         AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1069                         (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1070         AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
1071                         (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
1072
1073         AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1074                         rfd_ring->count & RFD_RING_SIZE_MASK);
1075         AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1076                         adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1077
1078         /* RRD */
1079         AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
1080                         (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
1081         AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1082                         (rrd_ring->count & RRD_RING_SIZE_MASK));
1083
1084         if (hw->nic_type == athr_l2c_b) {
1085                 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1086                 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
1087                 AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L);
1088                 AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L);
1089                 AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L);
1090                 AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L);
1091                 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0);        /* TX watermark, to enter l1 state.*/
1092                 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0);          /* RXD threshold.*/
1093         }
1094         /* Load all of base address above */
1095         AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1096 }
1097
1098 static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1099 {
1100         struct atl1c_hw *hw = &adapter->hw;
1101         int max_pay_load;
1102         u16 tx_offload_thresh;
1103         u32 txq_ctrl_data;
1104
1105         tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1106         AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1107                 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
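        /* clamp the DMA read burst so it never exceeds the PCIe max read
         * request size configured for this device */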
1108         max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1109         hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1110         /*
1111          * if the BIOS changed the DMA read max request size to an invalid
1112          * value, restore it to the default
1113          */
1114         if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1115                 pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1116                 hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1117         }
1118         txq_ctrl_data =
1119                 hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1120                 L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1121
1122         AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1123 }
1124
1125 static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1126 {
1127         struct atl1c_hw *hw = &adapter->hw;
1128         u32 rxq_ctrl_data;
1129
1130         rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
1131                         RXQ_RFD_BURST_NUM_SHIFT;
1132
1133         if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1134                 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1135
1136         /* aspm for gigabit */
1137         if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1138                 rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1139                         ASPM_THRUPUT_LIMIT_100M);
1140
1141         AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1142 }
1143
1144 static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1145 {
1146         struct atl1c_hw *hw = &adapter->hw;
1147         u32 dma_ctrl_data;
1148
1149         dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1150                 DMA_CTRL_RREQ_PRI_DATA |
1151                 FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1152                 FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1153                 FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1154
1155         AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1156 }
1157
1158 /*
1159  * Stop the mac, transmit and receive units
1160  * hw - Struct containing variables accessed by shared code
1161  * return : 0  or  idle status (if error)
1162  */
1163 static int atl1c_stop_mac(struct atl1c_hw *hw)
1164 {
1165         u32 data;
1166
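        /* disable the RX/TX queues first, wait for them to drain, then
         * turn off the MAC receive and transmit engines */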
1167         AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1168         data &= ~RXQ_CTRL_EN;
1169         AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1170
1171         AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1172         data &= ~TXQ_CTRL_EN;
1173         AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1174
1175         atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1176
1177         AT_READ_REG(hw, REG_MAC_CTRL, &data);
1178         data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1179         AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1180
1181         return (int)atl1c_wait_until_idle(hw,
1182                 IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1183 }
1184
1185 static void atl1c_start_mac(struct atl1c_adapter *adapter)
1186 {
1187         struct atl1c_hw *hw = &adapter->hw;
1188         u32 mac, txq, rxq;
1189
1190         hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX;
1191         hw->mac_speed = adapter->link_speed == SPEED_1000 ?
1192                 atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
1193
1194         AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
1195         AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
1196         AT_READ_REG(hw, REG_MAC_CTRL, &mac);
1197
1198         txq |= TXQ_CTRL_EN;
1199         rxq |= RXQ_CTRL_EN;
1200         mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
1201                MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
1202                MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
1203                MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
1204                MAC_CTRL_HASH_ALG_CRC32;
1205         if (hw->mac_duplex)
1206                 mac |= MAC_CTRL_DUPLX;
1207         else
1208                 mac &= ~MAC_CTRL_DUPLX;
1209         mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
1210         mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
1211
1212         AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
1213         AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
1214         AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
1215 }
1216
1217 /*
1218  * Reset the transmit and receive units; mask and clear all interrupts.
1219  * hw - Struct containing variables accessed by shared code
1220  * return : 0  or  idle status (if error)
1221  */
1222 static int atl1c_reset_mac(struct atl1c_hw *hw)
1223 {
1224         struct atl1c_adapter *adapter = hw->adapter;
1225         struct pci_dev *pdev = adapter->pdev;
1226         u32 ctrl_data = 0;
1227
1228         atl1c_stop_mac(hw);
1229         /*
1230          * Issue Soft Reset to the MAC.  This will reset the chip's
1231          * transmit, receive and DMA units.  It will not affect
1232          * the current PCI configuration.  The global reset bit is self-
1233          * clearing, and should clear within a microsecond.
1234          */
1235         AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
1236         ctrl_data |= MASTER_CTRL_OOB_DIS;
1237         AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
1238
1239         AT_WRITE_FLUSH(hw);
1240         msleep(10);
1241         /* Wait at least 10 ms for all modules to go idle */
1242
1243         if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1244                 dev_err(&pdev->dev,
1245                         "MAC state machine can't be idle since"
1246                         " disabled for 10ms second\n");
1247                 return -1;
1248         }
1249         AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
1250
1251         /* driver control speed/duplex */
1252         AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
1253         AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
1254
1255         /* clk switch setting */
1256         AT_READ_REG(hw, REG_SERDES, &ctrl_data);
1257         switch (hw->nic_type) {
1258         case athr_l2c_b:
1259                 ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
1260                                 SERDES_MAC_CLK_SLOWDOWN);
1261                 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1262                 break;
1263         case athr_l2c_b2:
1264         case athr_l1d_2:
1265                 ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
1266                 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1267                 break;
1268         default:
1269                 break;
1270         }
1271
1272         return 0;
1273 }
1274
1275 static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1276 {
1277         u16 ctrl_flags = hw->ctrl_flags;
1278
1279         hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1280         atl1c_set_aspm(hw, SPEED_0);
1281         hw->ctrl_flags = ctrl_flags;
1282 }
1283
1284 /*
1285  * Set ASPM state.
1286  * Enable/disable L0s/L1 depending on link state.
1287  */
1288 static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1289 {
1290         u32 pm_ctrl_data;
1291         u32 link_l1_timer;
1292
1293         AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1294         pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1295                           PM_CTRL_ASPM_L0S_EN |
1296                           PM_CTRL_MAC_ASPM_CHK);
1297         /* L1 timer */
1298         if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1299                 pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1300                 link_l1_timer =
1301                         link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1302                         L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1303                 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1304                         L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1305         } else {
1306                 link_l1_timer = hw->nic_type == athr_l2c_b ?
1307                         L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1308                 if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1309                         link_l1_timer = 1;
1310                 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1311                         PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1312         }
1313
1314         /* L0S/L1 enable */
1315         if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
1316                 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1317         if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1318                 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1319
1320         /* l2cb & l1d & l2cb2 & l1d2 */
1321         if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1322             hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1323                 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1324                         PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1325                 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1326                                 PM_CTRL_SERDES_PD_EX_L1 |
1327                                 PM_CTRL_CLK_SWH_L1;
1328                 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1329                                   PM_CTRL_SERDES_PLL_L1_EN |
1330                                   PM_CTRL_SERDES_BUFS_RX_L1_EN |
1331                                   PM_CTRL_SA_DLY_EN |
1332                                   PM_CTRL_HOTRST);
1333                 /* disable l0s if link down or l2cb */
1334                 if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1335                         pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1336         } else { /* l1c */
1337                 pm_ctrl_data =
1338                         FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1339                 if (link_speed != SPEED_0) {
1340                         pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1341                                         PM_CTRL_SERDES_PLL_L1_EN |
1342                                         PM_CTRL_SERDES_BUFS_RX_L1_EN;
1343                         pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1344                                           PM_CTRL_CLK_SWH_L1 |
1345                                           PM_CTRL_ASPM_L0S_EN |
1346                                           PM_CTRL_ASPM_L1_EN);
1347                 } else { /* link down */
1348                         pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1349                         pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1350                                           PM_CTRL_SERDES_PLL_L1_EN |
1351                                           PM_CTRL_SERDES_BUFS_RX_L1_EN |
1352                                           PM_CTRL_ASPM_L0S_EN);
1353                 }
1354         }
1355         AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1356
1357         return;
1358 }
1359
1360 /**
1361  * atl1c_configure_mac - Configure Transmit & Receive Units after Reset
1362  * @adapter: board private structure
1363  *
1364  * Configure the Tx/Rx units of the MAC after a reset.
1365  */
1366 static int atl1c_configure_mac(struct atl1c_adapter *adapter)
1367 {
1368         struct atl1c_hw *hw = &adapter->hw;
1369         u32 master_ctrl_data = 0;
1370         u32 intr_modrt_data;
1371         u32 data;
1372
1373         AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1374         master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1375                               MASTER_CTRL_RX_ITIMER_EN |
1376                               MASTER_CTRL_INT_RDCLR);
1377         /* clear interrupt status */
1378         AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1379         /*  Clear any WOL status */
1380         AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
1381         /* set Interrupt Clear Timer:
1382          * the HW re-asserts the interrupt to the system if software has not
1383          * acknowledged it within this time.
1384          */
1385
1386         data = CLK_GATING_EN_ALL;
1387         if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) {
1388                 if (hw->nic_type == athr_l2c_b)
1389                         data &= ~CLK_GATING_RXMAC_EN;
1390         } else
1391                 data = 0;
1392         AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data);
1393
1394         AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER,
1395                 hw->ict & INT_RETRIG_TIMER_MASK);
1396
1397         atl1c_configure_des_ring(adapter);
1398
1399         if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) {
1400                 intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) <<
1401                                         IRQ_MODRT_TX_TIMER_SHIFT;
1402                 intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) <<
1403                                         IRQ_MODRT_RX_TIMER_SHIFT;
1404                 AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data);
1405                 master_ctrl_data |=
1406                         MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN;
1407         }
1408
1409         if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ)
1410                 master_ctrl_data |= MASTER_CTRL_INT_RDCLR;
1411
1412         master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1413         AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1414
1415         AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1416                 hw->smb_timer & SMB_STAT_TIMER_MASK);
1417
1418         /* set MTU: max_frame_size plus Ethernet header, VLAN tag and FCS */
1419         AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1420                         VLAN_HLEN + ETH_FCS_LEN);
1421
1422         atl1c_configure_tx(adapter);
1423         atl1c_configure_rx(adapter);
1424         atl1c_configure_dma(adapter);
1425
1426         return 0;
1427 }
1428
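/**
 * atl1c_configure - bring the adapter back to an operational state
 * @adapter: board private structure
 *
 * Re-initializes the ring pointers, re-programs the RX mode and VLAN
 * settings, refills the RX free ring and configures the MAC.
 * Returns 0 on success or a negative error code if the RX ring could
 * not be refilled or the MAC could not be configured.
 */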
1429 static int atl1c_configure(struct atl1c_adapter *adapter)
1430 {
1431         struct net_device *netdev = adapter->netdev;
1432         int num;
1433
1434         atl1c_init_ring_ptrs(adapter);
1435         atl1c_set_multi(netdev);
1436         atl1c_restore_vlan(adapter);
1437
1438         num = atl1c_alloc_rx_buffer(adapter, false);
1439         if (unlikely(num == 0))
1440                 return -ENOMEM;
1441
1442         if (atl1c_configure_mac(adapter))
1443                 return -EIO;
1444
1445         return 0;
1446 }
1447
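/*
 * Accumulate the MAC RX/TX statistics registers into adapter->hw_stats.
 * The register walk relies on the fields of struct atl1c_hw_stats being
 * laid out in the same order as the corresponding hardware counters.
 */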
1448 static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1449 {
1450         u16 hw_reg_addr = 0;
1451         unsigned long *stats_item = NULL;
1452         u32 data;
1453
1454         /* update rx status */
1455         hw_reg_addr = REG_MAC_RX_STATUS_BIN;
1456         stats_item  = &adapter->hw_stats.rx_ok;
1457         while (hw_reg_addr <= REG_MAC_RX_STATUS_END) {
1458                 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1459                 *stats_item += data;
1460                 stats_item++;
1461                 hw_reg_addr += 4;
1462         }
1463         /* update tx status */
1464         hw_reg_addr = REG_MAC_TX_STATUS_BIN;
1465         stats_item  = &adapter->hw_stats.tx_ok;
1466         while (hw_reg_addr <= REG_MAC_TX_STATUS_END) {
1467                 AT_READ_REG(&adapter->hw, hw_reg_addr, &data);
1468                 *stats_item += data;
1469                 stats_item++;
1470                 hw_reg_addr += 4;
1471         }
1472 }
1473
1474 /**
1475  * atl1c_get_stats - Get System Network Statistics
1476  * @netdev: network interface device structure
1477  *
1478  * Returns the address of the device statistics structure.
1479  * The statistics are actually updated from the timer callback.
1480  */
1481 static struct net_device_stats *atl1c_get_stats(struct net_device *netdev)
1482 {
1483         struct atl1c_adapter *adapter = netdev_priv(netdev);
1484         struct atl1c_hw_stats  *hw_stats = &adapter->hw_stats;
1485         struct net_device_stats *net_stats = &netdev->stats;
1486
1487         atl1c_update_hw_stats(adapter);
1488         net_stats->rx_bytes   = hw_stats->rx_byte_cnt;
1489         net_stats->tx_bytes   = hw_stats->tx_byte_cnt;
1490         net_stats->multicast  = hw_stats->rx_mcast;
1491         net_stats->collisions = hw_stats->tx_1_col +
1492                                 hw_stats->tx_2_col +
1493                                 hw_stats->tx_late_col +
1494                                 hw_stats->tx_abort_col;
1495
1496         net_stats->rx_errors  = hw_stats->rx_frag +
1497                                 hw_stats->rx_fcs_err +
1498                                 hw_stats->rx_len_err +
1499                                 hw_stats->rx_sz_ov +
1500                                 hw_stats->rx_rrd_ov +
1501                                 hw_stats->rx_align_err +
1502                                 hw_stats->rx_rxf_ov;
1503
1504         net_stats->rx_fifo_errors   = hw_stats->rx_rxf_ov;
1505         net_stats->rx_length_errors = hw_stats->rx_len_err;
1506         net_stats->rx_crc_errors    = hw_stats->rx_fcs_err;
1507         net_stats->rx_frame_errors  = hw_stats->rx_align_err;
1508         net_stats->rx_dropped       = hw_stats->rx_rrd_ov;
1509
1510         net_stats->tx_errors = hw_stats->tx_late_col +
1511                                hw_stats->tx_abort_col +
1512                                hw_stats->tx_underrun +
1513                                hw_stats->tx_trunc;
1514
1515         net_stats->tx_fifo_errors    = hw_stats->tx_underrun;
1516         net_stats->tx_aborted_errors = hw_stats->tx_abort_col;
1517         net_stats->tx_window_errors  = hw_stats->tx_late_col;
1518
1519         net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors;
1520         net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors;
1521
1522         return net_stats;
1523 }
1524
1525 static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
1526 {
1527         u16 phy_data;
1528
1529         spin_lock(&adapter->mdio_lock);
1530         atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data);
1531         spin_unlock(&adapter->mdio_lock);
1532 }
1533
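/**
 * atl1c_clean_tx - NAPI TX completion poll
 * @napi: napi info
 * @budget: limit of packets to clean
 *
 * Reclaims transmit buffers up to the consumer index reported by the
 * hardware, wakes the queue if it was stopped while the link is up and,
 * once fewer than @budget packets were cleaned, completes NAPI and
 * re-enables the TX interrupt.
 */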
1534 static int atl1c_clean_tx(struct napi_struct *napi, int budget)
1535 {
1536         struct atl1c_adapter *adapter =
1537                 container_of(napi, struct atl1c_adapter, tx_napi);
1538         struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal];
1539         struct atl1c_buffer *buffer_info;
1540         struct pci_dev *pdev = adapter->pdev;
1541         u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1542         u16 hw_next_to_clean;
1543         unsigned int total_bytes = 0, total_packets = 0;
1544         unsigned long flags;
1545
1546         AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean);
1547
1548         while (next_to_clean != hw_next_to_clean) {
1549                 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1550                 if (buffer_info->skb) {
1551                         total_bytes += buffer_info->skb->len;
1552                         total_packets++;
1553                 }
1554                 atl1c_clean_buffer(pdev, buffer_info);
1555                 if (++next_to_clean == tpd_ring->count)
1556                         next_to_clean = 0;
1557                 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
1558         }
1559
1560         netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
1561
1562         if (netif_queue_stopped(adapter->netdev) &&
1563                         netif_carrier_ok(adapter->netdev)) {
1564                 netif_wake_queue(adapter->netdev);
1565         }
1566
1567         if (total_packets < budget) {
1568                 napi_complete_done(napi, total_packets);
1569                 spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1570                 adapter->hw.intr_mask |= ISR_TX_PKT;
1571                 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1572                 spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
1573                 return total_packets;
1574         }
1575         return budget;
1576 }
1577
1578 /**
1579  * atl1c_intr - Interrupt Handler
1580  * @irq: interrupt number
1581  * @data: pointer to a network interface device structure
1582  */
1583 static irqreturn_t atl1c_intr(int irq, void *data)
1584 {
1585         struct net_device *netdev  = data;
1586         struct atl1c_adapter *adapter = netdev_priv(netdev);
1587         struct pci_dev *pdev = adapter->pdev;
1588         struct atl1c_hw *hw = &adapter->hw;
1589         int max_ints = AT_MAX_INT_WORK;
1590         int handled = IRQ_NONE;
1591         u32 status;
1592         u32 reg_data;
1593
1594         do {
1595                 AT_READ_REG(hw, REG_ISR, &reg_data);
1596                 status = reg_data & hw->intr_mask;
1597
1598                 if (status == 0 || (status & ISR_DIS_INT) != 0) {
1599                         if (max_ints != AT_MAX_INT_WORK)
1600                                 handled = IRQ_HANDLED;
1601                         break;
1602                 }
1603                 /* link event */
1604                 if (status & ISR_GPHY)
1605                         atl1c_clear_phy_int(adapter);
1606                 /* Ack ISR */
1607                 AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
1608                 if (status & ISR_RX_PKT) {
1609                         if (likely(napi_schedule_prep(&adapter->napi))) {
1610                                 spin_lock(&hw->intr_mask_lock);
1611                                 hw->intr_mask &= ~ISR_RX_PKT;
1612                                 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1613                                 spin_unlock(&hw->intr_mask_lock);
1614                                 __napi_schedule(&adapter->napi);
1615                         }
1616                 }
1617                 if (status & ISR_TX_PKT) {
1618                         if (napi_schedule_prep(&adapter->tx_napi)) {
1619                                 spin_lock(&hw->intr_mask_lock);
1620                                 hw->intr_mask &= ~ISR_TX_PKT;
1621                                 AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
1622                                 spin_unlock(&hw->intr_mask_lock);
1623                                 __napi_schedule(&adapter->tx_napi);
1624                         }
1625                 }
1626
1627                 handled = IRQ_HANDLED;
1628                 /* check if PCIE PHY Link down */
1629                 if (status & ISR_ERROR) {
1630                         if (netif_msg_hw(adapter))
1631                                 dev_err(&pdev->dev,
1632                                         "atl1c hardware error (status = 0x%x)\n",
1633                                         status & ISR_ERROR);
1634                         /* reset MAC */
1635                         set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
1636                         schedule_work(&adapter->common_task);
1637                         return IRQ_HANDLED;
1638                 }
1639
1640                 if (status & ISR_OVER)
1641                         if (netif_msg_intr(adapter))
1642                                 dev_warn(&pdev->dev,
1643                                         "TX/RX overflow (status = 0x%x)\n",
1644                                         status & ISR_OVER);
1645
1646                 /* link event */
1647                 if (status & (ISR_GPHY | ISR_MANUAL)) {
1648                         netdev->stats.tx_carrier_errors++;
1649                         atl1c_link_chg_event(adapter);
1650                         break;
1651                 }
1652
1653         } while (--max_ints > 0);
1654         /* re-enable Interrupt */
1655         AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
1656         return handled;
1657 }
1658
1659 static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1660                   struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
1661 {
1662         /*
1663          * The pid field in the RRS is not always correct, so we cannot
1664          * tell whether the packet is fragmented or not; report
1665          * CHECKSUM_NONE and let the kernel verify the checksum.
1666          */
1667         skb_checksum_none_assert(skb);
1668 }
1669
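/*
 * Allocate an RX skb.  When the receive buffer does not fit into a page
 * fragment (rx_frag_size > PAGE_SIZE) a regular skb is allocated;
 * otherwise the skb is built on top of a cached page that is handed out
 * in rx_frag_size chunks via build_skb().
 */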
1670 static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
1671                                        bool napi_mode)
1672 {
1673         struct sk_buff *skb;
1674         struct page *page;
1675
1676         if (adapter->rx_frag_size > PAGE_SIZE) {
1677                 if (likely(napi_mode))
1678                         return napi_alloc_skb(&adapter->napi,
1679                                               adapter->rx_buffer_len);
1680                 else
1681                         return netdev_alloc_skb_ip_align(adapter->netdev,
1682                                                          adapter->rx_buffer_len);
1683         }
1684
1685         page = adapter->rx_page;
1686         if (!page) {
1687                 adapter->rx_page = page = alloc_page(GFP_ATOMIC);
1688                 if (unlikely(!page))
1689                         return NULL;
1690                 adapter->rx_page_offset = 0;
1691         }
1692
1693         skb = build_skb(page_address(page) + adapter->rx_page_offset,
1694                         adapter->rx_frag_size);
1695         if (likely(skb)) {
1696                 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1697                 adapter->rx_page_offset += adapter->rx_frag_size;
1698                 if (adapter->rx_page_offset >= PAGE_SIZE)
1699                         adapter->rx_page = NULL;
1700                 else
1701                         get_page(page);
1702         }
1703         return skb;
1704 }
1705
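/*
 * Refill the RX free descriptor ring: allocate and DMA-map an skb for
 * every free slot, then publish the new producer index to the hardware
 * mailbox register.  Returns the number of buffers allocated.
 */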
1706 static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
1707 {
1708         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1709         struct pci_dev *pdev = adapter->pdev;
1710         struct atl1c_buffer *buffer_info, *next_info;
1711         struct sk_buff *skb;
1712         void *vir_addr = NULL;
1713         u16 num_alloc = 0;
1714         u16 rfd_next_to_use, next_next;
1715         struct atl1c_rx_free_desc *rfd_desc;
1716         dma_addr_t mapping;
1717
1718         next_next = rfd_next_to_use = rfd_ring->next_to_use;
1719         if (++next_next == rfd_ring->count)
1720                 next_next = 0;
1721         buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1722         next_info = &rfd_ring->buffer_info[next_next];
1723
1724         while (next_info->flags & ATL1C_BUFFER_FREE) {
1725                 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1726
1727                 skb = atl1c_alloc_skb(adapter, napi_mode);
1728                 if (unlikely(!skb)) {
1729                         if (netif_msg_rx_err(adapter))
1730                                 dev_warn(&pdev->dev, "alloc rx buffer failed\n");
1731                         break;
1732                 }
1733
1734                 /*
1735                  * Make buffer alignment 2 beyond a 16 byte boundary;
1736                  * this will result in a 16 byte aligned IP header after
1737                  * the 14 byte MAC header is removed.
1738                  */
1739                 vir_addr = skb->data;
1740                 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1741                 buffer_info->skb = skb;
1742                 buffer_info->length = adapter->rx_buffer_len;
1743                 mapping = dma_map_single(&pdev->dev, vir_addr,
1744                                          buffer_info->length, DMA_FROM_DEVICE);
1745                 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
1746                         dev_kfree_skb(skb);
1747                         buffer_info->skb = NULL;
1748                         buffer_info->length = 0;
1749                         ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
1750                         netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
1751                         break;
1752                 }
1753                 buffer_info->dma = mapping;
1754                 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
1755                         ATL1C_PCIMAP_FROMDEVICE);
1756                 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1757                 rfd_next_to_use = next_next;
1758                 if (++next_next == rfd_ring->count)
1759                         next_next = 0;
1760                 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1761                 next_info = &rfd_ring->buffer_info[next_next];
1762                 num_alloc++;
1763         }
1764
1765         if (num_alloc) {
1766                 /* TODO: update mailbox here */
1767                 wmb();
1768                 rfd_ring->next_to_use = rfd_next_to_use;
1769                 AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
1770                         rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1771         }
1772
1773         return num_alloc;
1774 }
1775
1776 static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring,
1777                         struct  atl1c_recv_ret_status *rrs, u16 num)
1778 {
1779         u16 i;
1780         /* the relationship between rrd and rfd is one-to-one */
1781         for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring,
1782                                         rrd_ring->next_to_clean)) {
1783                 rrs->word3 &= ~RRS_RXD_UPDATED;
1784                 if (++rrd_ring->next_to_clean == rrd_ring->count)
1785                         rrd_ring->next_to_clean = 0;
1786         }
1787 }
1788
1789 static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1790         struct atl1c_recv_ret_status *rrs, u16 num)
1791 {
1792         u16 i;
1793         u16 rfd_index;
1794         struct atl1c_buffer *buffer_info = rfd_ring->buffer_info;
1795
1796         rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1797                         RRS_RX_RFD_INDEX_MASK;
1798         for (i = 0; i < num; i++) {
1799                 buffer_info[rfd_index].skb = NULL;
1800                 ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
1801                                         ATL1C_BUFFER_FREE);
1802                 if (++rfd_index == rfd_ring->count)
1803                         rfd_index = 0;
1804         }
1805         rfd_ring->next_to_clean = rfd_index;
1806 }
1807
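/*
 * Receive path: walk the return status (RRS) ring, drop frames that the
 * hardware flagged as bad, hand good frames (with an optional VLAN tag)
 * to the stack via napi_gro_receive() and finally refill the RX free
 * ring.
 */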
1808 static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
1809                    int *work_done, int work_to_do)
1810 {
1811         u16 rfd_num, rfd_index;
1812         u16 count = 0;
1813         u16 length;
1814         struct pci_dev *pdev = adapter->pdev;
1815         struct net_device *netdev  = adapter->netdev;
1816         struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1817         struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1818         struct sk_buff *skb;
1819         struct atl1c_recv_ret_status *rrs;
1820         struct atl1c_buffer *buffer_info;
1821
1822         while (1) {
1823                 if (*work_done >= work_to_do)
1824                         break;
1825                 rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
1826                 if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
1827                         rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) &
1828                                 RRS_RX_RFD_CNT_MASK;
1829                         if (unlikely(rfd_num != 1))
1830                                 /* TODO: support multiple RFDs */
1831                                 if (netif_msg_rx_err(adapter))
1832                                         dev_warn(&pdev->dev,
1833                                                 "Multi RFD not supported yet!\n");
1834                         goto rrs_checked;
1835                 } else {
1836                         break;
1837                 }
1838 rrs_checked:
1839                 atl1c_clean_rrd(rrd_ring, rrs, rfd_num);
1840                 if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) {
1841                         atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1842                         if (netif_msg_rx_err(adapter))
1843                                 dev_warn(&pdev->dev,
1844                                          "wrong packet! rrs word3 is %x\n",
1845                                          rrs->word3);
1846                         continue;
1847                 }
1848
1849                 length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) &
1850                                 RRS_PKT_SIZE_MASK);
1851                 /* Good Receive */
1852                 if (likely(rfd_num == 1)) {
1853                         rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
1854                                         RRS_RX_RFD_INDEX_MASK;
1855                         buffer_info = &rfd_ring->buffer_info[rfd_index];
1856                         dma_unmap_single(&pdev->dev, buffer_info->dma,
1857                                          buffer_info->length, DMA_FROM_DEVICE);
1858                         skb = buffer_info->skb;
1859                 } else {
1860                         /* TODO */
1861                         if (netif_msg_rx_err(adapter))
1862                                 dev_warn(&pdev->dev,
1863                                         "Multi RFD not supported yet!\n");
1864                         break;
1865                 }
1866                 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1867                 skb_put(skb, length - ETH_FCS_LEN);
1868                 skb->protocol = eth_type_trans(skb, netdev);
1869                 atl1c_rx_checksum(adapter, skb, rrs);
1870                 if (rrs->word3 & RRS_VLAN_INS) {
1871                         u16 vlan;
1872
1873                         AT_TAG_TO_VLAN(rrs->vlan_tag, vlan);
1874                         vlan = le16_to_cpu(vlan);
1875                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
1876                 }
1877                 napi_gro_receive(&adapter->napi, skb);
1878
1879                 (*work_done)++;
1880                 count++;
1881         }
1882         if (count)
1883                 atl1c_alloc_rx_buffer(adapter, true);
1884 }
1885
1886 /**
1887  * atl1c_clean - NAPI Rx polling callback
1888  * @napi: napi info
1889  * @budget: limit of packets to clean
1890  */
1891 static int atl1c_clean(struct napi_struct *napi, int budget)
1892 {
1893         struct atl1c_adapter *adapter =
1894                         container_of(napi, struct atl1c_adapter, napi);
1895         int work_done = 0;
1896         unsigned long flags;
1897
1898         /* Keep link state information with original netdev */
1899         if (!netif_carrier_ok(adapter->netdev))
1900                 goto quit_polling;
1901         /* just enable one RXQ */
1902         atl1c_clean_rx_irq(adapter, &work_done, budget);
1903
1904         if (work_done < budget) {
1905 quit_polling:
1906                 napi_complete_done(napi, work_done);
1907                 spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
1908                 adapter->hw.intr_mask |= ISR_RX_PKT;
1909                 AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
1910                 spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
1911         }
1912         return work_done;
1913 }
1914
1915 #ifdef CONFIG_NET_POLL_CONTROLLER
1916
1917 /*
1918  * Polling 'interrupt' - used by things like netconsole to send skbs
1919  * without having to re-enable interrupts. It's not called while
1920  * the interrupt routine is executing.
1921  */
1922 static void atl1c_netpoll(struct net_device *netdev)
1923 {
1924         struct atl1c_adapter *adapter = netdev_priv(netdev);
1925
1926         disable_irq(adapter->pdev->irq);
1927         atl1c_intr(adapter->pdev->irq, netdev);
1928         enable_irq(adapter->pdev->irq);
1929 }
1930 #endif
1931
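/*
 * Number of free transmit descriptors in the ring.  One slot is always
 * kept unused so that a full ring can be told apart from an empty one,
 * e.g. with count = 1024, next_to_use = 10 and next_to_clean = 5 there
 * are 1024 + 5 - 10 - 1 = 1018 descriptors available.
 */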
1932 static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
1933 {
1934         struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1935         u16 next_to_use = 0;
1936         u16 next_to_clean = 0;
1937
1938         next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1939         next_to_use   = tpd_ring->next_to_use;
1940
1941         return (u16)(next_to_clean > next_to_use ?
1942                 next_to_clean - next_to_use - 1 :
1943                 tpd_ring->count + next_to_clean - next_to_use - 1);
1944 }
1945
1946 /*
1947  * get next usable tpd
1948  * Note: the caller should call atl1c_tpd_avail first to make sure
1949  * there are enough free TPDs to use
1950  */
1951 static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
1952         enum atl1c_trans_queue type)
1953 {
1954         struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
1955         struct atl1c_tpd_desc *tpd_desc;
1956         u16 next_to_use = 0;
1957
1958         next_to_use = tpd_ring->next_to_use;
1959         if (++tpd_ring->next_to_use == tpd_ring->count)
1960                 tpd_ring->next_to_use = 0;
1961         tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use);
1962         memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc));
1963         return  tpd_desc;
1964 }
1965
1966 static struct atl1c_buffer *
1967 atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd)
1968 {
1969         struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
1970
1971         return &tpd_ring->buffer_info[tpd -
1972                         (struct atl1c_tpd_desc *)tpd_ring->desc];
1973 }
1974
1975 /* Calculate the number of transmit descriptors (TPDs) needed for this skb */
1976 static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
1977 {
1978         u16 tpd_req;
1979         u16 proto_hdr_len = 0;
1980
1981         tpd_req = skb_shinfo(skb)->nr_frags + 1;
1982
1983         if (skb_is_gso(skb)) {
1984                 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1985                 if (proto_hdr_len < skb_headlen(skb))
1986                         tpd_req++;
1987                 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1988                         tpd_req++;
1989         }
1990         return tpd_req;
1991 }
1992
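/*
 * Prepare the checksum/TSO fields of the TPD.  For IPv4 TSO the IP and
 * TCP pseudo-header checksums are primed and LSO is enabled; IPv6 TSO
 * additionally consumes an extended TPD (LSO version 2).  Non-GSO
 * packets with CHECKSUM_PARTIAL get plain checksum offload.
 * Returns 0 on success or a negative value on error.
 */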
1993 static int atl1c_tso_csum(struct atl1c_adapter *adapter,
1994                           struct sk_buff *skb,
1995                           struct atl1c_tpd_desc **tpd,
1996                           enum atl1c_trans_queue type)
1997 {
1998         struct pci_dev *pdev = adapter->pdev;
1999         unsigned short offload_type;
2000         u8 hdr_len;
2001         u32 real_len;
2002
2003         if (skb_is_gso(skb)) {
2004                 int err;
2005
2006                 err = skb_cow_head(skb, 0);
2007                 if (err < 0)
2008                         return err;
2009
2010                 offload_type = skb_shinfo(skb)->gso_type;
2011
2012                 if (offload_type & SKB_GSO_TCPV4) {
2013                         real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
2014                                         + ntohs(ip_hdr(skb)->tot_len));
2015
2016                         if (real_len < skb->len)
2017                                 pskb_trim(skb, real_len);
2018
2019                         hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
2020                         if (unlikely(skb->len == hdr_len)) {
2021                                 /* only checksum offload is needed */
2022                                 if (netif_msg_tx_queued(adapter))
2023                                         dev_warn(&pdev->dev,
2024                                                 "IPV4 tso with zero data??\n");
2025                                 goto check_sum;
2026                         } else {
2027                                 ip_hdr(skb)->check = 0;
2028                                 tcp_hdr(skb)->check = ~csum_tcpudp_magic(
2029                                                         ip_hdr(skb)->saddr,
2030                                                         ip_hdr(skb)->daddr,
2031                                                         0, IPPROTO_TCP, 0);
2032                                 (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT;
2033                         }
2034                 }
2035
2036                 if (offload_type & SKB_GSO_TCPV6) {
2037                         struct atl1c_tpd_ext_desc *etpd =
2038                                 *(struct atl1c_tpd_ext_desc **)(tpd);
2039
2040                         memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
2041                         *tpd = atl1c_get_tpd(adapter, type);
2042                         ipv6_hdr(skb)->payload_len = 0;
2043                         /* check payload == 0 byte ? */
2044                         hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
2045                         if (unlikely(skb->len == hdr_len)) {
2046                                 /* only checksum offload is needed */
2047                                 if (netif_msg_tx_queued(adapter))
2048                                         dev_warn(&pdev->dev,
2049                                                 "IPV6 tso with zero data??\n");
2050                                 goto check_sum;
2051                         } else
2052                                 tcp_v6_gso_csum_prep(skb);
2053
2054                         etpd->word1 |= 1 << TPD_LSO_EN_SHIFT;
2055                         etpd->word1 |= 1 << TPD_LSO_VER_SHIFT;
2056                         etpd->pkt_len = cpu_to_le32(skb->len);
2057                         (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT;
2058                 }
2059
2060                 (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT;
2061                 (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) <<
2062                                 TPD_TCPHDR_OFFSET_SHIFT;
2063                 (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) <<
2064                                 TPD_MSS_SHIFT;
2065                 return 0;
2066         }
2067
2068 check_sum:
2069         if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2070                 u8 css, cso;
2071                 cso = skb_checksum_start_offset(skb);
2072
2073                 if (unlikely(cso & 0x1)) {
2074                         if (netif_msg_tx_err(adapter))
2075                                 dev_err(&adapter->pdev->dev,
2076                                         "payload offset must be an even number\n");
2077                         return -1;
2078                 } else {
2079                         css = cso + skb->csum_offset;
2080
2081                         (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) <<
2082                                         TPD_PLOADOFFSET_SHIFT;
2083                         (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) <<
2084                                         TPD_CCSUM_OFFSET_SHIFT;
2085                         (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT;
2086                 }
2087         }
2088         return 0;
2089 }
2090
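/*
 * Undo a partially mapped frame: release every buffer and descriptor
 * queued since first_tpd and rewind the ring's next_to_use back to it,
 * so the descriptors can be reused for the next transmit attempt.
 */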
2091 static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
2092                               struct atl1c_tpd_desc *first_tpd,
2093                               enum atl1c_trans_queue type)
2094 {
2095         struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
2096         struct atl1c_buffer *buffer_info;
2097         struct atl1c_tpd_desc *tpd;
2098         u16 first_index, index;
2099
2100         first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc;
2101         index = first_index;
2102         while (index != tpd_ring->next_to_use) {
2103                 tpd = ATL1C_TPD_DESC(tpd_ring, index);
2104                 buffer_info = &tpd_ring->buffer_info[index];
2105                 atl1c_clean_buffer(adpt->pdev, buffer_info);
2106                 memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
2107                 if (++index == tpd_ring->count)
2108                         index = 0;
2109         }
2110         tpd_ring->next_to_use = first_index;
2111 }
2112
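/*
 * Map the skb into transmit descriptors: for TSO the protocol headers
 * are mapped separately first, then the remaining linear data and each
 * page fragment get their own TPD.  The last descriptor is marked EOP
 * and keeps the skb pointer so it can be freed on completion.  Returns
 * -1 if any DMA mapping fails, in which case the caller rolls the ring
 * back.
 */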
2113 static int atl1c_tx_map(struct atl1c_adapter *adapter,
2114                       struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
2115                         enum atl1c_trans_queue type)
2116 {
2117         struct atl1c_tpd_desc *use_tpd = NULL;
2118         struct atl1c_buffer *buffer_info = NULL;
2119         u16 buf_len = skb_headlen(skb);
2120         u16 map_len = 0;
2121         u16 mapped_len = 0;
2122         u16 hdr_len = 0;
2123         u16 nr_frags;
2124         u16 f;
2125         int tso;
2126
2127         nr_frags = skb_shinfo(skb)->nr_frags;
2128         tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK;
2129         if (tso) {
2130                 /* TSO */
2131                 map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2132                 use_tpd = tpd;
2133
2134                 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2135                 buffer_info->length = map_len;
2136                 buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2137                                                   skb->data, hdr_len,
2138                                                   DMA_TO_DEVICE);
2139                 if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2140                         goto err_dma;
2141                 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2142                 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2143                         ATL1C_PCIMAP_TODEVICE);
2144                 mapped_len += map_len;
2145                 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2146                 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2147         }
2148
2149         if (mapped_len < buf_len) {
2150                 /* mapped_len == 0 means we should use the first tpd,
2151                    which is given by the caller */
2152                 if (mapped_len == 0)
2153                         use_tpd = tpd;
2154                 else {
2155                         use_tpd = atl1c_get_tpd(adapter, type);
2156                         memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2157                 }
2158                 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2159                 buffer_info->length = buf_len - mapped_len;
2160                 buffer_info->dma =
2161                         dma_map_single(&adapter->pdev->dev,
2162                                        skb->data + mapped_len,
2163                                        buffer_info->length, DMA_TO_DEVICE);
2164                 if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
2165                         goto err_dma;
2166
2167                 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2168                 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
2169                         ATL1C_PCIMAP_TODEVICE);
2170                 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2171                 use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2172         }
2173
2174         for (f = 0; f < nr_frags; f++) {
2175                 skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2176
2177                 use_tpd = atl1c_get_tpd(adapter, type);
2178                 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
2179
2180                 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
2181                 buffer_info->length = skb_frag_size(frag);
2182                 buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev,
2183                                                     frag, 0,
2184                                                     buffer_info->length,
2185                                                     DMA_TO_DEVICE);
2186                 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma))
2187                         goto err_dma;
2188
2189                 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2190                 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE,
2191                         ATL1C_PCIMAP_TODEVICE);
2192                 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2193                 use_tpd->buffer_len  = cpu_to_le16(buffer_info->length);
2194         }
2195
2196         /* The last tpd */
2197         use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
2198         /* The last buffer_info contains the skb address,
2199            so it will be freed after unmap */
2200         buffer_info->skb = skb;
2201
2202         return 0;
2203
2204 err_dma:
2205         buffer_info->dma = 0;
2206         buffer_info->length = 0;
2207         return -1;
2208 }
2209
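/*
 * Hand the queued descriptors to the hardware by writing the ring's
 * producer index to the mailbox register of the selected priority
 * queue.
 */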
2210 static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2211                            struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2212 {
2213         struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2214         u16 reg;
2215
2216         reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
2217         AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
2218 }
2219
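/*
 * ndo_start_xmit handler: make sure enough TPDs are free (otherwise
 * stop the queue and return NETDEV_TX_BUSY), set up TSO/checksum
 * offload and an optional VLAN tag, DMA-map the skb and kick the
 * hardware.
 */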
2220 static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
2221                                           struct net_device *netdev)
2222 {
2223         struct atl1c_adapter *adapter = netdev_priv(netdev);
2224         u16 tpd_req;
2225         struct atl1c_tpd_desc *tpd;
2226         enum atl1c_trans_queue type = atl1c_trans_normal;
2227
2228         if (test_bit(__AT_DOWN, &adapter->flags)) {
2229                 dev_kfree_skb_any(skb);
2230                 return NETDEV_TX_OK;
2231         }
2232
2233         tpd_req = atl1c_cal_tpd_req(skb);
2234
2235         if (atl1c_tpd_avail(adapter, type) < tpd_req) {
2236                 /* not enough descriptors, just stop the queue */
2237                 netif_stop_queue(netdev);
2238                 return NETDEV_TX_BUSY;
2239         }
2240
2241         tpd = atl1c_get_tpd(adapter, type);
2242
2243         /* do TSO and check sum */
2244         if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
2245                 dev_kfree_skb_any(skb);
2246                 return NETDEV_TX_OK;
2247         }
2248
2249         if (unlikely(skb_vlan_tag_present(skb))) {
2250                 u16 vlan = skb_vlan_tag_get(skb);
2251                 __le16 tag;
2252
2253                 vlan = cpu_to_le16(vlan);
2254                 AT_VLAN_TO_TAG(vlan, tag);
2255                 tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT;
2256                 tpd->vlan_tag = tag;
2257         }
2258
2259         if (skb_network_offset(skb) != ETH_HLEN)
2260                 tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
2261
2262         if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
2263                 netif_info(adapter, tx_done, adapter->netdev,
2264                            "tx-skb dropped due to dma error\n");
2265                 /* roll back tpd/buffer */
2266                 atl1c_tx_rollback(adapter, tpd, type);
2267                 dev_kfree_skb_any(skb);
2268         } else {
2269                 netdev_sent_queue(adapter->netdev, skb->len);
2270                 atl1c_tx_queue(adapter, skb, tpd, type);
2271         }
2272
2273         return NETDEV_TX_OK;
2274 }
2275
2276 static void atl1c_free_irq(struct atl1c_adapter *adapter)
2277 {
2278         struct net_device *netdev = adapter->netdev;
2279
2280         free_irq(adapter->pdev->irq, netdev);
2281
2282         if (adapter->have_msi)
2283                 pci_disable_msi(adapter->pdev);
2284 }
2285
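/*
 * Request the device interrupt: MSI is tried first and, if it cannot
 * be enabled, the driver falls back to a shared legacy interrupt line.
 */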
2286 static int atl1c_request_irq(struct atl1c_adapter *adapter)
2287 {
2288         struct pci_dev    *pdev   = adapter->pdev;
2289         struct net_device *netdev = adapter->netdev;
2290         int flags = 0;
2291         int err = 0;
2292
2293         adapter->have_msi = true;
2294         err = pci_enable_msi(adapter->pdev);
2295         if (err) {
2296                 if (netif_msg_ifup(adapter))
2297                         dev_err(&pdev->dev,
2298                                 "Unable to allocate MSI interrupt Error: %d\n",
2299                                 err);
2300                 adapter->have_msi = false;
2301         }
2302
2303         if (!adapter->have_msi)
2304                 flags |= IRQF_SHARED;
2305         err = request_irq(adapter->pdev->irq, atl1c_intr, flags,
2306                         netdev->name, netdev);
2307         if (err) {
2308                 if (netif_msg_ifup(adapter))
2309                         dev_err(&pdev->dev,
2310                                 "Unable to allocate interrupt Error: %d\n",
2311                                 err);
2312                 if (adapter->have_msi)
2313                         pci_disable_msi(adapter->pdev);
2314                 return err;
2315         }
2316         if (netif_msg_ifup(adapter))
2317                 dev_dbg(&pdev->dev, "atl1c_request_irq OK\n");
2318         return err;
2319 }
2320
2321
2322 static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
2323 {
2324         /* release tx-pending skbs and reset tx/rx ring index */
2325         atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
2326         atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2327         atl1c_clean_rx_ring(adapter);
2328 }
2329
2330 static int atl1c_up(struct atl1c_adapter *adapter)
2331 {
2332         struct net_device *netdev = adapter->netdev;
2333         int err;
2334
2335         netif_carrier_off(netdev);
2336
2337         err = atl1c_configure(adapter);
2338         if (unlikely(err))
2339                 goto err_up;
2340
2341         err = atl1c_request_irq(adapter);
2342         if (unlikely(err))
2343                 goto err_up;
2344
2345         atl1c_check_link_status(adapter);
2346         clear_bit(__AT_DOWN, &adapter->flags);
2347         napi_enable(&adapter->napi);
2348         napi_enable(&adapter->tx_napi);
2349         atl1c_irq_enable(adapter);
2350         netif_start_queue(netdev);
2351         return err;
2352
2353 err_up:
2354         atl1c_clean_rx_ring(adapter);
2355         return err;
2356 }
2357
2358 static void atl1c_down(struct atl1c_adapter *adapter)
2359 {
2360         struct net_device *netdev = adapter->netdev;
2361
2362         atl1c_del_timer(adapter);
2363         adapter->work_event = 0; /* clear all event */
2364         /* signal that we're down so the interrupt handler does not
2365          * reschedule our watchdog timer */
2366         set_bit(__AT_DOWN, &adapter->flags);
2367         netif_carrier_off(netdev);
2368         napi_disable(&adapter->napi);
2369         napi_disable(&adapter->tx_napi);
2370         atl1c_irq_disable(adapter);
2371         atl1c_free_irq(adapter);
2372         /* disable ASPM if device inactive */
2373         atl1c_disable_l0s_l1(&adapter->hw);
2374         /* reset MAC to disable all RX/TX */
2375         atl1c_reset_mac(&adapter->hw);
2376         msleep(1);
2377
2378         adapter->link_speed = SPEED_0;
2379         adapter->link_duplex = -1;
2380         atl1c_reset_dma_ring(adapter);
2381 }
2382
2383 /**
2384  * atl1c_open - Called when a network interface is made active
2385  * @netdev: network interface device structure
2386  *
2387  * Returns 0 on success, negative value on failure
2388  *
2389  * The open entry point is called when a network interface is made
2390  * active by the system (IFF_UP).  At this point all resources needed
2391  * for transmit and receive operations are allocated, the interrupt
2392  * handler is registered with the OS, the watchdog timer is started,
2393  * and the stack is notified that the interface is ready.
2394  */
2395 static int atl1c_open(struct net_device *netdev)
2396 {
2397         struct atl1c_adapter *adapter = netdev_priv(netdev);
2398         int err;
2399
2400         /* disallow open during test */
2401         if (test_bit(__AT_TESTING, &adapter->flags))
2402                 return -EBUSY;
2403
2404         /* allocate rx/tx dma buffer & descriptors */
2405         err = atl1c_setup_ring_resources(adapter);
2406         if (unlikely(err))
2407                 return err;
2408
2409         err = atl1c_up(adapter);
2410         if (unlikely(err))
2411                 goto err_up;
2412
2413         return 0;
2414
2415 err_up:
2416         atl1c_free_irq(adapter);
2417         atl1c_free_ring_resources(adapter);
2418         atl1c_reset_mac(&adapter->hw);
2419         return err;
2420 }
2421
2422 /**
2423  * atl1c_close - Disables a network interface
2424  * @netdev: network interface device structure
2425  *
2426  * Returns 0, this is not allowed to fail
2427  *
2428  * The close entry point is called when an interface is de-activated
2429  * by the OS.  The hardware is still under the drivers control, but
2430  * needs to be disabled.  A global MAC reset is issued to stop the
2431  * hardware, and all transmit and receive resources are freed.
2432  */
2433 static int atl1c_close(struct net_device *netdev)
2434 {
2435         struct atl1c_adapter *adapter = netdev_priv(netdev);
2436
2437         WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2438         set_bit(__AT_DOWN, &adapter->flags);
2439         cancel_work_sync(&adapter->common_task);
2440         atl1c_down(adapter);
2441         atl1c_free_ring_resources(adapter);
2442         return 0;
2443 }
2444
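/*
 * System suspend: stop the interface if it is running, detach it from
 * the stack and program the PHY/MAC power-saving and Wake-on-LAN state
 * according to adapter->wol.
 */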
2445 static int atl1c_suspend(struct device *dev)
2446 {
2447         struct net_device *netdev = dev_get_drvdata(dev);
2448         struct atl1c_adapter *adapter = netdev_priv(netdev);
2449         struct atl1c_hw *hw = &adapter->hw;
2450         u32 wufc = adapter->wol;
2451
2452         atl1c_disable_l0s_l1(hw);
2453         if (netif_running(netdev)) {
2454                 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2455                 atl1c_down(adapter);
2456         }
2457         netif_device_detach(netdev);
2458
2459         if (wufc)
2460                 if (atl1c_phy_to_ps_link(hw) != 0)
2461                         dev_dbg(dev, "phy power saving failed");
2462
2463         atl1c_power_saving(hw, wufc);
2464
2465         return 0;
2466 }
2467
2468 #ifdef CONFIG_PM_SLEEP
2469 static int atl1c_resume(struct device *dev)
2470 {
2471         struct net_device *netdev = dev_get_drvdata(dev);
2472         struct atl1c_adapter *adapter = netdev_priv(netdev);
2473
2474         AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2475         atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2476
2477         atl1c_phy_reset(&adapter->hw);
2478         atl1c_reset_mac(&adapter->hw);
2479         atl1c_phy_init(&adapter->hw);
2480
2481         netif_device_attach(netdev);
2482         if (netif_running(netdev))
2483                 atl1c_up(adapter);
2484
2485         return 0;
2486 }
2487 #endif
2488
2489 static void atl1c_shutdown(struct pci_dev *pdev)
2490 {
2491         struct net_device *netdev = pci_get_drvdata(pdev);
2492         struct atl1c_adapter *adapter = netdev_priv(netdev);
2493
2494         atl1c_suspend(&pdev->dev);
2495         pci_wake_from_d3(pdev, adapter->wol);
2496         pci_set_power_state(pdev, PCI_D3hot);
2497 }
2498
2499 static const struct net_device_ops atl1c_netdev_ops = {
2500         .ndo_open               = atl1c_open,
2501         .ndo_stop               = atl1c_close,
2502         .ndo_validate_addr      = eth_validate_addr,
2503         .ndo_start_xmit         = atl1c_xmit_frame,
2504         .ndo_set_mac_address    = atl1c_set_mac_addr,
2505         .ndo_set_rx_mode        = atl1c_set_multi,
2506         .ndo_change_mtu         = atl1c_change_mtu,
2507         .ndo_fix_features       = atl1c_fix_features,
2508         .ndo_set_features       = atl1c_set_features,
2509         .ndo_do_ioctl           = atl1c_ioctl,
2510         .ndo_tx_timeout         = atl1c_tx_timeout,
2511         .ndo_get_stats          = atl1c_get_stats,
2512 #ifdef CONFIG_NET_POLL_CONTROLLER
2513         .ndo_poll_controller    = atl1c_netpoll,
2514 #endif
2515 };
2516
2517 static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2518 {
2519         SET_NETDEV_DEV(netdev, &pdev->dev);
2520         pci_set_drvdata(pdev, netdev);
2521
2522         netdev->netdev_ops = &atl1c_netdev_ops;
2523         netdev->watchdog_timeo = AT_TX_WATCHDOG;
2524         netdev->min_mtu = ETH_ZLEN - (ETH_HLEN + VLAN_HLEN);
2525         atl1c_set_ethtool_ops(netdev);
2526
2527         /* TODO: add when ready */
2528         netdev->hw_features =   NETIF_F_SG              |
2529                                 NETIF_F_HW_CSUM         |
2530                                 NETIF_F_HW_VLAN_CTAG_RX |
2531                                 NETIF_F_TSO             |
2532                                 NETIF_F_TSO6;
2533         netdev->features =      netdev->hw_features     |
2534                                 NETIF_F_HW_VLAN_CTAG_TX;
2535         return 0;
2536 }
2537
2538 /**
2539  * atl1c_probe - Device Initialization Routine
2540  * @pdev: PCI device information struct
2541  * @ent: entry in atl1c_pci_tbl
2542  *
2543  * Returns 0 on success, negative on failure
2544  *
2545  * atl1c_probe initializes an adapter identified by a pci_dev structure.
2546  * The OS initialization, configuring of the adapter private structure,
2547  * and a hardware reset occur.
2548  */
2549 static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2550 {
2551         struct net_device *netdev;
2552         struct atl1c_adapter *adapter;
2553         static int cards_found;
2554
2555         int err = 0;
2556
2557         /* enable device (incl. PCI PM wakeup and hotplug setup) */
2558         err = pci_enable_device_mem(pdev);
2559         if (err) {
2560                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2561                 return err;
2562         }
2563
2564         /*
2565          * The atl1c chip can DMA to 64-bit addresses, but it uses a single
2566          * shared register for the high 32 bits, so only a single, aligned,
2567          * 4 GB physical address range can be used at a time.
2568          *
2569          * Supporting 64-bit DMA on this hardware is more trouble than it's
2570          * worth.  It is far easier to limit to 32-bit DMA than update
2571          * various kernel subsystems to support the mechanics required by a
2572          * fixed-high-32-bit system.
2573          */
2574         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2575         if (err) {
2576                 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
2577                 goto err_dma;
2578         }
2579
2580         err = pci_request_regions(pdev, atl1c_driver_name);
2581         if (err) {
2582                 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2583                 goto err_pci_reg;
2584         }
2585
2586         pci_set_master(pdev);
2587
2588         netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
2589         if (netdev == NULL) {
2590                 err = -ENOMEM;
2591                 goto err_alloc_etherdev;
2592         }
2593
2594         err = atl1c_init_netdev(netdev, pdev);
2595         if (err) {
2596                 dev_err(&pdev->dev, "init netdevice failed\n");
2597                 goto err_init_netdev;
2598         }
2599         adapter = netdev_priv(netdev);
2600         adapter->bd_number = cards_found;
2601         adapter->netdev = netdev;
2602         adapter->pdev = pdev;
2603         adapter->hw.adapter = adapter;
2604         adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
2605         adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2606         if (!adapter->hw.hw_addr) {
2607                 err = -EIO;
2608                 dev_err(&pdev->dev, "cannot map device registers\n");
2609                 goto err_ioremap;
2610         }
2611
2612         /* init mii data */
2613         adapter->mii.dev = netdev;
2614         adapter->mii.mdio_read  = atl1c_mdio_read;
2615         adapter->mii.mdio_write = atl1c_mdio_write;
2616         adapter->mii.phy_id_mask = 0x1f;
2617         adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
2618         dev_set_threaded(netdev, true);
2619         netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
2620         netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64);
2621         timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
2622         /* setup the private structure */
2623         err = atl1c_sw_init(adapter);
2624         if (err) {
2625                 dev_err(&pdev->dev, "net device private data init failed\n");
2626                 goto err_sw_init;
2627         }
2628         /* set max MTU */
2629         atl1c_set_max_mtu(netdev);
2630
2631         atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2632
2633         /* Init GPHY as early as possible due to power saving issue  */
2634         atl1c_phy_reset(&adapter->hw);
2635
2636         err = atl1c_reset_mac(&adapter->hw);
2637         if (err) {
2638                 err = -EIO;
2639                 goto err_reset;
2640         }
2641
2642         /* reset the controller to
2643          * put the device in a known good starting state */
2644         err = atl1c_phy_init(&adapter->hw);
2645         if (err) {
2646                 err = -EIO;
2647                 goto err_reset;
2648         }
2649         if (atl1c_read_mac_addr(&adapter->hw)) {
2650                 /* got a random MAC address, set NET_ADDR_RANDOM to netdev */
2651                 netdev->addr_assign_type = NET_ADDR_RANDOM;
2652         }
2653         memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
2654         if (netif_msg_probe(adapter))
2655                 dev_dbg(&pdev->dev, "mac address : %pM\n",
2656                         adapter->hw.mac_addr);
2657
2658         atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
2659         INIT_WORK(&adapter->common_task, atl1c_common_task);
2660         adapter->work_event = 0;
2661         err = register_netdev(netdev);
2662         if (err) {
2663                 dev_err(&pdev->dev, "register netdevice failed\n");
2664                 goto err_register;
2665         }
2666
2667         cards_found++;
2668         return 0;
2669
2670 err_reset:
2671 err_register:
2672 err_sw_init:
2673         iounmap(adapter->hw.hw_addr);
2674 err_init_netdev:
2675 err_ioremap:
2676         free_netdev(netdev);
2677 err_alloc_etherdev:
2678         pci_release_regions(pdev);
2679 err_pci_reg:
2680 err_dma:
2681         pci_disable_device(pdev);
2682         return err;
2683 }
2684
2685 /**
2686  * atl1c_remove - Device Removal Routine
2687  * @pdev: PCI device information struct
2688  *
2689  * atl1c_remove is called by the PCI subsystem to alert the driver
2690  * that it should release a PCI device.  This could be caused by a
2691  * Hot-Plug event, or because the driver is going to be removed from
2692  * memory.
2693  */
2694 static void atl1c_remove(struct pci_dev *pdev)
2695 {
2696         struct net_device *netdev = pci_get_drvdata(pdev);
2697         struct atl1c_adapter *adapter = netdev_priv(netdev);
2698
2699         unregister_netdev(netdev);
2700         /* restore permanent address */
2701         atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
2702         atl1c_phy_disable(&adapter->hw);
2703
2704         iounmap(adapter->hw.hw_addr);
2705
2706         pci_release_regions(pdev);
2707         pci_disable_device(pdev);
2708         free_netdev(netdev);
2709 }
2710
2711 /**
2712  * atl1c_io_error_detected - called when PCI error is detected
2713  * @pdev: Pointer to PCI device
2714  * @state: The current pci connection state
2715  *
2716  * This function is called after a PCI bus error affecting
2717  * this device has been detected.
2718  */
2719 static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
2720                                                 pci_channel_state_t state)
2721 {
2722         struct net_device *netdev = pci_get_drvdata(pdev);
2723         struct atl1c_adapter *adapter = netdev_priv(netdev);
2724
2725         netif_device_detach(netdev);
2726
2727         if (state == pci_channel_io_perm_failure)
2728                 return PCI_ERS_RESULT_DISCONNECT;
2729
2730         if (netif_running(netdev))
2731                 atl1c_down(adapter);
2732
2733         pci_disable_device(pdev);
2734
2735         /* Request a slot reset. */
2736         return PCI_ERS_RESULT_NEED_RESET;
2737 }
2738
2739 /**
2740  * atl1c_io_slot_reset - called after the pci bus has been reset.
2741  * @pdev: Pointer to PCI device
2742  *
2743  * Restart the card from scratch, as if from a cold-boot. Implementation
2744  * resembles the first-half of the e1000_resume routine.
2745  */
2746 static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
2747 {
2748         struct net_device *netdev = pci_get_drvdata(pdev);
2749         struct atl1c_adapter *adapter = netdev_priv(netdev);
2750
2751         if (pci_enable_device(pdev)) {
2752                 if (netif_msg_hw(adapter))
2753                         dev_err(&pdev->dev,
2754                                 "Cannot re-enable PCI device after reset\n");
2755                 return PCI_ERS_RESULT_DISCONNECT;
2756         }
2757         pci_set_master(pdev);
2758
2759         pci_enable_wake(pdev, PCI_D3hot, 0);
2760         pci_enable_wake(pdev, PCI_D3cold, 0);
2761
2762         atl1c_reset_mac(&adapter->hw);
2763
2764         return PCI_ERS_RESULT_RECOVERED;
2765 }
2766
2767 /**
2768  * atl1c_io_resume - called when traffic can start flowing again.
2769  * @pdev: Pointer to PCI device
2770  *
2771  * This callback is called when the error recovery driver tells us that
2772  * it's OK to resume normal operation. Implementation resembles the
2773  * second-half of the atl1c_resume routine.
2774  */
2775 static void atl1c_io_resume(struct pci_dev *pdev)
2776 {
2777         struct net_device *netdev = pci_get_drvdata(pdev);
2778         struct atl1c_adapter *adapter = netdev_priv(netdev);
2779
2780         if (netif_running(netdev)) {
2781                 if (atl1c_up(adapter)) {
2782                         if (netif_msg_hw(adapter))
2783                                 dev_err(&pdev->dev,
2784                                         "Cannot bring device back up after reset\n");
2785                         return;
2786                 }
2787         }
2788
2789         netif_device_attach(netdev);
2790 }
2791
2792 static const struct pci_error_handlers atl1c_err_handler = {
2793         .error_detected = atl1c_io_error_detected,
2794         .slot_reset = atl1c_io_slot_reset,
2795         .resume = atl1c_io_resume,
2796 };
2797
2798 static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
2799
2800 static struct pci_driver atl1c_driver = {
2801         .name     = atl1c_driver_name,
2802         .id_table = atl1c_pci_tbl,
2803         .probe    = atl1c_probe,
2804         .remove   = atl1c_remove,
2805         .shutdown = atl1c_shutdown,
2806         .err_handler = &atl1c_err_handler,
2807         .driver.pm = &atl1c_pm_ops,
2808 };
2809
2810 module_pci_driver(atl1c_driver);