tg3: Create MII_TG3_FET namespace
[linux-2.6-microblaze.git] / drivers / net / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2009 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/in.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
38 #include <linux/ip.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/firmware.h>
44
45 #include <net/checksum.h>
46 #include <net/ip.h>
47
48 #include <asm/system.h>
49 #include <asm/io.h>
50 #include <asm/byteorder.h>
51 #include <asm/uaccess.h>
52
53 #ifdef CONFIG_SPARC
54 #include <asm/idprom.h>
55 #include <asm/prom.h>
56 #endif
57
58 #define BAR_0   0
59 #define BAR_2   2
60
61 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
62 #define TG3_VLAN_TAG_USED 1
63 #else
64 #define TG3_VLAN_TAG_USED 0
65 #endif
66
67 #include "tg3.h"
68
69 #define DRV_MODULE_NAME         "tg3"
70 #define PFX DRV_MODULE_NAME     ": "
71 #define DRV_MODULE_VERSION      "3.99"
72 #define DRV_MODULE_RELDATE      "April 20, 2009"
73
74 #define TG3_DEF_MAC_MODE        0
75 #define TG3_DEF_RX_MODE         0
76 #define TG3_DEF_TX_MODE         0
77 #define TG3_DEF_MSG_ENABLE        \
78         (NETIF_MSG_DRV          | \
79          NETIF_MSG_PROBE        | \
80          NETIF_MSG_LINK         | \
81          NETIF_MSG_TIMER        | \
82          NETIF_MSG_IFDOWN       | \
83          NETIF_MSG_IFUP         | \
84          NETIF_MSG_RX_ERR       | \
85          NETIF_MSG_TX_ERR)
86
87 /* length of time before we decide the hardware is borked,
88  * and dev->tx_timeout() should be called to fix the problem
89  */
90 #define TG3_TX_TIMEOUT                  (5 * HZ)
91
92 /* hardware minimum and maximum for a single frame's data payload */
93 #define TG3_MIN_MTU                     60
94 #define TG3_MAX_MTU(tp) \
95         ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
96
97 /* These numbers seem to be hard coded in the NIC firmware somehow.
98  * You can't change the ring sizes, but you can change where you place
99  * them in the NIC onboard memory.
100  */
101 #define TG3_RX_RING_SIZE                512
102 #define TG3_DEF_RX_RING_PENDING         200
103 #define TG3_RX_JUMBO_RING_SIZE          256
104 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
105
106 /* Do not place this n-ring entries value into the tp struct itself,
107  * we really want to expose these constants to GCC so that modulo et
108  * al.  operations are done with shifts and masks instead of with
109  * hw multiply/modulo instructions.  Another solution would be to
110  * replace things like '% foo' with '& (foo - 1)'.
111  */
112 #define TG3_RX_RCB_RING_SIZE(tp)        \
113         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
114
115 #define TG3_TX_RING_SIZE                512
116 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
117
118 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
119                                  TG3_RX_RING_SIZE)
120 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121                                  TG3_RX_JUMBO_RING_SIZE)
122 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
123                                    TG3_RX_RCB_RING_SIZE(tp))
124 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
125                                  TG3_TX_RING_SIZE)
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH(tp)                ((tp)->tx_pending / 4)
133
134 #define TG3_RAW_IP_ALIGN 2
135
136 /* number of ETHTOOL_GSTATS u64's */
137 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138
139 #define TG3_NUM_TEST            6
140
141 #define FIRMWARE_TG3            "tigon/tg3.bin"
142 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
143 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
144
/* One-time banner printed when the module loads; built from the
 * DRV_MODULE_* macros above so it always matches the advertised version.
 */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
147
148 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
149 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
150 MODULE_LICENSE("GPL");
151 MODULE_VERSION(DRV_MODULE_VERSION);
152 MODULE_FIRMWARE(FIRMWARE_TG3);
153 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
154 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
155
156
157 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
158 module_param(tg3_debug, int, 0);
159 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
160
/* PCI IDs this driver binds to: Broadcom Tigon3 variants plus the
 * SysKonnect, Altima and Apple OEM versions of the same silicon.
 * Exported to the PCI core via MODULE_DEVICE_TABLE() below.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* sentinel: terminates the table for the PCI core */
};
236
237 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
238
/* Names reported for ETHTOOL_GSTRINGS/ETHTOOL_GSTATS.  The array is sized
 * by TG3_NUM_STATS (one entry per u64 in struct tg3_ethtool_stats), and
 * presumably must stay in the same order as that struct's fields so the
 * i-th name labels the i-th counter — verify against tg3.h before
 * reordering.  TODO(review): confirm ordering contract.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	/* RX counters */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* TX counters */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* DMA / ring housekeeping counters */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
319
/* Names reported for the TG3_NUM_TEST ethtool self-tests; the fixed-width
 * padding keeps the (online)/(offline) column aligned in `ethtool -t`
 * output.  Order presumably matches the self-test dispatch — verify
 * against tg3_self_test() before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
330
/* Plain posted MMIO write of a 32-bit register at offset @off into BAR 0. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
335
336 static u32 tg3_read32(struct tg3 *tp, u32 off)
337 {
338         return (readl(tp->regs + off));
339 }
340
/* 32-bit MMIO write into the APE register block (tp->aperegs mapping). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
345
346 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
347 {
348         return (readl(tp->aperegs + off));
349 }
350
/* Write a chip register indirectly through PCI config space:
 * program the window base (TG3PCI_REG_BASE_ADDR), then the data.
 * The two config writes must be atomic with respect to other users
 * of the window, hence indirect_lock; IRQ-safe because register
 * writes can happen from interrupt context.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
360
/* MMIO write followed by a read-back of the same register to flush the
 * posted write out to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
366
/* Read a chip register indirectly through the PCI config-space window.
 * Counterpart of tg3_write_indirect_reg32(); same locking rationale.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
378
/* Write a mailbox register via PCI config space (used when direct MMIO
 * mailbox access is unsafe for this chip).  Two mailboxes have dedicated
 * config-space aliases and bypass the indirect window entirely; all
 * others go through REG_BASE_ADDR/REG_DATA with the mailbox region
 * offset (0x5600 — presumably the GRC mailbox base in the indirect
 * address map; confirm against tg3.h) added.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return-ring consumer index has a direct config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index likewise. */
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
408
/* Read a mailbox register via the PCI config-space indirect window.
 * Uses the same +0x5600 mailbox-region offset as
 * tg3_write_indirect_mbox().
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
420
421 /* usec_wait specifies the wait time in usec when writing to certain registers
422  * where it is unsafe to read back the register without some delay.
423  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
424  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
425  */
426 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
427 {
428         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
429             (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
430                 /* Non-posted methods */
431                 tp->write32(tp, off, val);
432         else {
433                 /* Posted method */
434                 tg3_write32(tp, off, val);
435                 if (usec_wait)
436                         udelay(usec_wait);
437                 tp->read32(tp, off);
438         }
439         /* Wait again after the read for the posted method to guarantee that
440          * the wait time is met.
441          */
442         if (usec_wait)
443                 udelay(usec_wait);
444 }
445
/* Mailbox write with conditional read-back flush: skip the flush when the
 * chip either tolerates write reordering without it or must not be read
 * back (ICH workaround).
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
453
/* TX mailbox write with two chip-bug workarounds: the TXD mailbox
 * hardware bug requires the value to be written twice, and chips that
 * reorder mailbox writes need a read-back to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}
463
464 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
465 {
466         return (readl(tp->regs + off + GRCMBOX_BASE));
467 }
468
/* 5906 mailbox write: mailboxes live at GRCMBOX_BASE in the register map. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
473
474 #define tw32_mailbox(reg, val)  tp->write32_mbox(tp, reg, val)
475 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
476 #define tw32_rx_mbox(reg, val)  tp->write32_rx_mbox(tp, reg, val)
477 #define tw32_tx_mbox(reg, val)  tp->write32_tx_mbox(tp, reg, val)
478 #define tr32_mailbox(reg)       tp->read32_mbox(tp, reg)
479
480 #define tw32(reg,val)           tp->write32(tp, reg, val)
481 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val), 0)
482 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
483 #define tr32(reg)               tp->read32(tp, reg)
484
/* Write a word into NIC on-board SRAM at @off through the memory window.
 * On 5906 the stats-block SRAM range is not writable, so such writes are
 * silently dropped.  Depending on chip flags the window is programmed
 * either via PCI config space or via MMIO (tw32_f); either way the base
 * is restored to zero afterwards because other code assumes it.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: stats-block SRAM range is off limits — drop the write. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
509
/* Read a word of NIC on-board SRAM at @off through the memory window.
 * Counterpart of tg3_write_mem(); on 5906 the stats-block range reads
 * back as zero instead of touching the hardware.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: stats-block SRAM range is off limits — report zero. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
536
537 static void tg3_ape_lock_init(struct tg3 *tp)
538 {
539         int i;
540
541         /* Make sure the driver hasn't any stale locks. */
542         for (i = 0; i < 8; i++)
543                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
544                                 APE_LOCK_GRANT_DRIVER);
545 }
546
547 static int tg3_ape_lock(struct tg3 *tp, int locknum)
548 {
549         int i, off;
550         int ret = 0;
551         u32 status;
552
553         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
554                 return 0;
555
556         switch (locknum) {
557                 case TG3_APE_LOCK_GRC:
558                 case TG3_APE_LOCK_MEM:
559                         break;
560                 default:
561                         return -EINVAL;
562         }
563
564         off = 4 * locknum;
565
566         tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
567
568         /* Wait for up to 1 millisecond to acquire lock. */
569         for (i = 0; i < 100; i++) {
570                 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
571                 if (status == APE_LOCK_GRANT_DRIVER)
572                         break;
573                 udelay(10);
574         }
575
576         if (status != APE_LOCK_GRANT_DRIVER) {
577                 /* Revoke the lock request. */
578                 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
579                                 APE_LOCK_GRANT_DRIVER);
580
581                 ret = -EBUSY;
582         }
583
584         return ret;
585 }
586
587 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
588 {
589         int off;
590
591         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
592                 return;
593
594         switch (locknum) {
595                 case TG3_APE_LOCK_GRC:
596                 case TG3_APE_LOCK_MEM:
597                         break;
598                 default:
599                         return;
600         }
601
602         off = 4 * locknum;
603         tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
604 }
605
/* Mask the chip's PCI interrupt output and write 1 to the interrupt
 * mailbox to disable further status-block interrupt delivery.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
612
/* Force an interrupt so no pending work is lost across an enable:
 * non-tagged chips with an updated status block get a GRC SETINT kick;
 * otherwise the coalescing engine is poked with HOSTCC_MODE_NOW.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
622
/* Re-enable chip interrupts: clear irq_sync (wmb() orders the store
 * before the mailbox writes), unmask the PCI interrupt, and ack up to
 * the last processed status tag.  1-shot MSI chips need the tag written
 * twice — presumably a chip quirk; confirm against the 5750 errata.
 * Finally tg3_cond_int() forces an interrupt if work may be pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
637
638 static inline unsigned int tg3_has_work(struct tg3 *tp)
639 {
640         struct tg3_hw_status *sblk = tp->hw_status;
641         unsigned int work_exists = 0;
642
643         /* check for phy events */
644         if (!(tp->tg3_flags &
645               (TG3_FLAG_USE_LINKCHG_REG |
646                TG3_FLAG_POLL_SERDES))) {
647                 if (sblk->status & SD_STATUS_LINK_CHG)
648                         work_exists = 1;
649         }
650         /* check for RX/TX work to do */
651         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
652             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
653                 work_exists = 1;
654
655         return work_exists;
656 }
657
658 /* tg3_restart_ints
659  *  similar to tg3_enable_ints, but it accurately determines whether there
660  *  is new work pending and can return without flushing the PIO write
661  *  which reenables interrupts
662  */
/* tg3_restart_ints
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Ack up to last_tag; mmiowb() orders the mailbox write before
	 * any subsequent MMIO from other CPUs releasing a lock.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
678
/* Quiesce the data path: refresh trans_start so the watchdog does not
 * fire while we are deliberately stopped, then disable NAPI polling and
 * the TX queue.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	napi_disable(&tp->napi);
	netif_tx_disable(tp->dev);
}
685
/* Restart the data path after tg3_netif_stop(): wake the TX queue,
 * re-enable NAPI, mark the status block updated so the next poll runs,
 * and turn interrupts back on.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	napi_enable(&tp->napi);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
697
/* Switch the chip's core clock configuration.  No-op on CPMU-equipped
 * and 5780-class chips.  The intermediate tw32_wait_f() writes step the
 * clock through transitional states with a 40 us settle time each —
 * presumably a hardware-mandated sequence; do not reorder.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only CLKRUN control and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Drop out of 44 MHz core in two steps via ALTCLK. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
729
730 #define PHY_BUSY_LOOPS  5000
731
/* Read PHY register @reg over the MAC's MDIO interface into *val.
 *
 * Auto-polling is temporarily disabled around the transaction (the MAC
 * would otherwise own the MDIO bus) and restored afterwards; the 80 us
 * delays after MAC_MI_MODE writes let the mode change settle.  The
 * frame is built per the MI_COM register layout and the BUSY bit is
 * polled up to PHY_BUSY_LOOPS times.
 *
 * Returns 0 on success, -EBUSY if the PHY never went idle.  *val is
 * zeroed up front so callers see 0 on failure.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to get stable data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
780
/* Write @val to PHY register @reg over the MAC's MDIO interface.
 *
 * Mirrors tg3_readphy(): auto-polling is suspended around the
 * transaction and the BUSY bit polled up to PHY_BUSY_LOOPS times.
 * On 5906, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are silently
 * skipped (returns 0) — presumably those registers are absent or
 * must not be touched on that PHY; confirm against the 5906 errata.
 *
 * Returns 0 on success (or skip), -EBUSY if the PHY never went idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if we turned it off above. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
829
830 static int tg3_bmcr_reset(struct tg3 *tp)
831 {
832         u32 phy_control;
833         int limit, err;
834
835         /* OK, reset it, and poll the BMCR_RESET bit until it
836          * clears or we time out.
837          */
838         phy_control = BMCR_RESET;
839         err = tg3_writephy(tp, MII_BMCR, phy_control);
840         if (err != 0)
841                 return -EBUSY;
842
843         limit = 5000;
844         while (limit--) {
845                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
846                 if (err != 0)
847                         return -EBUSY;
848
849                 if ((phy_control & BMCR_RESET) == 0) {
850                         udelay(40);
851                         break;
852                 }
853                 udelay(10);
854         }
855         if (limit < 0)
856                 return -EBUSY;
857
858         return 0;
859 }
860
861 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
862 {
863         struct tg3 *tp = bp->priv;
864         u32 val;
865
866         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
867                 return -EAGAIN;
868
869         if (tg3_readphy(tp, reg, &val))
870                 return -EIO;
871
872         return val;
873 }
874
875 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
876 {
877         struct tg3 *tp = bp->priv;
878
879         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
880                 return -EAGAIN;
881
882         if (tg3_writephy(tp, reg, val))
883                 return -EIO;
884
885         return 0;
886 }
887
/* mdiobus reset hook: no bus-level reset is required (the PHY itself is
 * reset via tg3_bmcr_reset() during init), so simply report success.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
892
/* Program the 5785's MAC-to-PHY interface (LED modes, RGMII in-band
 * status signalling and clock timeouts) to match the external PHY that
 * phylib detected.  Unrecognized PHYs are left untouched.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];
	/* Select LED modes by PHY type; bail out on anything unknown. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM50610:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case TG3_PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case TG3_PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII attachments only need LED modes plus clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* PHYCFG1: clock timeouts always on; external in-band decode /
	 * status send only when the corresponding flags request them.
	 */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the in-band choices into the extended RGMII mode register. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
972
/* Resume MDIO bus accesses paused by tg3_mdio_stop() and take manual
 * ownership of the MI port by disabling hardware autopolling.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		mutex_lock(&tp->mdio_bus->mdio_lock);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
		mutex_unlock(&tp->mdio_bus->mdio_lock);
	}

	/* Autopolling off so tg3_readphy/tg3_writephy own the MI port. */
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	/* The 5785 needs its MAC/PHY interface reprogrammed on restart. */
	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
989
990 static void tg3_mdio_stop(struct tg3 *tp)
991 {
992         if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
993                 mutex_lock(&tp->mdio_bus->mdio_lock);
994                 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
995                 mutex_unlock(&tp->mdio_bus->mdio_lock);
996         }
997 }
998
/* When phylib is in use, allocate and register the mdiobus, verify a PHY
 * driver bound at PHY_ADDR, and set the MAC-side interface type to match
 * the detected PHY.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	tg3_mdio_start(tp);

	/* Nothing more to do unless phylib is used and not yet set up. */
	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	/* Probe only the single address our PHY lives at. */
	tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
			tp->dev->name, i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Registration succeeded but no driver bound: tear down again. */
	if (!phydev || !phydev->drv) {
		printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Choose the MAC interface mode (and RGMII dev_flags) per PHY. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case TG3_PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		break;
	case TG3_PHY_ID_BCM50610:
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case TG3_PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case TG3_PHY_ID_RTL8201E:
	case TG3_PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1082
/* Unregister and free the mdiobus created by tg3_mdio_init(). */
static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		/* Clear INITED first so the start/stop hooks become
		 * no-ops while the bus is being torn down.
		 */
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
	}
}
1092
/* Signal the management firmware that a driver event is pending by
 * setting GRC_RX_CPU_DRIVER_EVENT, and timestamp the event so
 * tg3_wait_for_event_ack() can shorten or skip its wait.
 *
 * tp->lock is held.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1104
1105 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1106
/* Wait (up to TG3_FW_EVENT_TIMEOUT_USEC from the last event) for the
 * firmware to acknowledge the previous driver event by clearing
 * GRC_RX_CPU_DRIVER_EVENT.
 *
 * tp->lock is held.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 usec steps. */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1133
/* Push the current MII link state (BMCR/BMSR, advertisement, partner
 * ability, 1000BASE-T status, PHY address) to the management firmware
 * through the NIC SRAM mailbox, then raise a firmware event.
 *
 * tp->lock is held.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	/* Only ASF-enabled 5780-class parts have firmware listening. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	/* 14 bytes of link data follow in the data mailbox. */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the upper half, BMSR in the lower half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local advertisement and link partner ability. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: 1000BASE-T control/status (copper links only). */
	val = 0;
	if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register, or zero if unreadable. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1181
1182 static void tg3_link_report(struct tg3 *tp)
1183 {
1184         if (!netif_carrier_ok(tp->dev)) {
1185                 if (netif_msg_link(tp))
1186                         printk(KERN_INFO PFX "%s: Link is down.\n",
1187                                tp->dev->name);
1188                 tg3_ump_link_report(tp);
1189         } else if (netif_msg_link(tp)) {
1190                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1191                        tp->dev->name,
1192                        (tp->link_config.active_speed == SPEED_1000 ?
1193                         1000 :
1194                         (tp->link_config.active_speed == SPEED_100 ?
1195                          100 : 10)),
1196                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1197                         "full" : "half"));
1198
1199                 printk(KERN_INFO PFX
1200                        "%s: Flow control is %s for TX and %s for RX.\n",
1201                        tp->dev->name,
1202                        (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1203                        "on" : "off",
1204                        (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1205                        "on" : "off");
1206                 tg3_ump_link_report(tp);
1207         }
1208 }
1209
1210 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1211 {
1212         u16 miireg;
1213
1214         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1215                 miireg = ADVERTISE_PAUSE_CAP;
1216         else if (flow_ctrl & FLOW_CTRL_TX)
1217                 miireg = ADVERTISE_PAUSE_ASYM;
1218         else if (flow_ctrl & FLOW_CTRL_RX)
1219                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1220         else
1221                 miireg = 0;
1222
1223         return miireg;
1224 }
1225
1226 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1227 {
1228         u16 miireg;
1229
1230         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1231                 miireg = ADVERTISE_1000XPAUSE;
1232         else if (flow_ctrl & FLOW_CTRL_TX)
1233                 miireg = ADVERTISE_1000XPSE_ASYM;
1234         else if (flow_ctrl & FLOW_CTRL_RX)
1235                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1236         else
1237                 miireg = 0;
1238
1239         return miireg;
1240 }
1241
1242 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1243 {
1244         u8 cap = 0;
1245
1246         if (lcladv & ADVERTISE_1000XPAUSE) {
1247                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1248                         if (rmtadv & LPA_1000XPAUSE)
1249                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1250                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1251                                 cap = FLOW_CTRL_RX;
1252                 } else {
1253                         if (rmtadv & LPA_1000XPAUSE)
1254                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1255                 }
1256         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1257                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1258                         cap = FLOW_CTRL_TX;
1259         }
1260
1261         return cap;
1262 }
1263
/* Resolve the active pause configuration from @lcladv/@rmtadv (or from
 * the forced settings when autoneg is off) and program MAC_RX_MODE and
 * MAC_TX_MODE, touching each register only if its flow bit changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		/* Serdes uses 1000BASE-X pause bits; copper uses the
		 * generic MII resolution helper.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1303
/* phylib link-change callback: mirror the PHY's negotiated state into
 * the MAC (port mode, duplex, flow control, MI status, TX timing) and
 * log a link message when something user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	spin_lock(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: resolve pause from our configured
			 * flow control and the partner's pause bits.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	/* The 5785's MI status block must be told about 10Mbps mode. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 half-duplex needs an extended slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when link, speed, duplex or flow control changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
	    linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock(&tp->lock);

	/* Report after dropping the lock. */
	if (linkmesg)
		tg3_link_report(tp);
}
1384
/* Attach the net device to its phylib PHY and restrict the advertised
 * feature set to what the MAC supports.  Idempotent: returns 0 at once
 * if already connected.  Returns a negative errno on failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only MACs fall back to the basic feature set. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface type: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
		return -EINVAL;
	}

	tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1432
/* (Re)start the phylib state machine, restoring any link settings that
 * were saved when the PHY was put into low-power mode.
 */
static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[PHY_ADDR];

	/* Coming out of low power: restore pre-suspend link settings. */
	if (tp->link_config.phy_is_low_power) {
		tp->link_config.phy_is_low_power = 0;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}
1454
1455 static void tg3_phy_stop(struct tg3 *tp)
1456 {
1457         if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1458                 return;
1459
1460         phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1461 }
1462
1463 static void tg3_phy_fini(struct tg3 *tp)
1464 {
1465         if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1466                 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1467                 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1468         }
1469 }
1470
/* Write @val to DSP register @reg through the PHY's address/data port
 * pair (the address write must precede the data write).
 */
static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
}
1476
/* Enable or disable PHY auto power-down via the misc shadow registers.
 * No-op on pre-5705 parts and on the 5906 (internal FET PHY).
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		return;

	/* Program the SCR5 shadow register. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL-APD stays clear only on a 5784 with APD being enabled. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Program the APD shadow: 84ms wake timer plus the enable bit. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1505
/* Enable or disable automatic MDI/MDI-X crossover.  No-op on pre-5705
 * parts and on serdes links.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906 FET PHY: flip the MDIX bit in the misc-control
		 * shadow, reached through the test register's shadow
		 * enable bit.
		 */
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			/* Restore the test register (shadow access off). */
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		/* Other PHYs: read-modify-write the aux-control misc
		 * shadow page, setting WREN to commit the change.
		 */
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}
1545
/* Enable the PHY's ethernet wirespeed (automatic speed downshift)
 * feature unless the chip flags forbid it.
 */
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
		return;

	/* 0x7007 selects an aux-control shadow page for read-back, and
	 * bits 15/4 are then set on the write-back (NOTE(review): exact
	 * bit meanings inferred from usage — confirm against the PHY
	 * datasheet).
	 */
	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}
1558
/* Apply PHY tuning values read from the chip's OTP memory.  The DSP
 * registers are written while the SM_DSP clock is enabled: enable the
 * clock, program the taps, then disable it again.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	/* AGC target. */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings, channel 0. */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter / ADC clock adjust, channel 3. */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}
1601
1602 static int tg3_wait_macro_done(struct tg3 *tp)
1603 {
1604         int limit = 100;
1605
1606         while (limit--) {
1607                 u32 tmp32;
1608
1609                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1610                         if ((tmp32 & 0x1000) == 0)
1611                                 break;
1612                 }
1613         }
1614         if (limit < 0)
1615                 return -EBUSY;
1616
1617         return 0;
1618 }
1619
/* Write a known test pattern into each of the four DSP channel blocks
 * and read it back to verify the PHY's internal memory.  Returns 0 on a
 * full match, -EBUSY on a mismatch or macro timeout; *resetp is set to 1
 * when the caller should reset the PHY before retrying.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Address this channel's block; register 0x16 drives the
		 * DSP access macro (NOTE(review): command values inferred
		 * from usage — confirm against PHY documentation).
		 */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick off the macro and wait for it to finish. */
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-address the block and switch to read-back. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare (the high
		 * word only carries 4 significant bits).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: park the DSP before failing. */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1685
1686 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1687 {
1688         int chan;
1689
1690         for (chan = 0; chan < 4; chan++) {
1691                 int i;
1692
1693                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1694                              (chan * 0x2000) | 0x0200);
1695                 tg3_writephy(tp, 0x16, 0x0002);
1696                 for (i = 0; i < 6; i++)
1697                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1698                 tg3_writephy(tp, 0x16, 0x0202);
1699                 if (tg3_wait_macro_done(tp))
1700                         return -EBUSY;
1701         }
1702
1703         return 0;
1704 }
1705
/* PHY reset workaround for 5703/5704/5705 chips: repeatedly reset the
 * PHY and run the DSP test-pattern check until it verifies or the retry
 * budget is exhausted, then clear the channel coefficients and restore
 * the original PHY configuration.  Returns 0 on success or a negative
 * errno.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
        u32 reg32, phy9_orig;
        int retries, do_phy_reset, err;

        retries = 10;
        do_phy_reset = 1;
        do {
                /* Reset the PHY only when the previous test-pattern pass
                 * asked for it (or on the first iteration).
                 */
                if (do_phy_reset) {
                        err = tg3_bmcr_reset(tp);
                        if (err)
                                return err;
                        do_phy_reset = 0;
                }

                /* Disable transmitter and interrupt.  */
                if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
                        continue;

                reg32 |= 0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

                /* Set full-duplex, 1000 mbps.  */
                tg3_writephy(tp, MII_BMCR,
                             BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

                /* Set to master mode.  */
                /* NOTE(review): if this read fails on every loop
                 * iteration, phy9_orig is written back below while still
                 * uninitialized — confirm whether that path can occur.
                 */
                if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
                        continue;

                tg3_writephy(tp, MII_TG3_CTRL,
                             (MII_TG3_CTRL_AS_MASTER |
                              MII_TG3_CTRL_ENABLE_AS_MASTER));

                /* Enable SM_DSP_CLOCK and 6dB.  */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

                /* Block the PHY control access.  */
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);

                err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
                if (!err)
                        break;
        } while (--retries);

        /* Clear all channel coefficients regardless of the test outcome. */
        err = tg3_phy_reset_chanpat(tp);
        if (err)
                return err;

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);

        tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
        tg3_writephy(tp, 0x16, 0x0000);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                /* Set Extended packet length bit for jumbo frames */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
        }
        else {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }

        /* Restore the master/slave setting saved earlier in the loop. */
        tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

        /* Re-enable transmitter and interrupt (undo the 0x3000 set above);
         * if the register cannot be read back, report -EBUSY unless a
         * prior error is already pending.
         */
        if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
                reg32 &= ~0x3000;
                tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
        } else if (!err)
                err = -EBUSY;

        return err;
}
1781
/* Reset the tigon3 PHY and apply all chip-specific post-reset fixups.
 * (The old comment mentioned a FORCE argument; there is none — the
 * function always resets.)  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
        u32 cpmuctrl;
        u32 phy_status;
        int err;

        /* 5906: bring the internal PHY out of IDDQ (low-power) mode
         * before touching it.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val;

                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
        }
        /* Read BMSR twice — some status bits are latched, so the first
         * read can return stale values (standard MII behavior).
         */
        err  = tg3_readphy(tp, MII_BMSR, &phy_status);
        err |= tg3_readphy(tp, MII_BMSR, &phy_status);
        if (err != 0)
                return -EBUSY;

        /* Announce link-down before resetting so the state change is
         * visible to the stack.
         */
        if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
                netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        /* 5703/4/5 need the full DSP test-pattern workaround instead of
         * a plain BMCR reset.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                err = tg3_phy_reset_5703_4_5(tp);
                if (err)
                        return err;
                goto out;
        }

        /* 5784 (non-AX): temporarily clear the CPMU 10MB-RX-only mode
         * around the reset; it is restored below.
         */
        cpmuctrl = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
            GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
                cpmuctrl = tr32(TG3_CPMU_CTRL);
                if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
                        tw32(TG3_CPMU_CTRL,
                             cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
        }

        err = tg3_bmcr_reset(tp);
        if (err)
                return err;

        /* Restore the CPMU mode saved above, after programming the
         * DSP EXP8 register.
         */
        if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
                u32 phy;

                phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
                tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);

                tw32(TG3_CPMU_CTRL, cpmuctrl);
        }

        /* 5784-AX / 5761-AX: take the 1000MB MAC clock out of 12.5MHz
         * mode after reset.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                u32 val;

                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
                    CPMU_LSPD_1000MB_MACCLK_12_5) {
                        val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                        udelay(40);
                        tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
                }
        }

        tg3_phy_apply_otp(tp);

        /* Enable or disable auto power-down per device configuration. */
        if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
                tg3_phy_toggle_apd(tp, true);
        else
                tg3_phy_toggle_apd(tp, false);

out:
        /* Erratum fixups applied after any reset path (magic DSP values
         * from the vendor; do not reorder).
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
                /* Written twice intentionally — presumably required by the
                 * 5704 A0 erratum; confirm against vendor errata.
                 */
                tg3_writephy(tp, 0x1c, 0x8d68);
                tg3_writephy(tp, 0x1c, 0x8d68);
        }
        if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
                tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
                tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
                if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     MII_TG3_TEST1_TRIM_EN | 0x4);
                } else
                        tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
        }
        /* Set Extended packet length bit (bit 14) on all chips that */
        /* support jumbo frames */
        if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
                /* Cannot do read-modify-write on 5401 */
                tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
        } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                /* Set bit 14 with read-modify-write to preserve other bits */
                if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
                    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
                        tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
        }

        /* Set phy register 0x10 bit 0 to high fifo elasticity to support
         * jumbo frames transmission.
         */
        if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
                u32 phy_reg;

                if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
                    tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* adjust output voltage */
                tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
        }

        tg3_phy_toggle_automdix(tp, 1);
        tg3_phy_set_wirespeed(tp);
        return 0;
}
1927
/* Configure the GPIO-controlled auxiliary power (Vaux) switching.  When
 * WOL or ASF is enabled on this port — or on the peer port of a
 * dual-port 5704/5714 — the GPIOs are driven to keep aux power on;
 * otherwise they are sequenced to switch it off.  The exact GPIO
 * sequences are chip-specific.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
        struct tg3 *tp_peer = tp;

        /* Only real NICs have these GPIOs wired to power circuitry. */
        if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
                return;

        /* Dual-port chips: both ports share the aux power control, so
         * look up the peer's state too.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);
                /* remove_one() may have been run on the peer. */
                if (!dev_peer)
                        tp_peer = tp;
                else
                        tp_peer = netdev_priv(dev_peer);
        }

        if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
            (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1),
                                    100);
                } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                           tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                        /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                        u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                             GRC_LCLCTRL_GPIO_OE1 |
                                             GRC_LCLCTRL_GPIO_OE2 |
                                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                                             GRC_LCLCTRL_GPIO_OUTPUT1 |
                                             tp->grc_local_ctrl;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                        tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
                } else {
                        u32 no_gpio2;
                        u32 grc_local_ctrl = 0;

                        /* If the peer port already completed init, it owns
                         * the GPIO sequencing — leave it alone.
                         */
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        /* Workaround to prevent overdrawing Amps. */
                        if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
                            ASIC_REV_5714) {
                                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }

                        /* On 5753 and variants, GPIO2 cannot be used. */
                        no_gpio2 = tp->nic_sram_data_cfg &
                                    NIC_SRAM_DATA_CFG_NO_GPIO2;

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                         GRC_LCLCTRL_GPIO_OE1 |
                                         GRC_LCLCTRL_GPIO_OE2 |
                                         GRC_LCLCTRL_GPIO_OUTPUT1 |
                                         GRC_LCLCTRL_GPIO_OUTPUT2;
                        if (no_gpio2) {
                                grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                                    GRC_LCLCTRL_GPIO_OUTPUT2);
                        }
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                                    grc_local_ctrl, 100);

                        if (!no_gpio2) {
                                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                            grc_local_ctrl, 100);
                        }
                }
        } else {
                /* Neither port needs aux power: pulse GPIO1 to switch it
                 * off (not needed/possible on 5700/5701).
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                        if (tp_peer != tp &&
                            (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
                                return;

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    GRC_LCLCTRL_GPIO_OE1, 100);

                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    (GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
                }
        }
}
2039
2040 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2041 {
2042         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2043                 return 1;
2044         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2045                 if (speed != SPEED_10)
2046                         return 1;
2047         } else if (speed == SPEED_10)
2048                 return 1;
2049
2050         return 0;
2051 }
2052
static int tg3_setup_phy(struct tg3 *, int);

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

static void tg3_write_sig_post_reset(struct tg3 *, int);
static int tg3_halt_cpu(struct tg3 *, u32);

/* Put the PHY (or serdes link) into a low-power state ahead of device
 * suspend.  @do_low_power selects the deeper isolate/low-power PHY
 * programming; chips with power-down errata skip the final BMCR
 * power-down entirely.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        /* Serdes devices: only the 5704 needs explicit programming;
         * everything else is left as-is.
         */
        if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        /* 5906: reset the PHY, then park it in IDDQ mode. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (do_low_power) {
                /* Force the LED off and isolate the PHY in low-power
                 * mode via the auxiliary control shadow registers.
                 */
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                tg3_writephy(tp, MII_TG3_AUX_CTRL,
                             MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
                             MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                             MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                             MII_TG3_AUXCTL_PCTL_VREG_11V);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
                return;

        /* 5784-AX / 5761-AX: drop the MAC clock to 12.5MHz before
         * powering the PHY down.
         */
        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2115
2116 /* tp->lock is held. */
2117 static int tg3_nvram_lock(struct tg3 *tp)
2118 {
2119         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2120                 int i;
2121
2122                 if (tp->nvram_lock_cnt == 0) {
2123                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2124                         for (i = 0; i < 8000; i++) {
2125                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2126                                         break;
2127                                 udelay(20);
2128                         }
2129                         if (i == 8000) {
2130                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2131                                 return -ENODEV;
2132                         }
2133                 }
2134                 tp->nvram_lock_cnt++;
2135         }
2136         return 0;
2137 }
2138
2139 /* tp->lock is held. */
2140 static void tg3_nvram_unlock(struct tg3 *tp)
2141 {
2142         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2143                 if (tp->nvram_lock_cnt > 0)
2144                         tp->nvram_lock_cnt--;
2145                 if (tp->nvram_lock_cnt == 0)
2146                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2147         }
2148 }
2149
2150 /* tp->lock is held. */
2151 static void tg3_enable_nvram_access(struct tg3 *tp)
2152 {
2153         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2154             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2155                 u32 nvaccess = tr32(NVRAM_ACCESS);
2156
2157                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2158         }
2159 }
2160
2161 /* tp->lock is held. */
2162 static void tg3_disable_nvram_access(struct tg3 *tp)
2163 {
2164         if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2165             !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
2166                 u32 nvaccess = tr32(NVRAM_ACCESS);
2167
2168                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2169         }
2170 }
2171
2172 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2173                                         u32 offset, u32 *val)
2174 {
2175         u32 tmp;
2176         int i;
2177
2178         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2179                 return -EINVAL;
2180
2181         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2182                                         EEPROM_ADDR_DEVID_MASK |
2183                                         EEPROM_ADDR_READ);
2184         tw32(GRC_EEPROM_ADDR,
2185              tmp |
2186              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2187              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2188               EEPROM_ADDR_ADDR_MASK) |
2189              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2190
2191         for (i = 0; i < 1000; i++) {
2192                 tmp = tr32(GRC_EEPROM_ADDR);
2193
2194                 if (tmp & EEPROM_ADDR_COMPLETE)
2195                         break;
2196                 msleep(1);
2197         }
2198         if (!(tmp & EEPROM_ADDR_COMPLETE))
2199                 return -EBUSY;
2200
2201         tmp = tr32(GRC_EEPROM_DATA);
2202
2203         /*
2204          * The data will always be opposite the native endian
2205          * format.  Perform a blind byteswap to compensate.
2206          */
2207         *val = swab32(tmp);
2208
2209         return 0;
2210 }
2211
2212 #define NVRAM_CMD_TIMEOUT 10000
2213
2214 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2215 {
2216         int i;
2217
2218         tw32(NVRAM_CMD, nvram_cmd);
2219         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2220                 udelay(10);
2221                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2222                         udelay(10);
2223                         break;
2224                 }
2225         }
2226
2227         if (i == NVRAM_CMD_TIMEOUT)
2228                 return -EBUSY;
2229
2230         return 0;
2231 }
2232
2233 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2234 {
2235         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2236             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2237             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2238            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2239             (tp->nvram_jedecnum == JEDEC_ATMEL))
2240
2241                 addr = ((addr / tp->nvram_pagesize) <<
2242                         ATMEL_AT45DB0X1B_PAGE_POS) +
2243                        (addr % tp->nvram_pagesize);
2244
2245         return addr;
2246 }
2247
2248 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2249 {
2250         if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2251             (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2252             (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2253            !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2254             (tp->nvram_jedecnum == JEDEC_ATMEL))
2255
2256                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2257                         tp->nvram_pagesize) +
2258                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2259
2260         return addr;
2261 }
2262
2263 /* NOTE: Data read in from NVRAM is byteswapped according to
2264  * the byteswapping settings for all other register accesses.
2265  * tg3 devices are BE devices, so on a BE machine, the data
2266  * returned will be exactly as it is seen in NVRAM.  On a LE
2267  * machine, the 32-bit value will be byteswapped.
2268  */
2269 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2270 {
2271         int ret;
2272
2273         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2274                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2275
2276         offset = tg3_nvram_phys_addr(tp, offset);
2277
2278         if (offset > NVRAM_ADDR_MSK)
2279                 return -EINVAL;
2280
2281         ret = tg3_nvram_lock(tp);
2282         if (ret)
2283                 return ret;
2284
2285         tg3_enable_nvram_access(tp);
2286
2287         tw32(NVRAM_ADDR, offset);
2288         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2289                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2290
2291         if (ret == 0)
2292                 *val = tr32(NVRAM_RDDATA);
2293
2294         tg3_disable_nvram_access(tp);
2295
2296         tg3_nvram_unlock(tp);
2297
2298         return ret;
2299 }
2300
2301 /* Ensures NVRAM data is in bytestream format. */
2302 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2303 {
2304         u32 v;
2305         int res = tg3_nvram_read(tp, offset, &v);
2306         if (!res)
2307                 *val = cpu_to_be32(v);
2308         return res;
2309 }
2310
2311 /* tp->lock is held. */
2312 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2313 {
2314         u32 addr_high, addr_low;
2315         int i;
2316
2317         addr_high = ((tp->dev->dev_addr[0] << 8) |
2318                      tp->dev->dev_addr[1]);
2319         addr_low = ((tp->dev->dev_addr[2] << 24) |
2320                     (tp->dev->dev_addr[3] << 16) |
2321                     (tp->dev->dev_addr[4] <<  8) |
2322                     (tp->dev->dev_addr[5] <<  0));
2323         for (i = 0; i < 4; i++) {
2324                 if (i == 1 && skip_mac_1)
2325                         continue;
2326                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2327                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2328         }
2329
2330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2331             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2332                 for (i = 0; i < 12; i++) {
2333                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2334                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2335                 }
2336         }
2337
2338         addr_high = (tp->dev->dev_addr[0] +
2339                      tp->dev->dev_addr[1] +
2340                      tp->dev->dev_addr[2] +
2341                      tp->dev->dev_addr[3] +
2342                      tp->dev->dev_addr[4] +
2343                      tp->dev->dev_addr[5]) &
2344                 TX_BACKOFF_SEED_MASK;
2345         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2346 }
2347
2348 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2349 {
2350         u32 misc_host_ctrl;
2351         bool device_should_wake, do_low_power;
2352
2353         /* Make sure register accesses (indirect or otherwise)
2354          * will function correctly.
2355          */
2356         pci_write_config_dword(tp->pdev,
2357                                TG3PCI_MISC_HOST_CTRL,
2358                                tp->misc_host_ctrl);
2359
2360         switch (state) {
2361         case PCI_D0:
2362                 pci_enable_wake(tp->pdev, state, false);
2363                 pci_set_power_state(tp->pdev, PCI_D0);
2364
2365                 /* Switch out of Vaux if it is a NIC */
2366                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2367                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2368
2369                 return 0;
2370
2371         case PCI_D1:
2372         case PCI_D2:
2373         case PCI_D3hot:
2374                 break;
2375
2376         default:
2377                 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2378                         tp->dev->name, state);
2379                 return -EINVAL;
2380         }
2381
2382         /* Restore the CLKREQ setting. */
2383         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2384                 u16 lnkctl;
2385
2386                 pci_read_config_word(tp->pdev,
2387                                      tp->pcie_cap + PCI_EXP_LNKCTL,
2388                                      &lnkctl);
2389                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2390                 pci_write_config_word(tp->pdev,
2391                                       tp->pcie_cap + PCI_EXP_LNKCTL,
2392                                       lnkctl);
2393         }
2394
2395         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2396         tw32(TG3PCI_MISC_HOST_CTRL,
2397              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2398
2399         device_should_wake = pci_pme_capable(tp->pdev, state) &&
2400                              device_may_wakeup(&tp->pdev->dev) &&
2401                              (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2402
2403         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2404                 do_low_power = false;
2405                 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2406                     !tp->link_config.phy_is_low_power) {
2407                         struct phy_device *phydev;
2408                         u32 phyid, advertising;
2409
2410                         phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2411
2412                         tp->link_config.phy_is_low_power = 1;
2413
2414                         tp->link_config.orig_speed = phydev->speed;
2415                         tp->link_config.orig_duplex = phydev->duplex;
2416                         tp->link_config.orig_autoneg = phydev->autoneg;
2417                         tp->link_config.orig_advertising = phydev->advertising;
2418
2419                         advertising = ADVERTISED_TP |
2420                                       ADVERTISED_Pause |
2421                                       ADVERTISED_Autoneg |
2422                                       ADVERTISED_10baseT_Half;
2423
2424                         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2425                             device_should_wake) {
2426                                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2427                                         advertising |=
2428                                                 ADVERTISED_100baseT_Half |
2429                                                 ADVERTISED_100baseT_Full |
2430                                                 ADVERTISED_10baseT_Full;
2431                                 else
2432                                         advertising |= ADVERTISED_10baseT_Full;
2433                         }
2434
2435                         phydev->advertising = advertising;
2436
2437                         phy_start_aneg(phydev);
2438
2439                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2440                         if (phyid != TG3_PHY_ID_BCMAC131) {
2441                                 phyid &= TG3_PHY_OUI_MASK;
2442                                 if (phyid == TG3_PHY_OUI_1 ||
2443                                     phyid == TG3_PHY_OUI_2 ||
2444                                     phyid == TG3_PHY_OUI_3)
2445                                         do_low_power = true;
2446                         }
2447                 }
2448         } else {
2449                 do_low_power = true;
2450
2451                 if (tp->link_config.phy_is_low_power == 0) {
2452                         tp->link_config.phy_is_low_power = 1;
2453                         tp->link_config.orig_speed = tp->link_config.speed;
2454                         tp->link_config.orig_duplex = tp->link_config.duplex;
2455                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2456                 }
2457
2458                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2459                         tp->link_config.speed = SPEED_10;
2460                         tp->link_config.duplex = DUPLEX_HALF;
2461                         tp->link_config.autoneg = AUTONEG_ENABLE;
2462                         tg3_setup_phy(tp, 0);
2463                 }
2464         }
2465
2466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467                 u32 val;
2468
2469                 val = tr32(GRC_VCPU_EXT_CTRL);
2470                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2471         } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2472                 int i;
2473                 u32 val;
2474
2475                 for (i = 0; i < 200; i++) {
2476                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2477                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2478                                 break;
2479                         msleep(1);
2480                 }
2481         }
2482         if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2483                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2484                                                      WOL_DRV_STATE_SHUTDOWN |
2485                                                      WOL_DRV_WOL |
2486                                                      WOL_SET_MAGIC_PKT);
2487
2488         if (device_should_wake) {
2489                 u32 mac_mode;
2490
2491                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2492                         if (do_low_power) {
2493                                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2494                                 udelay(40);
2495                         }
2496
2497                         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2498                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2499                         else
2500                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2501
2502                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2503                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2504                             ASIC_REV_5700) {
2505                                 u32 speed = (tp->tg3_flags &
2506                                              TG3_FLAG_WOL_SPEED_100MB) ?
2507                                              SPEED_100 : SPEED_10;
2508                                 if (tg3_5700_link_polarity(tp, speed))
2509                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2510                                 else
2511                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2512                         }
2513                 } else {
2514                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2515                 }
2516
2517                 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2518                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2519
2520                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2521                 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2522                     !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2523                     ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2524                      (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2525                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2526
2527                 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2528                         mac_mode |= tp->mac_mode &
2529                                     (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2530                         if (mac_mode & MAC_MODE_APE_TX_EN)
2531                                 mac_mode |= MAC_MODE_TDE_ENABLE;
2532                 }
2533
2534                 tw32_f(MAC_MODE, mac_mode);
2535                 udelay(100);
2536
2537                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2538                 udelay(10);
2539         }
2540
2541         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2542             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2543              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2544                 u32 base_val;
2545
2546                 base_val = tp->pci_clock_ctrl;
2547                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2548                              CLOCK_CTRL_TXCLK_DISABLE);
2549
2550                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2551                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2552         } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2553                    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2554                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2555                 /* do nothing */
2556         } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2557                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2558                 u32 newbits1, newbits2;
2559
2560                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2561                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2562                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2563                                     CLOCK_CTRL_TXCLK_DISABLE |
2564                                     CLOCK_CTRL_ALTCLK);
2565                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2566                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2567                         newbits1 = CLOCK_CTRL_625_CORE;
2568                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2569                 } else {
2570                         newbits1 = CLOCK_CTRL_ALTCLK;
2571                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2572                 }
2573
2574                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2575                             40);
2576
2577                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2578                             40);
2579
2580                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2581                         u32 newbits3;
2582
2583                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2584                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2585                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2586                                             CLOCK_CTRL_TXCLK_DISABLE |
2587                                             CLOCK_CTRL_44MHZ_CORE);
2588                         } else {
2589                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2590                         }
2591
2592                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2593                                     tp->pci_clock_ctrl | newbits3, 40);
2594                 }
2595         }
2596
2597         if (!(device_should_wake) &&
2598             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2599                 tg3_power_down_phy(tp, do_low_power);
2600
2601         tg3_frob_aux_power(tp);
2602
2603         /* Workaround for unstable PLL clock */
2604         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2605             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2606                 u32 val = tr32(0x7d00);
2607
2608                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2609                 tw32(0x7d00, val);
2610                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2611                         int err;
2612
2613                         err = tg3_nvram_lock(tp);
2614                         tg3_halt_cpu(tp, RX_CPU_BASE);
2615                         if (!err)
2616                                 tg3_nvram_unlock(tp);
2617                 }
2618         }
2619
2620         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2621
2622         if (device_should_wake)
2623                 pci_enable_wake(tp->pdev, state, true);
2624
2625         /* Finally, set the new power state. */
2626         pci_set_power_state(tp->pdev, state);
2627
2628         return 0;
2629 }
2630
2631 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2632 {
2633         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2634         case MII_TG3_AUX_STAT_10HALF:
2635                 *speed = SPEED_10;
2636                 *duplex = DUPLEX_HALF;
2637                 break;
2638
2639         case MII_TG3_AUX_STAT_10FULL:
2640                 *speed = SPEED_10;
2641                 *duplex = DUPLEX_FULL;
2642                 break;
2643
2644         case MII_TG3_AUX_STAT_100HALF:
2645                 *speed = SPEED_100;
2646                 *duplex = DUPLEX_HALF;
2647                 break;
2648
2649         case MII_TG3_AUX_STAT_100FULL:
2650                 *speed = SPEED_100;
2651                 *duplex = DUPLEX_FULL;
2652                 break;
2653
2654         case MII_TG3_AUX_STAT_1000HALF:
2655                 *speed = SPEED_1000;
2656                 *duplex = DUPLEX_HALF;
2657                 break;
2658
2659         case MII_TG3_AUX_STAT_1000FULL:
2660                 *speed = SPEED_1000;
2661                 *duplex = DUPLEX_FULL;
2662                 break;
2663
2664         default:
2665                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2666                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2667                                  SPEED_10;
2668                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2669                                   DUPLEX_HALF;
2670                         break;
2671                 }
2672                 *speed = SPEED_INVALID;
2673                 *duplex = DUPLEX_INVALID;
2674                 break;
2675         }
2676 }
2677
/* Program the copper PHY advertisement and control registers from
 * tp->link_config and kick off (or force) the link.  Three setup
 * cases are handled:
 *   - low-power mode: advertise only 10baseT, plus 100baseT when
 *     WoL at 100Mb is required;
 *   - autoneg with no forced speed (speed == SPEED_INVALID):
 *     advertise everything enabled in link_config.advertising;
 *   - a forced speed/duplex: advertise only the requested mode.
 * Afterwards, either write a forced BMCR value (autoneg disabled)
 * or restart autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: build the 10/100 advertisement word
		 * from link_config.advertising, stripping gigabit modes
		 * on 10/100-only devices first.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = ADVERTISE_CSMA;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;

		/* Fold the requested flow control bits into the
		 * advertisement word.
		 */
		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 workaround: negotiate gigabit as
			 * master, unless the device is 10/100-only.
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
		new_adv |= ADVERTISE_CSMA;

		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			/* Same 5701 A0/B0 master-mode workaround as in
			 * the autoneg path above.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
		} else {
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			/* No gigabit advertisement for forced 10/100
			 * modes.
			 */
			new_adv = 0;
		}

		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait for it to
			 * actually go down before writing the new BMCR
			 * value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* BMSR is read twice; the link status
				 * bit is latched, so only the second
				 * read reflects the current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
2815
2816 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2817 {
2818         int err;
2819
2820         /* Turn off tap power management. */
2821         /* Set Extended packet length bit */
2822         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2823
2824         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2825         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2826
2827         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2828         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2829
2830         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2831         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2832
2833         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2834         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2835
2836         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2837         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
2838
2839         udelay(40);
2840
2841         return err;
2842 }
2843
2844 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2845 {
2846         u32 adv_reg, all_mask = 0;
2847
2848         if (mask & ADVERTISED_10baseT_Half)
2849                 all_mask |= ADVERTISE_10HALF;
2850         if (mask & ADVERTISED_10baseT_Full)
2851                 all_mask |= ADVERTISE_10FULL;
2852         if (mask & ADVERTISED_100baseT_Half)
2853                 all_mask |= ADVERTISE_100HALF;
2854         if (mask & ADVERTISED_100baseT_Full)
2855                 all_mask |= ADVERTISE_100FULL;
2856
2857         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2858                 return 0;
2859
2860         if ((adv_reg & all_mask) != all_mask)
2861                 return 0;
2862         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2863                 u32 tg3_ctrl;
2864
2865                 all_mask = 0;
2866                 if (mask & ADVERTISED_1000baseT_Half)
2867                         all_mask |= ADVERTISE_1000HALF;
2868                 if (mask & ADVERTISED_1000baseT_Full)
2869                         all_mask |= ADVERTISE_1000FULL;
2870
2871                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2872                         return 0;
2873
2874                 if ((tg3_ctrl & all_mask) != all_mask)
2875                         return 0;
2876         }
2877         return 1;
2878 }
2879
2880 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2881 {
2882         u32 curadv, reqadv;
2883
2884         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2885                 return 1;
2886
2887         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2888         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2889
2890         if (tp->link_config.active_duplex == DUPLEX_FULL) {
2891                 if (curadv != reqadv)
2892                         return 0;
2893
2894                 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2895                         tg3_readphy(tp, MII_LPA, rmtadv);
2896         } else {
2897                 /* Reprogram the advertisement register, even if it
2898                  * does not affect the current link.  If the link
2899                  * gets renegotiated in the future, we can save an
2900                  * additional renegotiation cycle by advertising
2901                  * it correctly in the first place.
2902                  */
2903                 if (curadv != reqadv) {
2904                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2905                                      ADVERTISE_PAUSE_ASYM);
2906                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
2907                 }
2908         }
2909
2910         return 1;
2911 }
2912
2913 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2914 {
2915         int current_link_up;
2916         u32 bmsr, dummy;
2917         u32 lcl_adv, rmt_adv;
2918         u16 current_speed;
2919         u8 current_duplex;
2920         int i, err;
2921
2922         tw32(MAC_EVENT, 0);
2923
2924         tw32_f(MAC_STATUS,
2925              (MAC_STATUS_SYNC_CHANGED |
2926               MAC_STATUS_CFG_CHANGED |
2927               MAC_STATUS_MI_COMPLETION |
2928               MAC_STATUS_LNKSTATE_CHANGED));
2929         udelay(40);
2930
2931         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2932                 tw32_f(MAC_MI_MODE,
2933                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
2934                 udelay(80);
2935         }
2936
2937         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2938
2939         /* Some third-party PHYs need to be reset on link going
2940          * down.
2941          */
2942         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2943              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2944              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2945             netif_carrier_ok(tp->dev)) {
2946                 tg3_readphy(tp, MII_BMSR, &bmsr);
2947                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2948                     !(bmsr & BMSR_LSTATUS))
2949                         force_reset = 1;
2950         }
2951         if (force_reset)
2952                 tg3_phy_reset(tp);
2953
2954         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2955                 tg3_readphy(tp, MII_BMSR, &bmsr);
2956                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2957                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2958                         bmsr = 0;
2959
2960                 if (!(bmsr & BMSR_LSTATUS)) {
2961                         err = tg3_init_5401phy_dsp(tp);
2962                         if (err)
2963                                 return err;
2964
2965                         tg3_readphy(tp, MII_BMSR, &bmsr);
2966                         for (i = 0; i < 1000; i++) {
2967                                 udelay(10);
2968                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2969                                     (bmsr & BMSR_LSTATUS)) {
2970                                         udelay(40);
2971                                         break;
2972                                 }
2973                         }
2974
2975                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2976                             !(bmsr & BMSR_LSTATUS) &&
2977                             tp->link_config.active_speed == SPEED_1000) {
2978                                 err = tg3_phy_reset(tp);
2979                                 if (!err)
2980                                         err = tg3_init_5401phy_dsp(tp);
2981                                 if (err)
2982                                         return err;
2983                         }
2984                 }
2985         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2986                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2987                 /* 5701 {A0,B0} CRC bug workaround */
2988                 tg3_writephy(tp, 0x15, 0x0a75);
2989                 tg3_writephy(tp, 0x1c, 0x8c68);
2990                 tg3_writephy(tp, 0x1c, 0x8d68);
2991                 tg3_writephy(tp, 0x1c, 0x8c68);
2992         }
2993
2994         /* Clear pending interrupts... */
2995         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2996         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2997
2998         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2999                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3000         else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
3001                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3002
3003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3004             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3005                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3006                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3007                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3008                 else
3009                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3010         }
3011
3012         current_link_up = 0;
3013         current_speed = SPEED_INVALID;
3014         current_duplex = DUPLEX_INVALID;
3015
3016         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3017                 u32 val;
3018
3019                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3020                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3021                 if (!(val & (1 << 10))) {
3022                         val |= (1 << 10);
3023                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3024                         goto relink;
3025                 }
3026         }
3027
3028         bmsr = 0;
3029         for (i = 0; i < 100; i++) {
3030                 tg3_readphy(tp, MII_BMSR, &bmsr);
3031                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3032                     (bmsr & BMSR_LSTATUS))
3033                         break;
3034                 udelay(40);
3035         }
3036
3037         if (bmsr & BMSR_LSTATUS) {
3038                 u32 aux_stat, bmcr;
3039
3040                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3041                 for (i = 0; i < 2000; i++) {
3042                         udelay(10);
3043                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3044                             aux_stat)
3045                                 break;
3046                 }
3047
3048                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3049                                              &current_speed,
3050                                              &current_duplex);
3051
3052                 bmcr = 0;
3053                 for (i = 0; i < 200; i++) {
3054                         tg3_readphy(tp, MII_BMCR, &bmcr);
3055                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3056                                 continue;
3057                         if (bmcr && bmcr != 0x7fff)
3058                                 break;
3059                         udelay(10);
3060                 }
3061
3062                 lcl_adv = 0;
3063                 rmt_adv = 0;
3064
3065                 tp->link_config.active_speed = current_speed;
3066                 tp->link_config.active_duplex = current_duplex;
3067
3068                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3069                         if ((bmcr & BMCR_ANENABLE) &&
3070                             tg3_copper_is_advertising_all(tp,
3071                                                 tp->link_config.advertising)) {
3072                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3073                                                                   &rmt_adv))
3074                                         current_link_up = 1;
3075                         }
3076                 } else {
3077                         if (!(bmcr & BMCR_ANENABLE) &&
3078                             tp->link_config.speed == current_speed &&
3079                             tp->link_config.duplex == current_duplex &&
3080                             tp->link_config.flowctrl ==
3081                             tp->link_config.active_flowctrl) {
3082                                 current_link_up = 1;
3083                         }
3084                 }
3085
3086                 if (current_link_up == 1 &&
3087                     tp->link_config.active_duplex == DUPLEX_FULL)
3088                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3089         }
3090
3091 relink:
3092         if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3093                 u32 tmp;
3094
3095                 tg3_phy_copper_begin(tp);
3096
3097                 tg3_readphy(tp, MII_BMSR, &tmp);
3098                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3099                     (tmp & BMSR_LSTATUS))
3100                         current_link_up = 1;
3101         }
3102
3103         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3104         if (current_link_up == 1) {
3105                 if (tp->link_config.active_speed == SPEED_100 ||
3106                     tp->link_config.active_speed == SPEED_10)
3107                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3108                 else
3109                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3110         } else
3111                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3112
3113         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3114         if (tp->link_config.active_duplex == DUPLEX_HALF)
3115                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3116
3117         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3118                 if (current_link_up == 1 &&
3119                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3120                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3121                 else
3122                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3123         }
3124
3125         /* ??? Without this setting Netgear GA302T PHY does not
3126          * ??? send/receive packets...
3127          */
3128         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
3129             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3130                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3131                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3132                 udelay(80);
3133         }
3134
3135         tw32_f(MAC_MODE, tp->mac_mode);
3136         udelay(40);
3137
3138         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3139                 /* Polled via timer. */
3140                 tw32_f(MAC_EVENT, 0);
3141         } else {
3142                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3143         }
3144         udelay(40);
3145
3146         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3147             current_link_up == 1 &&
3148             tp->link_config.active_speed == SPEED_1000 &&
3149             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3150              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3151                 udelay(120);
3152                 tw32_f(MAC_STATUS,
3153                      (MAC_STATUS_SYNC_CHANGED |
3154                       MAC_STATUS_CFG_CHANGED));
3155                 udelay(40);
3156                 tg3_write_mem(tp,
3157                               NIC_SRAM_FIRMWARE_MBOX,
3158                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3159         }
3160
3161         /* Prevent send BD corruption. */
3162         if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3163                 u16 oldlnkctl, newlnkctl;
3164
3165                 pci_read_config_word(tp->pdev,
3166                                      tp->pcie_cap + PCI_EXP_LNKCTL,
3167                                      &oldlnkctl);
3168                 if (tp->link_config.active_speed == SPEED_100 ||
3169                     tp->link_config.active_speed == SPEED_10)
3170                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3171                 else
3172                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3173                 if (newlnkctl != oldlnkctl)
3174                         pci_write_config_word(tp->pdev,
3175                                               tp->pcie_cap + PCI_EXP_LNKCTL,
3176                                               newlnkctl);
3177         } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3178                 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3179                 if (tp->link_config.active_speed == SPEED_100 ||
3180                     tp->link_config.active_speed == SPEED_10)
3181                         newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3182                 else
3183                         newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3184                 if (newreg != oldreg)
3185                         tw32(TG3_PCIE_LNKCTL, newreg);
3186         }
3187
3188         if (current_link_up != netif_carrier_ok(tp->dev)) {
3189                 if (current_link_up)
3190                         netif_carrier_on(tp->dev);
3191                 else
3192                         netif_carrier_off(tp->dev);
3193                 tg3_link_report(tp);
3194         }
3195
3196         return 0;
3197 }
3198
/* Per-port software state for the 1000BASE-X (fiber) autonegotiation
 * arbitration state machine implemented by tg3_fiber_aneg_smachine().
 */
struct tg3_fiber_aneginfo {
	int state;		/* current arbitration state (ANEG_STATE_*) */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;		/* MR_* control and result bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Timestamps in state-machine ticks (one tick per invocation). */
	unsigned long link_time, cur_time;

	/* Last received config word and how many consecutive polls it
	 * has been seen unchanged (basis for "ability match").
	 */
	u32 ability_match_cfg;
	int ability_match_count;

	/* Boolean detectors: stable ability seen / line idle / ACK bit. */
	char ability_match, idle_match, ack_match;

	/* Raw config code words: last transmitted / last received. */
	u32 txconfig, rxconfig;
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK		0
#define ANEG_DONE	1
#define ANEG_TIMER_ENAB	2
#define ANEG_FAILED	-1

/* Settle interval, in state-machine ticks (~1us each). */
#define ANEG_STATE_SETTLE_TIME	10000
3262
/* One step of the software 1000BASE-X autonegotiation arbitration
 * state machine (modeled on IEEE 802.3 clause 37), used when the MAC
 * does not run autoneg in hardware.  Driven by fiber_autoneg(), which
 * calls it roughly once per microsecond with persistent state in @ap.
 *
 * Return: ANEG_OK to continue polling, ANEG_TIMER_ENAB while a settle
 * interval is being timed, ANEG_DONE when negotiation has finished,
 * or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	u16 flowctrl;
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First invocation: clear all match-tracking state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word and update the
	 * ability/ack/idle detectors.  "Ability match" requires the
	 * same word to be seen on more than one consecutive poll.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* Line is idle: no config word being received. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Begin transmitting an all-zero config word. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Hold here until the settle interval has elapsed. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex plus our pause capabilities. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = ANEG_CFG_FD;
		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		if (flowctrl & ADVERTISE_1000XPAUSE)
			ap->txconfig |= ANEG_CFG_PS1;
		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
			ap->txconfig |= ANEG_CFG_PS2;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge: keep sending our word with ACK set. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner went back to idle: restart. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		/* Reject words with reserved bits set. */
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into
		 * the MR_LP_ADV_* flag bits.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		/* NOTE(review): magic 0x0008 has no ANEG_CFG_* name;
		 * presumably the clause 37 toggle bit - confirm.
		 */
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not supported;
				 * only proceed when neither side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending configs and wait for the line to idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			/* XXX another gem from the Broadcom driver :( */
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	}

	return ret;
}
3516
3517 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3518 {
3519         int res = 0;
3520         struct tg3_fiber_aneginfo aninfo;
3521         int status = ANEG_FAILED;
3522         unsigned int tick;
3523         u32 tmp;
3524
3525         tw32_f(MAC_TX_AUTO_NEG, 0);
3526
3527         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3528         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3529         udelay(40);
3530
3531         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3532         udelay(40);
3533
3534         memset(&aninfo, 0, sizeof(aninfo));
3535         aninfo.flags |= MR_AN_ENABLE;
3536         aninfo.state = ANEG_STATE_UNKNOWN;
3537         aninfo.cur_time = 0;
3538         tick = 0;
3539         while (++tick < 195000) {
3540                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3541                 if (status == ANEG_DONE || status == ANEG_FAILED)
3542                         break;
3543
3544                 udelay(1);
3545         }
3546
3547         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3548         tw32_f(MAC_MODE, tp->mac_mode);
3549         udelay(40);
3550
3551         *txflags = aninfo.txconfig;
3552         *rxflags = aninfo.flags;
3553
3554         if (status == ANEG_DONE &&
3555             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3556                              MR_LP_ADV_FULL_DUPLEX)))
3557                 res = 1;
3558
3559         return res;
3560 }
3561
/* One-time bring-up sequence for the BCM8002 SerDes PHY.  The raw
 * register numbers and values below follow the vendor's undocumented
 * init sequence; the write order and delays are significant.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* XXX schedule_timeout() ... */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
3611
/* Drive link setup for fiber ports whose SG_DIG block performs
 * 1000BASE-X autonegotiation in hardware.
 *
 * @mac_status: snapshot of the MAC_STATUS register taken by the caller.
 *
 * Return: 1 if link is up, 0 otherwise.  Also negotiates flow control
 * and maintains tp->serdes_counter / parallel-detect state.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u16 flowctrl;
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revisions except 5704 A0/A1 need the MAC_SERDES_CFG
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: turn hardware autoneg off if it was on
		 * and report link as soon as PCS sync is present.
		 */
		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}

			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
	if (flowctrl & ADVERTISE_1000XPAUSE)
		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If the link came up via parallel detection and we
		 * still have PCS sync without incoming config words,
		 * keep the link while the countdown runs instead of
		 * immediately restarting autoneg.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			/* Autoneg complete: derive pause settings from
			 * what we advertised and the partner's reply.
			 */
			u32 local_adv = 0, remote_adv = 0;

			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
				remote_adv |= LPA_1000XPAUSE;
			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				/* Autoneg timed out: disable hardware
				 * autoneg and try parallel detection.
				 */
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync and no signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
3753
3754 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3755 {
3756         int current_link_up = 0;
3757
3758         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3759                 goto out;
3760
3761         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3762                 u32 txflags, rxflags;
3763                 int i;
3764
3765                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3766                         u32 local_adv = 0, remote_adv = 0;
3767
3768                         if (txflags & ANEG_CFG_PS1)
3769                                 local_adv |= ADVERTISE_1000XPAUSE;
3770                         if (txflags & ANEG_CFG_PS2)
3771                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3772
3773                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
3774                                 remote_adv |= LPA_1000XPAUSE;
3775                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3776                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3777
3778                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3779
3780                         current_link_up = 1;
3781                 }
3782                 for (i = 0; i < 30; i++) {
3783                         udelay(20);
3784                         tw32_f(MAC_STATUS,
3785                                (MAC_STATUS_SYNC_CHANGED |
3786                                 MAC_STATUS_CFG_CHANGED));
3787                         udelay(40);
3788                         if ((tr32(MAC_STATUS) &
3789                              (MAC_STATUS_SYNC_CHANGED |
3790                               MAC_STATUS_CFG_CHANGED)) == 0)
3791                                 break;
3792                 }
3793
3794                 mac_status = tr32(MAC_STATUS);
3795                 if (current_link_up == 0 &&
3796                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
3797                     !(mac_status & MAC_STATUS_RCVD_CFG))
3798                         current_link_up = 1;
3799         } else {
3800                 tg3_setup_flow_control(tp, 0, 0);
3801
3802                 /* Forcing 1000FD link up. */
3803                 current_link_up = 1;
3804
3805                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3806                 udelay(40);
3807
3808                 tw32_f(MAC_MODE, tp->mac_mode);
3809                 udelay(40);
3810         }
3811
3812 out:
3813         return current_link_up;
3814 }
3815
/* Top-level link setup for TBI (fiber) ports.  Selects between the
 * hardware SG_DIG autoneg path and the software state machine, then
 * reconciles carrier state and LED control with the result.
 *
 * @force_reset is accepted for signature parity with the other
 * tg3_setup_*_phy() variants; it is not referenced here.
 *
 * Return: 0.  Link state is reported via netif_carrier_*() and
 * tg3_link_report().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg with the link already up and
	 * healthy (synced, signal present, no pending config) - just
	 * ack the latched status bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC into TBI mode before (re)negotiating. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the latched link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config-changed until they stay clear (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Prod the partner by pulsing SEND_CONFIGS. */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* Fiber links only run 1000 Mb/s full duplex here. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Propagate carrier transitions; otherwise report only when
	 * the negotiated parameters actually changed.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
3923
/* Link setup for fiber devices whose SERDES is programmed through
 * standard MII registers (tg3_readphy/tg3_writephy) rather than the
 * MAC's hardware autoneg engine (e.g. the 5714S path, see
 * SERDES_AN_TIMEOUT_5714S below).  Programs advertisement/BMCR as
 * requested by tp->link_config, then derives link state from BMSR and
 * the resolved duplex, and finally syncs MAC_MODE and the netdev
 * carrier state with the result.
 *
 * Returns 0 on success, or the OR-accumulated tg3_readphy() error.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* This fiber flavor always runs the MAC port in GMII mode. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any latched link-state change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down events; read twice to get the
	 * current link state rather than a stale latched one.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: the MAC's TX status is authoritative for link. */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement from
		 * link_config (duplex bits + flow control).
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			/* Advertisement changed (or AN was off): restart
			 * autoneg, arm the serdes timeout counter, and
			 * return early — link will be evaluated later.
			 */
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: autoneg off, duplex from link_config. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and briefly
				 * restart AN so the peer drops the
				 * link before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read the (latched) link status now that the
			 * mode changed; same double-read + 5714 quirk
			 * as above.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		/* Fiber link is always 1000 Mb/s; only duplex varies. */
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the abilities both ends
			 * advertise; no common 1000X ability at all
			 * means the link is not usable.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			}
			else
				current_link_up = 0;
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this reads the *previous* active_duplex; the
	 * freshly resolved current_duplex is only stored below —
	 * confirm this ordering is intended.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Propagate any link transition to the net core and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4093
/* Parallel-detection state machine for serdes links.
 *
 * While tp->serdes_counter is non-zero, autoneg is still being given
 * time to finish and this routine only decrements the counter.  After
 * that:
 *  - link down with autoneg enabled: probe the PHY shadow/expansion
 *    registers; if signal detect is present but no config code words
 *    are being received, the peer is not autonegotiating, so force
 *    1000/full and set TG3_FLG2_PARALLEL_DETECT.
 *  - link up via parallel detect: if config code words reappear,
 *    re-enable autoneg and clear the flag.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			/* Read twice — presumably the first read clears
			 * a latched value; TODO confirm against the PHY
			 * datasheet.
			 */
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
4151
/* Top-level link (re)configuration: dispatch to the fiber, fiber-MII,
 * or copper PHY handler, then apply MAC-level fixups that depend on
 * the resulting link state:
 *  - 5784 AX: rescale the GRC prescaler to the current MAC clock.
 *  - 1000/half: use a larger slot time in MAC_TX_LENGTHS.
 *  - pre-5705: enable stats-block coalescing only while carrier is up.
 *  - ASPM workaround: loosen the PCIe L1 entry threshold while the
 *    link is down, tighten it when up.
 *
 * Returns the error code from the selected PHY setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	int err;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		err = tg3_setup_fiber_phy(tp, force_reset);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	} else {
		err = tg3_setup_copper_phy(tp, force_reset);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 val, scale;

		/* Pick a prescaler matching the reported MAC clock. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Slot time differs only for 1000 Mb/s half duplex. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4213
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If the reorder workaround is already active (or mailbox
	 * writes already go through the indirect path), this symptom
	 * should be impossible — treat it as a driver bug.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens elsewhere (see comment above).
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
4234
4235 static inline u32 tg3_tx_avail(struct tg3 *tp)
4236 {
4237         smp_mb();
4238         return (tp->tx_pending -
4239                 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
4240 }
4241
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3 *tp)
{
	/* Walk the TX ring from our consumer index up to the
	 * hardware's, unmapping and freeing each completed skb.
	 */
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completion for an empty slot means the hardware and
		 * driver disagree about the ring — trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Skip the descriptors used by this skb's fragments;
		 * they must all be empty and within the completed span.
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the TX lock to avoid racing a concurrent
	 * queue stop in tg3_start_xmit().
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
4300
4301 /* Returns size of skb allocated or < 0 on error.
4302  *
4303  * We only need to fill in the address because the other members
4304  * of the RX descriptor are invariant, see tg3_init_rings.
4305  *
4306  * Note the purposeful assymetry of cpu vs. chip accesses.  For
4307  * posting buffers we only dirty the first cache line of the RX
4308  * descriptor (containing the address).  Whereas for the RX status
4309  * buffers the cpu only reads the last cacheline of the RX descriptor
4310  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4311  */
4312 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4313                             int src_idx, u32 dest_idx_unmasked)
4314 {
4315         struct tg3_rx_buffer_desc *desc;
4316         struct ring_info *map, *src_map;
4317         struct sk_buff *skb;
4318         dma_addr_t mapping;
4319         int skb_size, dest_idx;
4320
4321         src_map = NULL;
4322         switch (opaque_key) {
4323         case RXD_OPAQUE_RING_STD:
4324                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4325                 desc = &tp->rx_std[dest_idx];
4326                 map = &tp->rx_std_buffers[dest_idx];
4327                 if (src_idx >= 0)
4328                         src_map = &tp->rx_std_buffers[src_idx];
4329                 skb_size = tp->rx_pkt_buf_sz;
4330                 break;
4331
4332         case RXD_OPAQUE_RING_JUMBO:
4333                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4334                 desc = &tp->rx_jumbo[dest_idx];
4335                 map = &tp->rx_jumbo_buffers[dest_idx];
4336                 if (src_idx >= 0)
4337                         src_map = &tp->rx_jumbo_buffers[src_idx];
4338                 skb_size = RX_JUMBO_PKT_BUF_SZ;
4339                 break;
4340
4341         default:
4342                 return -EINVAL;
4343         }
4344
4345         /* Do not overwrite any of the map or rp information
4346          * until we are sure we can commit to a new buffer.
4347          *
4348          * Callers depend upon this behavior and assume that
4349          * we leave everything unchanged if we fail.
4350          */
4351         skb = netdev_alloc_skb(tp->dev, skb_size);
4352         if (skb == NULL)
4353                 return -ENOMEM;
4354
4355         skb_reserve(skb, tp->rx_offset);
4356
4357         mapping = pci_map_single(tp->pdev, skb->data,
4358                                  skb_size - tp->rx_offset,
4359                                  PCI_DMA_FROMDEVICE);
4360
4361         map->skb = skb;
4362         pci_unmap_addr_set(map, mapping, mapping);
4363
4364         if (src_map != NULL)
4365                 src_map->skb = NULL;
4366
4367         desc->addr_hi = ((u64)mapping >> 32);
4368         desc->addr_lo = ((u64)mapping & 0xffffffff);
4369
4370         return skb_size;
4371 }
4372
4373 /* We only need to move over in the address because the other
4374  * members of the RX descriptor are invariant.  See notes above
4375  * tg3_alloc_rx_skb for full details.
4376  */
4377 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4378                            int src_idx, u32 dest_idx_unmasked)
4379 {
4380         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4381         struct ring_info *src_map, *dest_map;
4382         int dest_idx;
4383
4384         switch (opaque_key) {
4385         case RXD_OPAQUE_RING_STD:
4386                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4387                 dest_desc = &tp->rx_std[dest_idx];
4388                 dest_map = &tp->rx_std_buffers[dest_idx];
4389                 src_desc = &tp->rx_std[src_idx];
4390                 src_map = &tp->rx_std_buffers[src_idx];
4391                 break;
4392
4393         case RXD_OPAQUE_RING_JUMBO:
4394                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4395                 dest_desc = &tp->rx_jumbo[dest_idx];
4396                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4397                 src_desc = &tp->rx_jumbo[src_idx];
4398                 src_map = &tp->rx_jumbo_buffers[src_idx];
4399                 break;
4400
4401         default:
4402                 return;
4403         }
4404
4405         dest_map->skb = src_map->skb;
4406         pci_unmap_addr_set(dest_map, mapping,
4407                            pci_unmap_addr(src_map, mapping));
4408         dest_desc->addr_hi = src_desc->addr_hi;
4409         dest_desc->addr_lo = src_desc->addr_lo;
4410
4411         src_map->skb = NULL;
4412 }
4413
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged frame to GRO with its hardware-extracted tag. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_gro_receive(&tp->napi, tp->vlgrp, vlan_tag, skb);
}
#endif
4420
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Consume status-ring entries until we catch the hardware
	 * producer or exhaust the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the posting ring and
		 * the buffer index within it.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		/* Drop errored frames (except the MII odd-nibble case),
		 * recycling the buffer back to its posting ring.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == NET_IP_ALIGN
			/* rx_offset will likely not equal NET_IP_ALIGN
			 * if this is a 5701 card running in PCI-X mode
			 * [see tg3_get_invariants()]
			 */
		) {
			int skb_size;

			/* Large frame: hand the filled buffer up and
			 * post a freshly allocated replacement.
			 */
			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			/* Small frame: copy into a new skb and recycle
			 * the original DMA buffer in place.
			 */
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev,
						    len + TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when enabled and the
		 * chip reports a fully validated TCP/UDP checksum.
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Discard oversized non-VLAN frames. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto next_pkt;
		}

#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			napi_gro_receive(&tp->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about std-ring buffers we
		 * have posted so it does not run dry mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	/* Flush posted mailbox writes before returning to NAPI. */
	mmiowb();

	return received;
}
4610
/* One pass of NAPI work: handle a link-change event if flagged in the
 * status block, reap TX completions, then receive within the budget.
 * Returns the updated work_done count; returns early (without RX) if
 * tg3_tx() flagged TX_RECOVERY_PENDING.
 */
static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
{
	struct tg3_hw_status *sblk = tp->hw_status;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear LINK_CHG while keeping the rest of the
			 * status word, and mark the block updated.
			 */
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
				/* phylib owns the link: just ack the
				 * MAC's latched state-change bits.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			return work_done;
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
		work_done += tg3_rx(tp, budget - work_done);

	return work_done;
}
4652
/* NAPI poll callback.  Repeatedly calls tg3_poll_work() until either
 * the budget is exhausted or no work remains, then completes NAPI and
 * re-enables chip interrupts.  A pending TX recovery aborts polling
 * and hands off to the reset task.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3 *tp = container_of(napi, struct tg3, napi);
	int work_done = 0;
	struct tg3_hw_status *sblk = tp->hw_status;

	while (1) {
		work_done = tg3_poll_work(tp, work_done, budget);

		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
			/* tp->last_tag is used in tg3_restart_ints() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tp->last_tag = sblk->status_tag;
			tp->last_irq_tag = tp->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tp))) {
			napi_complete(napi);
			tg3_restart_ints(tp);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
4694
/* Quiesce interrupt handling: mark irq_sync so the ISRs stop scheduling
 * NAPI (see tg3_irq_sync()) and wait for any handler already running on
 * another CPU to finish.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	/* Make the irq_sync store visible before waiting on in-flight
	 * handlers, which read it without holding tp->lock.
	 */
	smp_mb();

	synchronize_irq(tp->pdev->irq);
}
4704
/* Nonzero while tg3_irq_quiesce() has IRQ handling suspended; ISRs
 * check this before scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4709
4710 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4711  * If irq_sync is non-zero, then the IRQ handler must be synchronized
4712  * with as well.  Most of the time, this is not necessary except when
4713  * shutting down the device.
4714  */
/* Take the driver-wide lock; optionally quiesce the IRQ handler too. */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
4721
/* Release the lock taken by tg3_full_lock().  Does not undo an IRQ
 * quiesce; callers clear tp->irq_sync separately (see tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
4726
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	/* Skip scheduling NAPI while an IRQ quiesce is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tp->napi);

	return IRQ_HANDLED;
}
4743
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm the cache lines the NAPI poll will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	/* Don't schedule NAPI while an IRQ quiesce is in progress. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tp->napi);

	return IRQ_RETVAL(1);
}
4768
/* Legacy INTx ISR for chips using the SD_STATUS_UPDATED flag (untagged
 * status blocks).  May share the IRQ line with other devices.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		napi_schedule(&tp->napi);
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
4817
/* INTx ISR for chips using tagged status blocks.  The hardware status
 * tag is compared against the last tag we recorded to decide whether
 * this interrupt is really ours.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_irq_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream.  We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled.  Eventually they will be silenced.
	 */
	tp->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))
		goto out;

	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	napi_schedule(&tp->napi);

out:
	return IRQ_RETVAL(handled);
}
4869
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * chip still asserts INTA, then disable further interrupts so
	 * the self-test observes exactly one delivery.
	 */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	}
	return IRQ_RETVAL(0);
}
4884
4885 static int tg3_init_hw(struct tg3 *, int);
4886 static int tg3_halt(struct tg3 *, int, int);
4887
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success; on failure the
 * device is shut down and closed before returning the error.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: halt the chip and close the device.
		 * dev_close() must run without tp->lock, so the lock is
		 * dropped and re-acquired around the teardown.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		napi_enable(&tp->napi);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
4911
4912 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point: invoke the INTx handler directly to poll the
 * device when normal interrupt delivery is unavailable.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
4919 #endif
4920
/* Process-context worker that fully halts and re-initializes the chip,
 * scheduled after a TX timeout or a TX recovery event.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	/* Nothing to do if the interface was brought down meanwhile. */
	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* After a TX recovery, switch to the mailbox write
		 * methods that tolerate write reordering (matching
		 * TG3_FLAG_MBOX_WRITE_REORDER being set below).
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);
}
4968
/* Print a minimal MAC/DMA status-register snapshot to aid TX timeout
 * debugging.
 */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
4976
/* net_device watchdog hook: a TX queue stalled.  Optionally log some
 * hardware state, then defer the full chip reset to process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
4989
4990 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4991 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4992 {
4993         u32 base = (u32) mapping & 0xffffffff;
4994
4995         return ((base > 0xffffdcc0) &&
4996                 (base + len + 8 < base));
4997 }
4998
4999 /* Test for DMA addresses > 40-bit */
5000 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5001                                           int len)
5002 {
5003 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5004         if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5005                 return (((u64) mapping + len) > DMA_BIT_MASK(40));
5006         return 0;
5007 #else
5008         return 0;
5009 #endif
5010 }
5011
5012 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
5013
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Linearize @skb into a freshly allocated skb whose single DMA mapping
 * does not cross a 4GB boundary, point the descriptor at *start at it,
 * fix up sw ring ownership for entries [*start, last_plus_one), and
 * release the original skb.  Returns 0 on success, -1 on allocation or
 * mapping failure (the caller then silently drops the packet).
 */
static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
				       u32 last_plus_one, u32 *start,
				       u32 base_flags, u32 mss)
{
	struct sk_buff *new_skb;
	dma_addr_t new_addr = 0;
	u32 entry = *start;
	int i, ret = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		new_skb = skb_copy(skb, GFP_ATOMIC);
	else {
		/* 5701 path: copy with enough extra headroom to realign
		 * the data to a 4-byte boundary.
		 */
		int more_headroom = 4 - ((unsigned long)skb->data & 3);

		new_skb = skb_copy_expand(skb,
					  skb_headroom(skb) + more_headroom,
					  skb_tailroom(skb), GFP_ATOMIC);
	}

	if (!new_skb) {
		ret = -1;
	} else {
		/* New SKB is guaranteed to be linear. */
		entry = *start;
		ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
		new_addr = skb_shinfo(new_skb)->dma_head;

		/* Make sure new skb does not cross any 4G boundaries.
		 * Drop the packet if it does.
		 */
		if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
			if (!ret)
				skb_dma_unmap(&tp->pdev->dev, new_skb,
					      DMA_TO_DEVICE);
			ret = -1;
			dev_kfree_skb(new_skb);
			new_skb = NULL;
		} else {
			tg3_set_txd(tp, entry, new_addr, new_skb->len,
				    base_flags, 1 | (mss << 1));
			*start = NEXT_TX(entry);
		}
	}

	/* Now clean up the sw ring entries. */
	i = 0;
	while (entry != last_plus_one) {
		/* First entry owns the replacement skb (or NULL on
		 * failure); the rest are cleared.
		 */
		if (i == 0) {
			tp->tx_buffers[entry].skb = new_skb;
		} else {
			tp->tx_buffers[entry].skb = NULL;
		}
		entry = NEXT_TX(entry);
		i++;
	}

	skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	return ret;
}
5076
5077 static void tg3_set_txd(struct tg3 *tp, int entry,
5078                         dma_addr_t mapping, int len, u32 flags,
5079                         u32 mss_and_is_end)
5080 {
5081         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
5082         int is_end = (mss_and_is_end & 0x1);
5083         u32 mss = (mss_and_is_end >> 1);
5084         u32 vlan_tag = 0;
5085
5086         if (is_end)
5087                 flags |= TXD_FLAG_END;
5088         if (flags & TXD_FLAG_VLAN) {
5089                 vlan_tag = flags >> 16;
5090                 flags &= 0xffff;
5091         }
5092         vlan_tag |= (mss << TXD_MSS_SHIFT);
5093
5094         txd->addr_hi = ((u64) mapping >> 32);
5095         txd->addr_lo = ((u64) mapping & 0xffffffff);
5096         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5097         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5098 }
5099
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* The TSO headers must be writable; un-clone first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Seed the IP header fields for hardware TSO. */
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_head;

	tp->tx_buffers[entry].skb = skb;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i];
			tp->tx_buffers[entry].skb = NULL;

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;
}
5217
5218 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
5219
5220 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5221  * TSO header is greater than 80 bytes.
5222  */
5223 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5224 {
5225         struct sk_buff *segs, *nskb;
5226
5227         /* Estimate the number of fragments in the worst case */
5228         if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
5229                 netif_stop_queue(tp->dev);
5230                 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
5231                         return NETDEV_TX_BUSY;
5232
5233                 netif_wake_queue(tp->dev);
5234         }
5235
5236         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5237         if (IS_ERR(segs))
5238                 goto tg3_tso_bug_end;
5239
5240         do {
5241                 nskb = segs;
5242                 segs = segs->next;
5243                 nskb->next = NULL;
5244                 tg3_start_xmit_dma_bug(nskb, tp->dev);
5245         } while (segs);
5246
5247 tg3_tso_bug_end:
5248         dev_kfree_skb(skb);
5249
5250         return NETDEV_TX_OK;
5251 }
5252
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 len, entry, base_flags, mss;
	struct skb_shared_info *sp;
	int would_hit_hwbug;
	dma_addr_t mapping;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->napi.poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* The TSO headers must be writable; un-clone first. */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Headers longer than 80 bytes trip the TSO bug on some
		 * chips; fall back to software segmentation via GSO.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Encode IP/TCP option lengths where the chip expects
		 * them; the bit position differs between HW-TSO/5705
		 * and the firmware-TSO chips.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		goto out_unlock;
	}

	sp = skb_shinfo(skb);

	mapping = sp->dma_head;

	tp->tx_buffers[entry].skb = skb;

	would_hit_hwbug = 0;

	/* Check the linear part against the known DMA errata. */
	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
		would_hit_hwbug = 1;
	else if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = sp->dma_maps[i];

			tp->tx_buffers[entry].skb = NULL;

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Rewind to the descriptor that started this packet. */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		/* If the workaround fails due to memory/mapping
		 * failure, silently drop this packet.
		 */
		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
	mmiowb();

	return NETDEV_TX_OK;
}
5428
5429 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5430                                int new_mtu)
5431 {
5432         dev->mtu = new_mtu;
5433
5434         if (new_mtu > ETH_DATA_LEN) {
5435                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5436                         tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5437                         ethtool_op_set_tso(dev, 0);
5438                 }
5439                 else
5440                         tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5441         } else {
5442                 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5443                         tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5444                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5445         }
5446 }
5447
/* ndo_change_mtu hook.  Validates the requested MTU, then — if the
 * interface is running — stops traffic, halts the chip, applies the
 * new MTU/jumbo settings, and restarts the hardware.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;

	if (!netif_running(dev)) {
		/* We'll just catch it later when the
		 * device is up'd.
		 */
		tg3_set_mtu(dev, tp, new_mtu);
		return 0;
	}

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	/* Quiesce IRQs (second arg) while the chip is reconfigured. */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

	tg3_set_mtu(dev, tp, new_mtu);

	err = tg3_restart_hw(tp, 0);

	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
5486
5487 /* Free up pending packets in all rx/tx rings.
5488  *
5489  * The chip has been shut down and the driver detached from
5490  * the networking, so no interrupts or new tx packets will
5491  * end up in the driver.  tp->{tx,}lock is not held and we are not
5492  * in an interrupt context and thus may sleep.
5493  */
5494 static void tg3_free_rings(struct tg3 *tp)
5495 {
5496         struct ring_info *rxp;
5497         int i;
5498
5499         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5500                 rxp = &tp->rx_std_buffers[i];
5501
5502                 if (rxp->skb == NULL)
5503                         continue;
5504                 pci_unmap_single(tp->pdev,
5505                                  pci_unmap_addr(rxp, mapping),
5506                                  tp->rx_pkt_buf_sz - tp->rx_offset,
5507                                  PCI_DMA_FROMDEVICE);
5508                 dev_kfree_skb_any(rxp->skb);
5509                 rxp->skb = NULL;
5510         }
5511
5512         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5513                 rxp = &tp->rx_jumbo_buffers[i];
5514
5515                 if (rxp->skb == NULL)
5516                         continue;
5517                 pci_unmap_single(tp->pdev,
5518                                  pci_unmap_addr(rxp, mapping),
5519                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5520                                  PCI_DMA_FROMDEVICE);
5521                 dev_kfree_skb_any(rxp->skb);
5522                 rxp->skb = NULL;
5523         }
5524
5525         for (i = 0; i < TG3_TX_RING_SIZE; ) {
5526                 struct tx_ring_info *txp;
5527                 struct sk_buff *skb;
5528
5529                 txp = &tp->tx_buffers[i];
5530                 skb = txp->skb;
5531
5532                 if (skb == NULL) {
5533                         i++;
5534                         continue;
5535                 }
5536
5537                 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5538
5539                 txp->skb = NULL;
5540
5541                 i += skb_shinfo(skb)->nr_frags + 1;
5542
5543                 dev_kfree_skb_any(skb);
5544         }
5545 }
5546
5547 /* Initialize tx/rx rings for packet processing.
5548  *
5549  * The chip has been shut down and the driver detached from
5550  * the networking, so no interrupts or new tx packets will
5551  * end up in the driver.  tp->{tx,}lock are held and thus
5552  * we may not sleep.
5553  */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips use larger standard-ring buffers instead of
	 * the jumbo ring when the MTU exceeds the standard payload size
	 * (see tg3_set_mtu()).
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial allocation
	 * shrinks rx_pending rather than failing, unless even the first
	 * buffer could not be allocated.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					/* Std-ring buffers were already
					 * posted; release them too.
					 */
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
5636
5637 /*
5638  * Must not be invoked with interrupt sources disabled and
5639  * the hardware shutdown down.
5640  */
static void tg3_free_consistent(struct tg3 *tp)
{
	/* rx_jumbo_buffers and tx_buffers point into this single
	 * kzalloc'd region (see tg3_alloc_consistent()), so only the
	 * base pointer is freed.
	 */
	kfree(tp->rx_std_buffers);
	tp->rx_std_buffers = NULL;
	/* Each DMA-coherent region is freed and NULLed individually so
	 * this is safe to call on a partially-allocated state from the
	 * tg3_alloc_consistent() error path.
	 */
	if (tp->rx_std) {
		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
				    tp->rx_std, tp->rx_std_mapping);
		tp->rx_std = NULL;
	}
	if (tp->rx_jumbo) {
		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
				    tp->rx_jumbo, tp->rx_jumbo_mapping);
		tp->rx_jumbo = NULL;
	}
	if (tp->rx_rcb) {
		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
				    tp->rx_rcb, tp->rx_rcb_mapping);
		tp->rx_rcb = NULL;
	}
	if (tp->tx_ring) {
		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
			tp->tx_ring, tp->tx_desc_mapping);
		tp->tx_ring = NULL;
	}
	if (tp->hw_status) {
		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
				    tp->hw_status, tp->status_mapping);
		tp->hw_status = NULL;
	}
	if (tp->hw_stats) {
		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
				    tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
5676
5677 /*
5678  * Must not be invoked with interrupt sources disabled and
5679  * the hardware shutdown down.  Can sleep.
5680  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	/* One kzalloc covers the std-rx, jumbo-rx and tx bookkeeping
	 * arrays; the jumbo and tx pointers are carved out of it below.
	 */
	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
				      (TG3_RX_RING_SIZE +
				       TG3_RX_JUMBO_RING_SIZE)) +
				     (sizeof(struct tx_ring_info) *
				      TG3_TX_RING_SIZE),
				     GFP_KERNEL);
	if (!tp->rx_std_buffers)
		return -ENOMEM;

	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
	tp->tx_buffers = (struct tx_ring_info *)
		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];

	/* DMA-coherent descriptor rings and status/stats blocks.  Any
	 * failure unwinds everything via tg3_free_consistent().
	 */
	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
					  &tp->rx_std_mapping);
	if (!tp->rx_std)
		goto err_out;

	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
					    &tp->rx_jumbo_mapping);

	if (!tp->rx_jumbo)
		goto err_out;

	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
					  &tp->rx_rcb_mapping);
	if (!tp->rx_rcb)
		goto err_out;

	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
					   &tp->tx_desc_mapping);
	if (!tp->tx_ring)
		goto err_out;

	tp->hw_status = pci_alloc_consistent(tp->pdev,
					     TG3_HW_STATUS_SIZE,
					     &tp->status_mapping);
	if (!tp->hw_status)
		goto err_out;

	tp->hw_stats = pci_alloc_consistent(tp->pdev,
					    sizeof(struct tg3_hw_stats),
					    &tp->stats_mapping);
	if (!tp->hw_stats)
		goto err_out;

	/* pci_alloc_consistent() does not zero the memory. */
	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
5738
5739 #define MAX_WAIT_CNT 1000
5740
5741 /* To stop a block, clear the enable bit and poll till it
5742  * clears.  tp->lock is held.
5743  */
5744 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
5745 {
5746         unsigned int i;
5747         u32 val;
5748
5749         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5750                 switch (ofs) {
5751                 case RCVLSC_MODE:
5752                 case DMAC_MODE:
5753                 case MBFREE_MODE:
5754                 case BUFMGR_MODE:
5755                 case MEMARB_MODE:
5756                         /* We can't enable/disable these bits of the
5757                          * 5705/5750, just say success.
5758                          */
5759                         return 0;
5760
5761                 default:
5762                         break;
5763                 }
5764         }
5765
5766         val = tr32(ofs);
5767         val &= ~enable_bit;
5768         tw32_f(ofs, val);
5769
5770         for (i = 0; i < MAX_WAIT_CNT; i++) {
5771                 udelay(100);
5772                 val = tr32(ofs);
5773                 if ((val & enable_bit) == 0)
5774                         break;
5775         }
5776
5777         if (i == MAX_WAIT_CNT && !silent) {
5778                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5779                        "ofs=%lx enable_bit=%x\n",
5780                        ofs, enable_bit);
5781                 return -ENODEV;
5782         }
5783
5784         return 0;
5785 }
5786
5787 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic before tearing down the rx path. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the receive-side blocks.  Errors are OR'd together so
	 * the whole sequence always runs; tg3_stop_block() quietly skips
	 * blocks that cannot be disabled on 5705+ chips.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Shut down the transmit-side blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll up to MAX_WAIT_CNT * 100us
	 * for the enable bit to clear.
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset bits. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the shared status and statistics blocks, if allocated. */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
5849
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* Bail out unless the APE firmware has published a valid segment
	 * signature and reports itself ready.
	 */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event has been consumed: post the new one while
		 * still holding the memory lock.
		 */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if the event was actually posted
	 * (i.e. we did not time out waiting for the previous one).
	 */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
5885
5886 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5887 {
5888         u32 event;
5889         u32 apedata;
5890
5891         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5892                 return;
5893
5894         switch (kind) {
5895                 case RESET_KIND_INIT:
5896                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5897                                         APE_HOST_SEG_SIG_MAGIC);
5898                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5899                                         APE_HOST_SEG_LEN_MAGIC);
5900                         apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5901                         tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5902                         tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5903                                         APE_HOST_DRIVER_ID_MAGIC);
5904                         tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5905                                         APE_HOST_BEHAV_NO_PHYLOCK);
5906
5907                         event = APE_EVENT_STATUS_STATE_START;
5908                         break;
5909                 case RESET_KIND_SHUTDOWN:
5910                         /* With the interface we are currently using,
5911                          * APE does not track driver state.  Wiping
5912                          * out the HOST SEGMENT SIGNATURE forces
5913                          * the APE to assume OS absent status.
5914                          */
5915                         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5916
5917                         event = APE_EVENT_STATUS_STATE_UNLOAD;
5918                         break;
5919                 case RESET_KIND_SUSPEND:
5920                         event = APE_EVENT_STATUS_STATE_SUSPEND;
5921                         break;
5922                 default:
5923                         return;
5924         }
5925
5926         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5927
5928         tg3_ape_send_event(tp, event);
5929 }
5930
5931 /* tp->lock is held. */
5932 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5933 {
5934         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5935                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5936
5937         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5938                 switch (kind) {
5939                 case RESET_KIND_INIT:
5940                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5941                                       DRV_STATE_START);
5942                         break;
5943
5944                 case RESET_KIND_SHUTDOWN:
5945                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5946                                       DRV_STATE_UNLOAD);
5947                         break;
5948
5949                 case RESET_KIND_SUSPEND:
5950                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5951                                       DRV_STATE_SUSPEND);
5952                         break;
5953
5954                 default:
5955                         break;
5956                 }
5957         }
5958
5959         if (kind == RESET_KIND_INIT ||
5960             kind == RESET_KIND_SUSPEND)
5961                 tg3_ape_driver_state_change(tp, kind);
5962 }
5963
5964 /* tp->lock is held. */
5965 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5966 {
5967         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5968                 switch (kind) {
5969                 case RESET_KIND_INIT:
5970                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5971                                       DRV_STATE_START_DONE);
5972                         break;
5973
5974                 case RESET_KIND_SHUTDOWN:
5975                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5976                                       DRV_STATE_UNLOAD_DONE);
5977                         break;
5978
5979                 default:
5980                         break;
5981                 }
5982         }
5983
5984         if (kind == RESET_KIND_SHUTDOWN)
5985                 tg3_ape_driver_state_change(tp, kind);
5986 }
5987
5988 /* tp->lock is held. */
5989 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5990 {
5991         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5992                 switch (kind) {
5993                 case RESET_KIND_INIT:
5994                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5995                                       DRV_STATE_START);
5996                         break;
5997
5998                 case RESET_KIND_SHUTDOWN:
5999                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6000                                       DRV_STATE_UNLOAD);
6001                         break;
6002
6003                 case RESET_KIND_SUSPEND:
6004                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6005                                       DRV_STATE_SUSPEND);
6006                         break;
6007
6008                 default:
6009                         break;
6010                 }
6011         }
6012 }
6013
6014 static int tg3_poll_fw(struct tg3 *tp)
6015 {
6016         int i;
6017         u32 val;
6018
6019         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6020                 /* Wait up to 20ms for init done. */
6021                 for (i = 0; i < 200; i++) {
6022                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6023                                 return 0;
6024                         udelay(100);
6025                 }
6026                 return -ENODEV;
6027         }
6028
6029         /* Wait for firmware initialization to complete. */
6030         for (i = 0; i < 100000; i++) {
6031                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6032                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6033                         break;
6034                 udelay(10);
6035         }
6036
6037         /* Chip might not be fitted with firmware.  Some Sun onboard
6038          * parts are configured like that.  So don't signal the timeout
6039          * of the above loop as an error, but do report the lack of
6040          * running firmware once.
6041          */
6042         if (i >= 100000 &&
6043             !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6044                 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6045
6046                 printk(KERN_INFO PFX "%s: No firmware running.\n",
6047                        tp->dev->name);
6048         }
6049
6050         return 0;
6051 }
6052
6053 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset can clear the memory-enable bit in the
	 * PCI command register (see tg3_chip_reset()); the saved value is
	 * written back by tg3_restore_pci_state().
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
6058
6059 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Restore read-request size / cacheline / latency, which the
	 * reset may have clobbered (skipped on the 5785).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
			pcie_set_readrq(tp->pdev, 4096);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
6122
6123 static void tg3_stop_fw(struct tg3 *);
6124
6125 /* tp->lock is held. */
6126 static int tg3_chip_reset(struct tg3 *tp)
6127 {
6128         u32 val;
6129         void (*write_op)(struct tg3 *, u32, u32);
6130         int err;
6131
6132         tg3_nvram_lock(tp);
6133
6134         tg3_mdio_stop(tp);
6135
6136         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6137
6138         /* No matching tg3_nvram_unlock() after this because
6139          * chip reset below will undo the nvram lock.
6140          */
6141         tp->nvram_lock_cnt = 0;
6142
6143         /* GRC_MISC_CFG core clock reset will clear the memory
6144          * enable bit in PCI register 4 and the MSI enable bit
6145          * on some chips, so we save relevant registers here.
6146          */
6147         tg3_save_pci_state(tp);
6148
6149         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6150             (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6151                 tw32(GRC_FASTBOOT_PC, 0);
6152
6153         /*
6154          * We must avoid the readl() that normally takes place.
6155          * It locks machines, causes machine checks, and other
6156          * fun things.  So, temporarily disable the 5701
6157          * hardware workaround, while we do the reset.
6158          */
6159         write_op = tp->write32;
6160         if (write_op == tg3_write_flush_reg32)
6161                 tp->write32 = tg3_write32;
6162
6163         /* Prevent the irq handler from reading or writing PCI registers
6164          * during chip reset when the memory enable bit in the PCI command
6165          * register may be cleared.  The chip does not generate interrupt
6166          * at this time, but the irq handler may still be called due to irq
6167          * sharing or irqpoll.
6168          */
6169         tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6170         if (tp->hw_status) {
6171                 tp->hw_status->status = 0;
6172                 tp->hw_status->status_tag = 0;
6173         }
6174         tp->last_tag = 0;
6175         tp->last_irq_tag = 0;
6176         smp_mb();
6177         synchronize_irq(tp->pdev->irq);
6178
6179         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6180                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6181                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6182         }
6183
6184         /* do the reset */
6185         val = GRC_MISC_CFG_CORECLK_RESET;
6186
6187         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6188                 if (tr32(0x7e2c) == 0x60) {
6189                         tw32(0x7e2c, 0x20);
6190                 }
6191                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6192                         tw32(GRC_MISC_CFG, (1 << 29));
6193                         val |= (1 << 29);
6194                 }
6195         }
6196
6197         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6198                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6199                 tw32(GRC_VCPU_EXT_CTRL,
6200                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6201         }
6202
6203         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6204                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6205         tw32(GRC_MISC_CFG, val);
6206
6207         /* restore 5701 hardware bug workaround write method */
6208         tp->write32 = write_op;
6209
6210         /* Unfortunately, we have to delay before the PCI read back.
6211          * Some 575X chips even will not respond to a PCI cfg access
6212          * when the reset command is given to the chip.
6213          *
6214          * How do these hardware designers expect things to work
6215          * properly if the PCI write is posted for a long period
6216          * of time?  It is always necessary to have some method by
6217          * which a register read back can occur to push the write
6218          * out which does the reset.
6219          *
6220          * For most tg3 variants the trick below was working.
6221          * Ho hum...
6222          */
6223         udelay(120);
6224
6225         /* Flush PCI posted writes.  The normal MMIO registers
6226          * are inaccessible at this time so this is the only
6227          * way to make this reliably (actually, this is no longer
6228          * the case, see above).  I tried to use indirect
6229          * register read/write but this upset some 5701 variants.
6230          */
6231         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6232
6233         udelay(120);
6234
6235         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6236                 u16 val16;
6237
6238                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6239                         int i;
6240                         u32 cfg_val;
6241
6242                         /* Wait for link training to complete.  */
6243                         for (i = 0; i < 5000; i++)
6244                                 udelay(100);
6245
6246                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6247                         pci_write_config_dword(tp->pdev, 0xc4,
6248                                                cfg_val | (1 << 15));
6249                 }
6250
6251                 /* Clear the "no snoop" and "relaxed ordering" bits. */
6252                 pci_read_config_word(tp->pdev,
6253                                      tp->pcie_cap + PCI_EXP_DEVCTL,
6254                                      &val16);
6255                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6256                            PCI_EXP_DEVCTL_NOSNOOP_EN);
6257                 /*
6258                  * Older PCIe devices only support the 128 byte
6259                  * MPS setting.  Enforce the restriction.
6260                  */
6261                 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6262                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6263                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6264                 pci_write_config_word(tp->pdev,
6265                                       tp->pcie_cap + PCI_EXP_DEVCTL,
6266                                       val16);
6267
6268                 pcie_set_readrq(tp->pdev, 4096);
6269
6270                 /* Clear error status */
6271                 pci_write_config_word(tp->pdev,
6272                                       tp->pcie_cap + PCI_EXP_DEVSTA,
6273                                       PCI_EXP_DEVSTA_CED |
6274                                       PCI_EXP_DEVSTA_NFED |
6275                                       PCI_EXP_DEVSTA_FED |
6276                                       PCI_EXP_DEVSTA_URD);
6277         }
6278
6279         tg3_restore_pci_state(tp);
6280
6281         tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6282
6283         val = 0;
6284         if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6285                 val = tr32(MEMARB_MODE);
6286         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6287
6288         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6289                 tg3_stop_fw(tp);
6290                 tw32(0x5000, 0x400);
6291         }
6292
6293         tw32(GRC_MODE, tp->grc_mode);
6294
6295         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6296                 val = tr32(0xc4);
6297
6298                 tw32(0xc4, val | (1 << 15));
6299         }
6300
6301         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6302             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6303                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6304                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6305                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6306                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6307         }
6308
6309         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6310                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6311                 tw32_f(MAC_MODE, tp->mac_mode);
6312         } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6313                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6314                 tw32_f(MAC_MODE, tp->mac_mode);
6315         } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6316                 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6317                 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6318                         tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6319                 tw32_f(MAC_MODE, tp->mac_mode);
6320         } else
6321                 tw32_f(MAC_MODE, 0);
6322         udelay(40);
6323
6324         tg3_mdio_start(tp);
6325
6326         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
6327
6328         err = tg3_poll_fw(tp);
6329         if (err)
6330                 return err;
6331
6332         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6333             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6334                 val = tr32(0x7c00);
6335
6336                 tw32(0x7c00, val | (1 << 25));
6337         }
6338
6339         /* Reprobe ASF enable state.  */
6340         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6341         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6342         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6343         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6344                 u32 nic_cfg;
6345
6346                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6347                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6348                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6349                         tp->last_event_jiffies = jiffies;
6350                         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6351                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6352                 }
6353         }
6354
6355         return 0;
6356 }
6357
6358 /* tp->lock is held. */
6359 static void tg3_stop_fw(struct tg3 *tp)
6360 {
6361         if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6362            !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6363                 /* Wait for RX cpu to ACK the previous event. */
6364                 tg3_wait_for_event_ack(tp);
6365
6366                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6367
6368                 tg3_generate_fw_event(tp);
6369
6370                 /* Wait for RX cpu to ACK this event. */
6371                 tg3_wait_for_event_ack(tp);
6372         }
6373 }
6374
/* tp->lock is held.
 *
 * Fully halt the device: stop the firmware, signal the reset kind,
 * abort pending DMA, reset the chip, and restore the MAC address.
 * Returns the result of the chip reset (0 on success).
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	/* The reset clobbers the MAC address registers. */
	__tg3_set_mac_addr(tp, 0);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
6397
6398 #define RX_CPU_SCRATCH_BASE     0x30000
6399 #define RX_CPU_SCRATCH_SIZE     0x04000
6400 #define TX_CPU_SCRATCH_BASE     0x34000
6401 #define TX_CPU_SCRATCH_SIZE     0x04000
6402
/* tp->lock is held.
 *
 * Halt the embedded RX or TX CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  Returns 0 on success, -ENODEV if the CPU never
 * reports the halted state.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	/* 5705 and later chips have no TX CPU; asking to halt one is a
	 * driver bug.
	 */
	BUG_ON(offset == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* The 5906 VCPU is halted through a GRC control bit
		 * rather than the per-CPU CPU_MODE register.
		 */
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		/* Repeatedly request a halt until the CPU reports it. */
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* For the RX CPU, always issue one final flushed halt
		 * request and give the hardware a moment to settle.
		 */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	/* i reaches the limit only when the halt bit never latched. */
	if (i >= 10000) {
		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
		       "and %s CPU\n",
		       tp->dev->name,
		       (offset == RX_CPU_BASE ? "RX" : "TX"));
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
6450
/* Describes a firmware image to be copied into an on-chip CPU. */
struct fw_info {
	unsigned int fw_base;		/* firmware start address (initial CPU_PC) */
	unsigned int fw_len;		/* image length in bytes */
	const __be32 *fw_data;		/* big-endian firmware words */
};
6456
/* tp->lock is held.
 *
 * Copy the firmware image described by @info into the scratch memory
 * of the CPU at @cpu_base, leaving the CPU halted.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
				 int cpu_scratch_size, struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE &&
	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
		       "TX cpu firmware on %s which is 5705.\n",
		       tp->dev->name);
		return -EINVAL;
	}

	/* 5705+ chips write scratch memory through the SRAM window;
	 * older chips use indirect register writes.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	/* Zero the scratch area, keep the CPU halted, then copy the
	 * image in one 32-bit word at a time.
	 */
	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
6502
/* tp->lock is held.
 *
 * Load replacement firmware into both embedded CPUs (workaround for
 * 5701 A0 silicon, per the function name) and then start only the RX
 * CPU.  Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Verify the program counter took; retry a few times since the
	 * write may not stick immediately.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
		       "to set RX CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
	/* Release the RX CPU from halt so it begins executing. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
6558
6559 /* 5705 needs a special version of the TSO firmware.  */
6560
/* tp->lock is held.
 *
 * Load the TSO firmware into the appropriate on-chip CPU and start
 * it.  No-op when the chip handles TSO in hardware.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* skip the 3-word header */
	info.fw_data = &fw_data[3];

	/* The 5705 runs TSO firmware on the RX CPU out of the mbuf
	 * pool; other chips use the TX CPU scratch area.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Verify the program counter took; retry a few times since the
	 * write may not stick immediately.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.fw_base);
		return -ENODEV;
	}
	/* Release the CPU from halt so the firmware begins running. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
6623
6624
6625 static int tg3_set_mac_addr(struct net_device *dev, void *p)
6626 {
6627         struct tg3 *tp = netdev_priv(dev);
6628         struct sockaddr *addr = p;
6629         int err = 0, skip_mac_1 = 0;
6630
6631         if (!is_valid_ether_addr(addr->sa_data))
6632                 return -EINVAL;
6633
6634         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6635
6636         if (!netif_running(dev))
6637                 return 0;
6638
6639         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6640                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
6641
6642                 addr0_high = tr32(MAC_ADDR_0_HIGH);
6643                 addr0_low = tr32(MAC_ADDR_0_LOW);
6644                 addr1_high = tr32(MAC_ADDR_1_HIGH);
6645                 addr1_low = tr32(MAC_ADDR_1_LOW);
6646
6647                 /* Skip MAC addr 1 if ASF is using it. */
6648                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
6649                     !(addr1_high == 0 && addr1_low == 0))
6650                         skip_mac_1 = 1;
6651         }
6652         spin_lock_bh(&tp->lock);
6653         __tg3_set_mac_addr(tp, skip_mac_1);
6654         spin_unlock_bh(&tp->lock);
6655
6656         return err;
6657 }
6658
6659 /* tp->lock is held. */
6660 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
6661                            dma_addr_t mapping, u32 maxlen_flags,
6662                            u32 nic_addr)
6663 {
6664         tg3_write_mem(tp,
6665                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
6666                       ((u64) mapping >> 32));
6667         tg3_write_mem(tp,
6668                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6669                       ((u64) mapping & 0xffffffff));
6670         tg3_write_mem(tp,
6671                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6672                        maxlen_flags);
6673
6674         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6675                 tg3_write_mem(tp,
6676                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6677                               nic_addr);
6678 }
6679
6680 static void __tg3_set_rx_mode(struct net_device *);
6681 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6682 {
6683         tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6684         tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6685         tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6686         tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6687         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6688                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6689                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6690         }
6691         tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6692         tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6693         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6694                 u32 val = ec->stats_block_coalesce_usecs;
6695
6696                 if (!netif_carrier_ok(tp->dev))
6697                         val = 0;
6698
6699                 tw32(HOSTCC_STAT_COAL_TICKS, val);
6700         }
6701 }
6702
6703 /* tp->lock is held. */
6704 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6705 {
6706         u32 val, rdmac_mode;
6707         int i, err, limit;
6708
6709         tg3_disable_ints(tp);
6710
6711         tg3_stop_fw(tp);
6712
6713         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6714
6715         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6716                 tg3_abort_hw(tp, 1);
6717         }
6718
6719         if (reset_phy &&
6720             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
6721                 tg3_phy_reset(tp);
6722
6723         err = tg3_chip_reset(tp);
6724         if (err)
6725                 return err;
6726
6727         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6728
6729         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
6730                 val = tr32(TG3_CPMU_CTRL);
6731                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
6732                 tw32(TG3_CPMU_CTRL, val);
6733
6734                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
6735                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
6736                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
6737                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
6738
6739                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
6740                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
6741                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
6742                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
6743
6744                 val = tr32(TG3_CPMU_HST_ACC);
6745                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
6746                 val |= CPMU_HST_ACC_MACCLK_6_25;
6747                 tw32(TG3_CPMU_HST_ACC, val);
6748         }
6749
6750         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6751                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
6752                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
6753                        PCIE_PWR_MGMT_L1_THRESH_4MS;
6754                 tw32(PCIE_PWR_MGMT_THRESH, val);
6755
6756                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
6757                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
6758
6759                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
6760         }
6761
6762         if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
6763                 val = tr32(TG3_PCIE_LNKCTL);
6764                 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
6765                         val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6766                 else
6767                         val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
6768                 tw32(TG3_PCIE_LNKCTL, val);
6769         }
6770
6771         /* This works around an issue with Athlon chipsets on
6772          * B3 tigon3 silicon.  This bit has no effect on any
6773          * other revision.  But do not set this on PCI Express
6774          * chips and don't even touch the clocks if the CPMU is present.
6775          */
6776         if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
6777                 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6778                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6779                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6780         }
6781
6782         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6783             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6784                 val = tr32(TG3PCI_PCISTATE);
6785                 val |= PCISTATE_RETRY_SAME_DMA;
6786                 tw32(TG3PCI_PCISTATE, val);
6787         }
6788
6789         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6790                 /* Allow reads and writes to the
6791                  * APE register and memory space.
6792                  */
6793                 val = tr32(TG3PCI_PCISTATE);
6794                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6795                        PCISTATE_ALLOW_APE_SHMEM_WR;
6796                 tw32(TG3PCI_PCISTATE, val);
6797         }
6798
6799         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6800                 /* Enable some hw fixes.  */
6801                 val = tr32(TG3PCI_MSI_DATA);
6802                 val |= (1 << 26) | (1 << 28) | (1 << 29);
6803                 tw32(TG3PCI_MSI_DATA, val);
6804         }
6805
6806         /* Descriptor ring init may make accesses to the
6807          * NIC SRAM area to setup the TX descriptors, so we
6808          * can only do this after the hardware has been
6809          * successfully reset.
6810          */
6811         err = tg3_init_rings(tp);
6812         if (err)
6813                 return err;
6814
6815         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
6816             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
6817                 /* This value is determined during the probe time DMA
6818                  * engine test, tg3_test_dma.
6819                  */
6820                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6821         }
6822
6823         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6824                           GRC_MODE_4X_NIC_SEND_RINGS |
6825                           GRC_MODE_NO_TX_PHDR_CSUM |
6826                           GRC_MODE_NO_RX_PHDR_CSUM);
6827         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6828
6829         /* Pseudo-header checksum is done by hardware logic and not
6830          * the offload processers, so make the chip do the pseudo-
6831          * header checksums on receive.  For transmit it is more
6832          * convenient to do the pseudo-header checksum in software
6833          * as Linux does that on transmit for us in all cases.
6834          */
6835         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6836
6837         tw32(GRC_MODE,
6838              tp->grc_mode |
6839              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6840
6841         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
6842         val = tr32(GRC_MISC_CFG);
6843         val &= ~0xff;
6844         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6845         tw32(GRC_MISC_CFG, val);
6846
6847         /* Initialize MBUF/DESC pool. */
6848         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6849                 /* Do nothing.  */
6850         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6851                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6852                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6853                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6854                 else
6855                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6856                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6857                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6858         }
6859         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6860                 int fw_len;
6861
6862                 fw_len = tp->fw_len;
6863                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6864                 tw32(BUFMGR_MB_POOL_ADDR,
6865                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6866                 tw32(BUFMGR_MB_POOL_SIZE,
6867                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6868         }
6869
6870         if (tp->dev->mtu <= ETH_DATA_LEN) {
6871                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6872                      tp->bufmgr_config.mbuf_read_dma_low_water);
6873                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6874                      tp->bufmgr_config.mbuf_mac_rx_low_water);
6875                 tw32(BUFMGR_MB_HIGH_WATER,
6876                      tp->bufmgr_config.mbuf_high_water);
6877         } else {
6878                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
6879                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6880                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
6881                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6882                 tw32(BUFMGR_MB_HIGH_WATER,
6883                      tp->bufmgr_config.mbuf_high_water_jumbo);
6884         }
6885         tw32(BUFMGR_DMA_LOW_WATER,
6886              tp->bufmgr_config.dma_low_water);
6887         tw32(BUFMGR_DMA_HIGH_WATER,
6888              tp->bufmgr_config.dma_high_water);
6889
6890         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6891         for (i = 0; i < 2000; i++) {
6892                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6893                         break;
6894                 udelay(10);
6895         }
6896         if (i >= 2000) {
6897                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6898                        tp->dev->name);
6899                 return -ENODEV;
6900         }
6901
6902         /* Setup replenish threshold. */
6903         val = tp->rx_pending / 8;
6904         if (val == 0)
6905                 val = 1;
6906         else if (val > tp->rx_std_max_post)
6907                 val = tp->rx_std_max_post;
6908         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6909                 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6910                         tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6911
6912                 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6913                         val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6914         }
6915
6916         tw32(RCVBDI_STD_THRESH, val);
6917
6918         /* Initialize TG3_BDINFO's at:
6919          *  RCVDBDI_STD_BD:     standard eth size rx ring
6920          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
6921          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
6922          *
6923          * like so:
6924          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
6925          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
6926          *                              ring attribute flags
6927          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
6928          *
6929          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6930          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6931          *
6932          * The size of each ring is fixed in the firmware, but the location is
6933          * configurable.
6934          */
6935         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6936              ((u64) tp->rx_std_mapping >> 32));
6937         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6938              ((u64) tp->rx_std_mapping & 0xffffffff));
6939         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6940              NIC_SRAM_RX_BUFFER_DESC);
6941
6942         /* Don't even try to program the JUMBO/MINI buffer descriptor
6943          * configs on 5705.
6944          */
6945         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6946                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6947                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6948         } else {
6949                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6950                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6951
6952                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6953                      BDINFO_FLAGS_DISABLED);
6954
6955                 /* Setup replenish threshold. */
6956                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6957
6958                 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6959                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6960                              ((u64) tp->rx_jumbo_mapping >> 32));
6961                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6962                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6963                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6964                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6965                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6966                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6967                 } else {
6968                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6969                              BDINFO_FLAGS_DISABLED);
6970                 }
6971
6972         }
6973
6974         /* There is only one send ring on 5705/5750, no need to explicitly
6975          * disable the others.
6976          */
6977         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6978                 /* Clear out send RCB ring in SRAM. */
6979                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6980                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6981                                       BDINFO_FLAGS_DISABLED);
6982         }
6983
6984         tp->tx_prod = 0;
6985         tp->tx_cons = 0;
6986         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6987         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6988
6989         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6990                        tp->tx_desc_mapping,
6991                        (TG3_TX_RING_SIZE <<
6992                         BDINFO_FLAGS_MAXLEN_SHIFT),
6993                        NIC_SRAM_TX_BUFFER_DESC);
6994
6995         /* There is only one receive return ring on 5705/5750, no need
6996          * to explicitly disable the others.
6997          */
6998         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6999                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7000                      i += TG3_BDINFO_SIZE) {
7001                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7002                                       BDINFO_FLAGS_DISABLED);
7003                 }
7004         }
7005
7006         tp->rx_rcb_ptr = 0;
7007         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7008
7009         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7010                        tp->rx_rcb_mapping,
7011                        (TG3_RX_RCB_RING_SIZE(tp) <<
7012                         BDINFO_FLAGS_MAXLEN_SHIFT),
7013                        0);
7014
7015         tp->rx_std_ptr = tp->rx_pending;
7016         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7017                      tp->rx_std_ptr);
7018
7019         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7020                                                 tp->rx_jumbo_pending : 0;
7021         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7022                      tp->rx_jumbo_ptr);
7023
7024         /* Initialize MAC address and backoff seed. */
7025         __tg3_set_mac_addr(tp, 0);
7026
7027         /* MTU + ethernet header + FCS + optional VLAN tag */
7028         tw32(MAC_RX_MTU_SIZE,
7029              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7030
7031         /* The slot time is changed by tg3_setup_phy if we
7032          * run at gigabit with half duplex.
7033          */
7034         tw32(MAC_TX_LENGTHS,
7035              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7036              (6 << TX_LENGTHS_IPG_SHIFT) |
7037              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7038
7039         /* Receive rules. */
7040         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7041         tw32(RCVLPC_CONFIG, 0x0181);
7042
7043         /* Calculate RDMAC_MODE setting early, we need it to determine
7044          * the RCVLPC_STATE_ENABLE mask.
7045          */
7046         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7047                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7048                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7049                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7050                       RDMAC_MODE_LNGREAD_ENAB);
7051
7052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7055                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7056                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7057                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7058
7059         /* If statement applies to 5705 and 5750 PCI devices only */
7060         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7061              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7062             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7063                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7064                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7065                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7066                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7067                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7068                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7069                 }
7070         }
7071
7072         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7073                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7074
7075         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7076                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7077
7078         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7079             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7080                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7081
7082         /* Receive/send statistics. */
7083         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7084                 val = tr32(RCVLPC_STATS_ENABLE);
7085                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7086                 tw32(RCVLPC_STATS_ENABLE, val);
7087         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7088                    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7089                 val = tr32(RCVLPC_STATS_ENABLE);
7090                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7091                 tw32(RCVLPC_STATS_ENABLE, val);
7092         } else {
7093                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7094         }
7095         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7096         tw32(SNDDATAI_STATSENAB, 0xffffff);
7097         tw32(SNDDATAI_STATSCTRL,
7098              (SNDDATAI_SCTRL_ENABLE |
7099               SNDDATAI_SCTRL_FASTUPD));
7100
7101         /* Setup host coalescing engine. */
7102         tw32(HOSTCC_MODE, 0);
7103         for (i = 0; i < 2000; i++) {
7104                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7105                         break;
7106                 udelay(10);
7107         }
7108
7109         __tg3_set_coalesce(tp, &tp->coal);
7110
7111         /* set status block DMA address */
7112         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7113              ((u64) tp->status_mapping >> 32));
7114         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7115              ((u64) tp->status_mapping & 0xffffffff));
7116
7117         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7118                 /* Status/statistics block address.  See tg3_timer,
7119                  * the tg3_periodic_fetch_stats call there, and
7120                  * tg3_get_stats to see how this works for 5705/5750 chips.
7121                  */
7122                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7123                      ((u64) tp->stats_mapping >> 32));
7124                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7125                      ((u64) tp->stats_mapping & 0xffffffff));
7126                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7127                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7128         }
7129
7130         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7131
7132         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7133         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7134         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7135                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7136
7137         /* Clear statistics/status block in chip, and status block in ram. */
7138         for (i = NIC_SRAM_STATS_BLK;
7139              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7140              i += sizeof(u32)) {
7141                 tg3_write_mem(tp, i, 0);
7142                 udelay(40);
7143         }
7144         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7145
7146         if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7147                 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7148                 /* reset to prevent losing 1st rx packet intermittently */
7149                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7150                 udelay(10);
7151         }
7152
7153         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7154                 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7155         else
7156                 tp->mac_mode = 0;
7157         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7158                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7159         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7160             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7161             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7162                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7163         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7164         udelay(40);
7165
7166         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7167          * If TG3_FLG2_IS_NIC is zero, we should read the
7168          * register to preserve the GPIO settings for LOMs. The GPIOs,
7169          * whether used as inputs or outputs, are set by boot code after
7170          * reset.
7171          */
7172         if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7173                 u32 gpio_mask;
7174
7175                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7176                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7177                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7178
7179                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7180                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7181                                      GRC_LCLCTRL_GPIO_OUTPUT3;
7182
7183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7184                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7185
7186                 tp->grc_local_ctrl &= ~gpio_mask;
7187                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7188
7189                 /* GPIO1 must be driven high for eeprom write protect */
7190                 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7191                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7192                                                GRC_LCLCTRL_GPIO_OUTPUT1);
7193         }
7194         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7195         udelay(100);
7196
7197         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7198
7199         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7200                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7201                 udelay(40);
7202         }
7203
7204         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7205                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7206                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7207                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7208                WDMAC_MODE_LNGREAD_ENAB);
7209
7210         /* If statement applies to 5705 and 5750 PCI devices only */
7211         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7212              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7213             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7214                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7215                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7216                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7217                         /* nothing */
7218                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7219                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7220                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7221                         val |= WDMAC_MODE_RX_ACCEL;
7222                 }
7223         }
7224
7225         /* Enable host coalescing bug fix */
7226         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7227                 val |= WDMAC_MODE_STATUS_TAG_FIX;
7228
7229         tw32_f(WDMAC_MODE, val);
7230         udelay(40);
7231
7232         if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7233                 u16 pcix_cmd;
7234
7235                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7236                                      &pcix_cmd);
7237                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7238                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7239                         pcix_cmd |= PCI_X_CMD_READ_2K;
7240                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7241                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7242                         pcix_cmd |= PCI_X_CMD_READ_2K;
7243                 }
7244                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7245                                       pcix_cmd);
7246         }
7247
7248         tw32_f(RDMAC_MODE, rdmac_mode);
7249         udelay(40);
7250
7251         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7252         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7253                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7254
7255         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7256                 tw32(SNDDATAC_MODE,
7257                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7258         else
7259                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7260
7261         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7262         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7263         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7264         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7265         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7266                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7267         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7268         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7269
7270         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7271                 err = tg3_load_5701_a0_firmware_fix(tp);
7272                 if (err)
7273                         return err;
7274         }
7275
7276         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7277                 err = tg3_load_tso_firmware(tp);
7278                 if (err)
7279                         return err;
7280         }
7281
7282         tp->tx_mode = TX_MODE_ENABLE;
7283         tw32_f(MAC_TX_MODE, tp->tx_mode);
7284         udelay(100);
7285
7286         tp->rx_mode = RX_MODE_ENABLE;
7287         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7288                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7289
7290         tw32_f(MAC_RX_MODE, tp->rx_mode);
7291         udelay(10);
7292
7293         tw32(MAC_LED_CTRL, tp->led_ctrl);
7294
7295         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7296         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7297                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7298                 udelay(10);
7299         }
7300         tw32_f(MAC_RX_MODE, tp->rx_mode);
7301         udelay(10);
7302
7303         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7304                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7305                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7306                         /* Set drive transmission level to 1.2V  */
7307                         /* only if the signal pre-emphasis bit is not set  */
7308                         val = tr32(MAC_SERDES_CFG);
7309                         val &= 0xfffff000;
7310                         val |= 0x880;
7311                         tw32(MAC_SERDES_CFG, val);
7312                 }
7313                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7314                         tw32(MAC_SERDES_CFG, 0x616000);
7315         }
7316
7317         /* Prevent chip from dropping frames when flow control
7318          * is enabled.
7319          */
7320         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7321
7322         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7323             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7324                 /* Use hardware link auto-negotiation */
7325                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7326         }
7327
7328         if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7329             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7330                 u32 tmp;
7331
7332                 tmp = tr32(SERDES_RX_CTRL);
7333                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7334                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7335                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7336                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7337         }
7338
7339         if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7340                 if (tp->link_config.phy_is_low_power) {
7341                         tp->link_config.phy_is_low_power = 0;
7342                         tp->link_config.speed = tp->link_config.orig_speed;
7343                         tp->link_config.duplex = tp->link_config.orig_duplex;
7344                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
7345                 }
7346
7347                 err = tg3_setup_phy(tp, 0);
7348                 if (err)
7349                         return err;
7350
7351                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7352                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7353                         u32 tmp;
7354
7355                         /* Clear CRC stats. */
7356                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7357                                 tg3_writephy(tp, MII_TG3_TEST1,
7358                                              tmp | MII_TG3_TEST1_CRC_EN);
7359                                 tg3_readphy(tp, 0x14, &tmp);
7360                         }
7361                 }
7362         }
7363
7364         __tg3_set_rx_mode(tp->dev);
7365
7366         /* Initialize receive rules. */
7367         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
7368         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7369         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
7370         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7371
7372         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7373             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7374                 limit = 8;
7375         else
7376                 limit = 16;
7377         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7378                 limit -= 4;
7379         switch (limit) {
7380         case 16:
7381                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
7382         case 15:
7383                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
7384         case 14:
7385                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
7386         case 13:
7387                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
7388         case 12:
7389                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
7390         case 11:
7391                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
7392         case 10:
7393                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
7394         case 9:
7395                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
7396         case 8:
7397                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
7398         case 7:
7399                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
7400         case 6:
7401                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
7402         case 5:
7403                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
7404         case 4:
7405                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
7406         case 3:
7407                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
7408         case 2:
7409         case 1:
7410
7411         default:
7412                 break;
7413         }
7414
7415         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7416                 /* Write our heartbeat update interval to APE. */
7417                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7418                                 APE_HOST_HEARTBEAT_INT_DISABLE);
7419
7420         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7421
7422         return 0;
7423 }
7424
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * reset_phy is forwarded verbatim to tg3_reset_hw(); by its name,
 * nonzero forces a PHY reset during init — confirm in tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Put the chip clocks into a known state before any register setup. */
	tg3_switch_clocks(tp);

	/* Zero the memory window base so subsequent indirect SRAM
	 * accesses during the reset start from offset zero.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
7436
/* Accumulate the 32-bit hardware counter at register REG into the
 * 64-bit software counter PSTAT (split into .low/.high words):
 * add into .low and carry into .high when the addition wraps
 * (detected by the sum becoming smaller than the addend).
 * REG and PSTAT are each evaluated safely via the local __val /
 * parenthesized expansion.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
7443
/* Harvest the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit accumulators in tp->hw_stats, using
 * TG3_STAT_ADD32 to handle low-word carry.  Called from the driver
 * timer (see tg3_timer) on 5705-plus chips; skipped entirely while
 * the carrier is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	/* Receive list placement (buffer-descriptor level) counters. */
	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
7484
/* Periodic driver maintenance timer; __opaque is the struct tg3 pointer.
 * Per tick: work around the lost-interrupt race in non-tagged status
 * mode and watch the write-DMA engine.  Once per second: fetch stats
 * and poll link state.  Every asf_multiplier ticks: send the ASF
 * heartbeat.  Re-arms itself at the end via add_timer().
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While interrupts are being synced, skip all hardware access
	 * and just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			/* Status block was updated: force an interrupt so
			 * the pending work actually gets serviced.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise ask the coalescing engine to refresh
			 * the status block immediately.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine has stopped: schedule a full
			 * chip reset from process context and bail out
			 * without re-arming (reset_task restarts us —
			 * see TG3_FLG2_RESTART_TIMER handling).
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			/* Link-change detection via MAC status register,
			 * either through the MI interrupt bit or the
			 * link-state-changed bit.
			 */
			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Renegotiate if the link was up and changed state,
			 * or was down and we now see PCS sync / signal.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Briefly clear the port mode bits
					 * before reprogramming the PHY.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	/* Re-arm for the next tick. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
7604
7605 static int tg3_request_irq(struct tg3 *tp)
7606 {
7607         irq_handler_t fn;
7608         unsigned long flags;
7609         struct net_device *dev = tp->dev;
7610
7611         if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7612                 fn = tg3_msi;
7613                 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
7614                         fn = tg3_msi_1shot;
7615                 flags = IRQF_SAMPLE_RANDOM;
7616         } else {
7617                 fn = tg3_interrupt;
7618                 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
7619                         fn = tg3_interrupt_tagged;
7620                 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
7621         }
7622         return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
7623 }
7624
/* Verify that the chip can deliver an interrupt to the host.
 *
 * Temporarily replaces the normal handler with tg3_test_isr, forces
 * the coalescing engine to generate an interrupt, then polls up to
 * ~50ms for evidence it arrived: a nonzero interrupt mailbox or the
 * MASK_PCI_INT bit set in misc host control.  The normal handler is
 * restored before returning.  Returns 0 on success, -EIO if no
 * interrupt was observed, -ENODEV if the device is down, or the
 * request_irq() error.  NOTE(review): if re-registering a handler
 * fails, the device is left without an IRQ — caller must handle.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Swap in the test ISR on the same IRQ line. */
	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to fire an interrupt now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Restore the normal interrupt handler. */
	free_irq(tp->pdev->irq, dev);

	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
7678
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other error leaves the device without a
 * registered interrupt handler.  No-op (returns 0) when MSI is not in
 * use.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	/* Tear down MSI and re-register a line-based handler. */
	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed, drop the freshly requested IRQ too. */
	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
7739
7740 static int tg3_request_firmware(struct tg3 *tp)
7741 {
7742         const __be32 *fw_data;
7743
7744         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
7745                 printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
7746                        tp->dev->name, tp->fw_needed);
7747                 return -ENOENT;
7748         }
7749
7750         fw_data = (void *)tp->fw->data;
7751
7752         /* Firmware blob starts with version numbers, followed by
7753          * start address and _full_ length including BSS sections
7754          * (which must be longer than the actual data, of course
7755          */
7756
7757         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
7758         if (tp->fw_len < (tp->fw->size - 12)) {
7759                 printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
7760                        tp->dev->name, tp->fw_len, tp->fw_needed);
7761                 release_firmware(tp->fw);
7762                 tp->fw = NULL;
7763                 return -EINVAL;
7764         }
7765
7766         /* We no longer need firmware; we have it. */
7767         tp->fw_needed = NULL;
7768         return 0;
7769 }
7770
/* net_device open hook: load firmware if required, power the chip to
 * D0, allocate DMA rings, set up the IRQ (MSI when supported), bring
 * the hardware up under the full lock, verify MSI delivery, and
 * finally arm the timer and enable interrupts.  Every failure path
 * unwinds whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		/* 5701 A0 cannot run without its firmware; other chips
		 * merely lose TSO when the blob is unavailable.
		 */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			printk(KERN_WARNING "%s: TSO capability disabled.\n",
			       tp->dev->name);
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			printk(KERN_NOTICE "%s: TSO capability restored.\n",
			       tp->dev->name);
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		/* Unwind MSI and ring allocation on IRQ failure. */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	napi_enable(&tp->napi);

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* With tagged status the timer can run at 1 Hz;
		 * otherwise poll at 10 Hz.
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		/* Timer is initialized here but only armed (add_timer)
		 * after the MSI test below succeeds.
		 */
		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		napi_disable(&tp->napi);
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() may itself fall back to INTx; only a
		 * hard failure reaches the teardown below.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			napi_disable(&tp->napi);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7922
#if 0
/* Debug-only register/descriptor dump, compiled out (#if 0).  Walks
 * the major hardware blocks, the host/NIC status and statistics
 * blocks, and the first few TX/RX descriptors in NIC SRAM.
 * NOTE(review): bit-rotted while disabled — e.g. `txd`/`rxd` are
 * plain unsigned long derived from tp->regs; confirm pointer types
 * before ever re-enabling this.
 */
/*static*/ void tg3_dump_state(struct tg3 *tp)
{
	u32 val32, val32_2, val32_3, val32_4, val32_5;
	u16 val16;
	int i;

	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
	       val16, val32);

	/* MAC block */
	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
	       tr32(MAC_MODE), tr32(MAC_STATUS));
	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));

	/* Send data initiator control block */
	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
	printk("       SNDDATAI_STATSCTRL[%08x]\n",
	       tr32(SNDDATAI_STATSCTRL));

	/* Send data completion control block */
	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));

	/* Send BD ring selector block */
	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));

	/* Send BD initiator control block */
	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));

	/* Send BD completion control block */
	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));

	/* Receive list placement control block */
	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
	printk("       RCVLPC_STATSCTRL[%08x]\n",
	       tr32(RCVLPC_STATSCTRL));

	/* Receive data and receive BD initiator control block */
	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));

	/* Receive data completion control block */
	printk("DEBUG: RCVDCC_MODE[%08x]\n",
	       tr32(RCVDCC_MODE));

	/* Receive BD initiator control block */
	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));

	/* Receive BD completion control block */
	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));

	/* Receive list selector control block */
	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));

	/* Mbuf cluster free block */
	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));

	/* Host coalescing control block */
	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));

	/* Memory arbiter control block */
	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));

	/* Buffer manager control block */
	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));

	/* Read DMA control block */
	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));

	/* Write DMA control block */
	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));

	/* DMA completion block */
	printk("DEBUG: DMAC_MODE[%08x]\n",
	       tr32(DMAC_MODE));

	/* GRC block */
	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
	       tr32(GRC_LOCAL_CTRL));

	/* TG3_BDINFOs */
	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_JUMBO_BD + 0x0),
	       tr32(RCVDBDI_JUMBO_BD + 0x4),
	       tr32(RCVDBDI_JUMBO_BD + 0x8),
	       tr32(RCVDBDI_JUMBO_BD + 0xc));
	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_STD_BD + 0x0),
	       tr32(RCVDBDI_STD_BD + 0x4),
	       tr32(RCVDBDI_STD_BD + 0x8),
	       tr32(RCVDBDI_STD_BD + 0xc));
	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
	       tr32(RCVDBDI_MINI_BD + 0x0),
	       tr32(RCVDBDI_MINI_BD + 0x4),
	       tr32(RCVDBDI_MINI_BD + 0x8),
	       tr32(RCVDBDI_MINI_BD + 0xc));

	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4);

	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
	       val32, val32_2, val32_3, val32_4, val32_5);

	/* SW status block */
	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
	       tp->hw_status->status,
	       tp->hw_status->status_tag,
	       tp->hw_status->rx_jumbo_consumer,
	       tp->hw_status->rx_consumer,
	       tp->hw_status->rx_mini_consumer,
	       tp->hw_status->idx[0].rx_producer,
	       tp->hw_status->idx[0].tx_consumer);

	/* SW statistics block */
	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
	       ((u32 *)tp->hw_stats)[0],
	       ((u32 *)tp->hw_stats)[1],
	       ((u32 *)tp->hw_stats)[2],
	       ((u32 *)tp->hw_stats)[3]);

	/* Mailboxes */
	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));

	/* NIC side send descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long txd;

		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
			+ (i * sizeof(struct tg3_tx_buffer_desc));
		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(txd + 0x0), readl(txd + 0x4),
		       readl(txd + 0x8), readl(txd + 0xc));
	}

	/* NIC side RX descriptors. */
	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}

	for (i = 0; i < 6; i++) {
		unsigned long rxd;

		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
			+ (i * sizeof(struct tg3_rx_buffer_desc));
		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
		rxd += (4 * sizeof(u32));
		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
		       i,
		       readl(rxd + 0x0), readl(rxd + 0x4),
		       readl(rxd + 0x8), readl(rxd + 0xc));
	}
}
#endif
8150
/* Forward declarations: tg3_close() below snapshots both statistics
 * blocks before freeing the DMA memory that backs them.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8153
/* net_device close hook: quiesce NAPI and the reset task, stop the
 * timer, shut the hardware down under the full lock, release the
 * IRQ/MSI, snapshot the statistics counters, free DMA memory, and
 * drop the device into D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	napi_disable(&tp->napi);
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
#if 0
	tg3_dump_state(tp);
#endif

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Fold the final hardware counters into the *_prev snapshots so
	 * totals survive close/open cycles -- tg3_get_stats() and
	 * tg3_get_estats() add these snapshots back in.  Must happen
	 * before tg3_free_consistent() releases hw_stats.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
8197
8198 static inline unsigned long get_stat64(tg3_stat64_t *val)
8199 {
8200         unsigned long ret;
8201
8202 #if (BITS_PER_LONG == 32)
8203         ret = val->low;
8204 #else
8205         ret = ((u64)val->high << 32) | ((u64)val->low);
8206 #endif
8207         return ret;
8208 }
8209
8210 static inline u64 get_estat64(tg3_stat64_t *val)
8211 {
8212        return ((u64)val->high << 32) | ((u64)val->low);
8213 }
8214
/* Return the running RX CRC error count.  Copper 5700/5701 parts
 * accumulate the PHY's own counter into tp->phy_crc_errors; all other
 * parts just report the MAC's rx_fcs_errors statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		/* PHY access must be serialized against other MDIO users. */
		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable CRC counting, then read PHY reg 0x14 --
			 * presumably the PHY's CRC error counter on these
			 * parts.  NOTE(review): confirm 0x14 semantics
			 * (and whether reading clears it) against the
			 * Broadcom PHY datasheet.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
8240
/* Add the current hardware value of @member to the snapshot taken at
 * the last close.  Expects `estats', `old_estats' and `hw_stats'
 * locals in scope at the expansion site (see tg3_get_estats()).
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_estat64(&hw_stats->member)
8244
/* Refresh tp->estats from the hardware statistics block, adding each
 * counter to the snapshot saved at the last close (estats_prev).
 * Returns the snapshot unchanged when the DMA stats block is not
 * allocated (device closed).
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
8332
/* net_device get_stats hook: derive the standard netdev counters from
 * the hardware statistics block, added to the snapshot saved at the
 * last close (net_stats_prev).  Returns the snapshot unchanged when
 * the DMA stats block is not allocated (device closed).
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* Packet totals are the sum of the unicast/multicast/broadcast
	 * hardware counters.
	 */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on 5700/5701 copper; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
8392
8393 static inline u32 calc_crc(unsigned char *buf, int len)
8394 {
8395         u32 reg;
8396         u32 tmp;
8397         int j, k;
8398
8399         reg = 0xffffffff;
8400
8401         for (j = 0; j < len; j++) {
8402                 reg ^= buf[j];
8403
8404                 for (k = 0; k < 8; k++) {
8405                         tmp = reg & 0x01;
8406
8407                         reg >>= 1;
8408
8409                         if (tmp) {
8410                                 reg ^= 0xedb88320;
8411                         }
8412                 }
8413         }
8414
8415         return ~reg;
8416 }
8417
8418 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8419 {
8420         /* accept or reject all multicast frames */
8421         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8422         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8423         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8424         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8425 }
8426
8427 static void __tg3_set_rx_mode(struct net_device *dev)
8428 {
8429         struct tg3 *tp = netdev_priv(dev);
8430         u32 rx_mode;
8431
8432         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8433                                   RX_MODE_KEEP_VLAN_TAG);
8434
8435         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8436          * flag clear.
8437          */
8438 #if TG3_VLAN_TAG_USED
8439         if (!tp->vlgrp &&
8440             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8441                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8442 #else
8443         /* By definition, VLAN is disabled always in this
8444          * case.
8445          */
8446         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8447                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8448 #endif
8449
8450         if (dev->flags & IFF_PROMISC) {
8451                 /* Promiscuous mode. */
8452                 rx_mode |= RX_MODE_PROMISC;
8453         } else if (dev->flags & IFF_ALLMULTI) {
8454                 /* Accept all multicast. */
8455                 tg3_set_multi (tp, 1);
8456         } else if (dev->mc_count < 1) {
8457                 /* Reject all multicast. */
8458                 tg3_set_multi (tp, 0);
8459         } else {
8460                 /* Accept one or more multicast(s). */
8461                 struct dev_mc_list *mclist;
8462                 unsigned int i;
8463                 u32 mc_filter[4] = { 0, };
8464                 u32 regidx;
8465                 u32 bit;
8466                 u32 crc;
8467
8468                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8469                      i++, mclist = mclist->next) {
8470
8471                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8472                         bit = ~crc & 0x7f;
8473                         regidx = (bit & 0x60) >> 5;
8474                         bit &= 0x1f;
8475                         mc_filter[regidx] |= (1 << bit);
8476                 }
8477
8478                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8479                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8480                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8481                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8482         }
8483
8484         if (rx_mode != tp->rx_mode) {
8485                 tp->rx_mode = rx_mode;
8486                 tw32_f(MAC_RX_MODE, rx_mode);
8487                 udelay(10);
8488         }
8489 }
8490
/* net_device set_rx_mode hook: take the full lock and apply the RX
 * filtering flags; a no-op while the interface is down.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
8502
/* Size of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN         (32 * 1024)

/* ethtool get_regs_len hook: the dump size is fixed. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
8509
/* ethtool get_regs hook: dump selected chip register ranges into the
 * TG3_REGDUMP_LEN-byte buffer at _p.  Each register value is stored at
 * the buffer offset equal to its register offset, so the dump is
 * sparse: ranges that are not read stay zero from the initial memset.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	/* Register access is not safe while the chip is powered down;
	 * return the zeroed buffer instead. */
	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump and advance the cursor. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Read 'len' bytes of registers starting at 'base', placed at the
 * matching offset inside the dump buffer. */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Read a single register at its own offset in the dump buffer. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	/* Only dump NVRAM registers when the device actually has NVRAM. */
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
8582
/* ethtool get_eeprom_len hook: report the NVRAM size that was probed
 * at init time and cached in tp->nvram_size. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
8589
/* ethtool get_eeprom hook: copy an arbitrary byte range out of NVRAM.
 *
 * NVRAM can only be read in aligned 4-byte words, so the request is
 * handled in three phases: a leading partial word, the aligned middle,
 * and a trailing partial word.  eeprom->len is advanced as bytes are
 * delivered, so a short result is reported if a read fails mid-way.
 * Returns 0 on success or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return -EINVAL;

	/* NVRAM is not accessible while the chip is in low power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report how much was copied before the failure. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
8652
8653 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
8654
/* ethtool set_eeprom hook: write an arbitrary byte range into NVRAM.
 *
 * NVRAM writes must cover whole aligned 4-byte words.  If the request
 * starts or ends mid-word, the neighbouring word(s) are read back
 * first and merged with the user data in a temporary buffer, then the
 * whole aligned range is written with tg3_nvram_write_block().
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	/* NVRAM is not accessible while the chip is in low power state. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary:
		 * fetch the word containing the unaligned start bytes. */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary:
		 * fetch the word containing the unaligned tail bytes. */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		/* Merge existing head/tail words with the user data. */
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
8713
8714 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8715 {
8716         struct tg3 *tp = netdev_priv(dev);
8717
8718         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8719                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8720                         return -EAGAIN;
8721                 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8722         }
8723
8724         cmd->supported = (SUPPORTED_Autoneg);
8725
8726         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
8727                 cmd->supported |= (SUPPORTED_1000baseT_Half |
8728                                    SUPPORTED_1000baseT_Full);
8729
8730         if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
8731                 cmd->supported |= (SUPPORTED_100baseT_Half |
8732                                   SUPPORTED_100baseT_Full |
8733                                   SUPPORTED_10baseT_Half |
8734                                   SUPPORTED_10baseT_Full |
8735                                   SUPPORTED_TP);
8736                 cmd->port = PORT_TP;
8737         } else {
8738                 cmd->supported |= SUPPORTED_FIBRE;
8739                 cmd->port = PORT_FIBRE;
8740         }
8741
8742         cmd->advertising = tp->link_config.advertising;
8743         if (netif_running(dev)) {
8744                 cmd->speed = tp->link_config.active_speed;
8745                 cmd->duplex = tp->link_config.active_duplex;
8746         }
8747         cmd->phy_address = PHY_ADDR;
8748         cmd->transceiver = XCVR_INTERNAL;
8749         cmd->autoneg = tp->link_config.autoneg;
8750         cmd->maxtxpkt = 0;
8751         cmd->maxrxpkt = 0;
8752         return 0;
8753 }
8754
8755 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8756 {
8757         struct tg3 *tp = netdev_priv(dev);
8758
8759         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
8760                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
8761                         return -EAGAIN;
8762                 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
8763         }
8764
8765         if (cmd->autoneg != AUTONEG_ENABLE &&
8766             cmd->autoneg != AUTONEG_DISABLE)
8767                 return -EINVAL;
8768
8769         if (cmd->autoneg == AUTONEG_DISABLE &&
8770             cmd->duplex != DUPLEX_FULL &&
8771             cmd->duplex != DUPLEX_HALF)
8772                 return -EINVAL;
8773
8774         if (cmd->autoneg == AUTONEG_ENABLE) {
8775                 u32 mask = ADVERTISED_Autoneg |
8776                            ADVERTISED_Pause |
8777                            ADVERTISED_Asym_Pause;
8778
8779                 if (!(tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
8780                         mask |= ADVERTISED_1000baseT_Half |
8781                                 ADVERTISED_1000baseT_Full;
8782
8783                 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
8784                         mask |= ADVERTISED_100baseT_Half |
8785                                 ADVERTISED_100baseT_Full |
8786                                 ADVERTISED_10baseT_Half |
8787                                 ADVERTISED_10baseT_Full |
8788                                 ADVERTISED_TP;
8789                 else
8790                         mask |= ADVERTISED_FIBRE;
8791
8792                 if (cmd->advertising & ~mask)
8793                         return -EINVAL;
8794
8795                 mask &= (ADVERTISED_1000baseT_Half |
8796                          ADVERTISED_1000baseT_Full |
8797                          ADVERTISED_100baseT_Half |
8798                          ADVERTISED_100baseT_Full |
8799                          ADVERTISED_10baseT_Half |
8800                          ADVERTISED_10baseT_Full);
8801
8802                 cmd->advertising &= mask;
8803         } else {
8804                 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
8805                         if (cmd->speed != SPEED_1000)
8806                                 return -EINVAL;
8807
8808                         if (cmd->duplex != DUPLEX_FULL)
8809                                 return -EINVAL;
8810                 } else {
8811                         if (cmd->speed != SPEED_100 &&
8812                             cmd->speed != SPEED_10)
8813                                 return -EINVAL;
8814                 }
8815         }
8816
8817         tg3_full_lock(tp, 0);
8818
8819         tp->link_config.autoneg = cmd->autoneg;
8820         if (cmd->autoneg == AUTONEG_ENABLE) {
8821                 tp->link_config.advertising = (cmd->advertising |
8822                                               ADVERTISED_Autoneg);
8823                 tp->link_config.speed = SPEED_INVALID;
8824                 tp->link_config.duplex = DUPLEX_INVALID;
8825         } else {
8826                 tp->link_config.advertising = 0;
8827                 tp->link_config.speed = cmd->speed;
8828                 tp->link_config.duplex = cmd->duplex;
8829         }
8830
8831         tp->link_config.orig_speed = tp->link_config.speed;
8832         tp->link_config.orig_duplex = tp->link_config.duplex;
8833         tp->link_config.orig_autoneg = tp->link_config.autoneg;
8834
8835         if (netif_running(dev))
8836                 tg3_setup_phy(tp, 1);
8837
8838         tg3_full_unlock(tp);
8839
8840         return 0;
8841 }
8842
8843 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8844 {
8845         struct tg3 *tp = netdev_priv(dev);
8846
8847         strcpy(info->driver, DRV_MODULE_NAME);
8848         strcpy(info->version, DRV_MODULE_VERSION);
8849         strcpy(info->fw_version, tp->fw_ver);
8850         strcpy(info->bus_info, pci_name(tp->pdev));
8851 }
8852
8853 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8854 {
8855         struct tg3 *tp = netdev_priv(dev);
8856
8857         if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
8858             device_can_wakeup(&tp->pdev->dev))
8859                 wol->supported = WAKE_MAGIC;
8860         else
8861                 wol->supported = 0;
8862         wol->wolopts = 0;
8863         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
8864             device_can_wakeup(&tp->pdev->dev))
8865                 wol->wolopts = WAKE_MAGIC;
8866         memset(&wol->sopass, 0, sizeof(wol->sopass));
8867 }
8868
8869 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8870 {
8871         struct tg3 *tp = netdev_priv(dev);
8872         struct device *dp = &tp->pdev->dev;
8873
8874         if (wol->wolopts & ~WAKE_MAGIC)
8875                 return -EINVAL;
8876         if ((wol->wolopts & WAKE_MAGIC) &&
8877             !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
8878                 return -EINVAL;
8879
8880         spin_lock_bh(&tp->lock);
8881         if (wol->wolopts & WAKE_MAGIC) {
8882                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
8883                 device_set_wakeup_enable(dp, true);
8884         } else {
8885                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8886                 device_set_wakeup_enable(dp, false);
8887         }
8888         spin_unlock_bh(&tp->lock);
8889
8890         return 0;
8891 }
8892
/* ethtool get_msglevel hook: return the driver's message-enable mask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
8898
/* ethtool set_msglevel hook: set the driver's message-enable mask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
8904
/* ethtool set_tso hook: enable/disable TCP segmentation offload.
 *
 * On chips without TSO support only "off" is accepted.  On HW_TSO_2
 * chips that also offload IPv6 checksums, TSO6 tracks the TSO setting,
 * and the chip revisions listed below additionally get TSO_ECN.  The
 * base TSO feature bit itself is handled by ethtool_op_set_tso().
 * Returns 0 on success or -EINVAL.
 */
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
	if ((dev->features & NETIF_F_IPV6_CSUM) &&
	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
			/* 5761, 5784 (non-AX revs), 5785 and 57780 also
			 * support TSO of frames with ECN set. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}
	return ethtool_op_set_tso(dev, value);
}
8929
/* ethtool nway_reset hook: restart link autonegotiation.
 * Not applicable to serdes PHYs.  With phylib the restart is delegated
 * to phy_start_aneg(); otherwise BMCR_ANRESTART is written directly,
 * provided autoneg is enabled or parallel detection is in effect.
 * Returns 0 on success or a negative errno.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* BMCR is read twice and the first result discarded —
		 * presumably to flush a stale/latched value.
		 * NOTE(review): confirm the double read is intentional. */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
			/* Restart (and force-enable) autonegotiation. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
8963
8964 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
8965 {
8966         struct tg3 *tp = netdev_priv(dev);
8967
8968         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
8969         ering->rx_mini_max_pending = 0;
8970         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8971                 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
8972         else
8973                 ering->rx_jumbo_max_pending = 0;
8974
8975         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
8976
8977         ering->rx_pending = tp->rx_pending;
8978         ering->rx_mini_pending = 0;
8979         if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8980                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
8981         else
8982                 ering->rx_jumbo_pending = 0;
8983
8984         ering->tx_pending = tp->tx_pending;
8985 }
8986
/* ethtool set_ringparam hook: resize the RX/TX rings.
 *
 * Validates the requested sizes against the hardware ring limits (the
 * TX ring must also leave room for a maximally-fragmented skb, three
 * times over on TSO_BUG chips), then stops the device, stores the new
 * sizes and restarts the hardware so the rings are rebuilt.
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips can only post up to 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only after the locks are dropped. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
9030
9031 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9032 {
9033         struct tg3 *tp = netdev_priv(dev);
9034
9035         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9036
9037         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9038                 epause->rx_pause = 1;
9039         else
9040                 epause->rx_pause = 0;
9041
9042         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9043                 epause->tx_pause = 1;
9044         else
9045                 epause->tx_pause = 0;
9046 }
9047
/* ethtool set_pauseparam hook: configure RX/TX flow control.
 *
 * With phylib: if pause autoneg is requested, the new pause bits are
 * merged into the PHY's advertised modes and autonegotiation is
 * restarted (or the bits are only stored in link_config if the PHY is
 * not connected yet); otherwise flow control is forced immediately.
 * Without phylib: the flags are stored and the whole chip is halted
 * and restarted to apply them.
 * Returns 0 on success or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
			return -EAGAIN;

		if (epause->autoneg) {
			u32 newadv;
			struct phy_device *phydev;

			phydev = tp->mdio_bus->phy_map[PHY_ADDR];

			/* Map rx/tx pause requests onto the standard
			 * Pause/Asym_Pause advertisement bits. */
			if (epause->rx_pause) {
				if (epause->tx_pause)
					newadv = ADVERTISED_Pause;
				else
					newadv = ADVERTISED_Pause |
						 ADVERTISED_Asym_Pause;
			} else if (epause->tx_pause) {
				newadv = ADVERTISED_Asym_Pause;
			} else
				newadv = 0;

			if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
				u32 oldadv = phydev->advertising &
					     (ADVERTISED_Pause |
					      ADVERTISED_Asym_Pause);
				/* Only renegotiate if something changed. */
				if (oldadv != newadv) {
					phydev->advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
					phydev->advertising |= newadv;
					err = phy_start_aneg(phydev);
				}
			} else {
				tp->link_config.advertising &=
						~(ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
				tp->link_config.advertising |= newadv;
			}
		} else {
			if (epause->rx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_RX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_RX;

			if (epause->tx_pause)
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
			else
				tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

			/* Force the new settings onto the link right away. */
			if (netif_running(dev))
				tg3_setup_flow_control(tp, 0, 0);
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
		else
			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Apply by fully restarting the hardware. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
9140
/* ethtool get_rx_csum hook: nonzero when RX checksum offload is on. */
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
9146
9147 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9148 {
9149         struct tg3 *tp = netdev_priv(dev);
9150
9151         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9152                 if (data != 0)
9153                         return -EINVAL;
9154                 return 0;
9155         }
9156
9157         spin_lock_bh(&tp->lock);
9158         if (data)
9159                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9160         else
9161                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9162         spin_unlock_bh(&tp->lock);
9163
9164         return 0;
9165 }
9166
9167 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9168 {
9169         struct tg3 *tp = netdev_priv(dev);
9170
9171         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9172                 if (data != 0)
9173                         return -EINVAL;
9174                 return 0;
9175         }
9176
9177         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9178                 ethtool_op_set_tx_ipv6_csum(dev, data);
9179         else
9180                 ethtool_op_set_tx_csum(dev, data);
9181
9182         return 0;
9183 }
9184
9185 static int tg3_get_sset_count (struct net_device *dev, int sset)
9186 {
9187         switch (sset) {
9188         case ETH_SS_TEST:
9189                 return TG3_NUM_TEST;
9190         case ETH_SS_STATS:
9191                 return TG3_NUM_STATS;
9192         default:
9193                 return -EOPNOTSUPP;
9194         }
9195 }
9196
9197 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9198 {
9199         switch (stringset) {
9200         case ETH_SS_STATS:
9201                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
9202                 break;
9203         case ETH_SS_TEST:
9204                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
9205                 break;
9206         default:
9207                 WARN_ON(1);     /* we need a WARN() */
9208                 break;
9209         }
9210 }
9211
/* ethtool phys_id hook: blink the port LEDs so the adapter can be
 * physically identified.  'data' is the duration in seconds (0 means
 * effectively forever).  All LEDs are forced on and off in alternating
 * 500ms half-periods; the loop exits early if a signal arrives, and
 * the original LED configuration is restored at the end.
 * Returns 0, or -EAGAIN if the interface is down.
 */
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;

	/* Two half-periods per second of blinking. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			/* Override link/traffic control and force every
			 * speed LED on. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_1000MBPS_ON |
					   LED_CTRL_100MBPS_ON |
					   LED_CTRL_10MBPS_ON |
					   LED_CTRL_TRAFFIC_OVERRIDE |
					   LED_CTRL_TRAFFIC_BLINK |
					   LED_CTRL_TRAFFIC_LED);

		else
			/* All LEDs off. */
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
					   LED_CTRL_TRAFFIC_OVERRIDE);

		if (msleep_interruptible(500))
			break;
	}
	/* Restore the normal LED behavior. */
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
9243
/* ethtool get_ethtool_stats hook: refresh the driver's statistics
 * block via tg3_get_estats() and copy the u64 values into tmp_stats. */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
9250
9251 #define NVRAM_TEST_SIZE 0x100
9252 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
9253 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
9254 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
9255 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9256 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
9257
9258 static int tg3_test_nvram(struct tg3 *tp)
9259 {
9260         u32 csum, magic;
9261         __be32 *buf;
9262         int i, j, k, err = 0, size;
9263
9264         if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9265                 return 0;
9266
9267         if (tg3_nvram_read(tp, 0, &magic) != 0)
9268                 return -EIO;
9269
9270         if (magic == TG3_EEPROM_MAGIC)
9271                 size = NVRAM_TEST_SIZE;
9272         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9273                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9274                     TG3_EEPROM_SB_FORMAT_1) {
9275                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9276                         case TG3_EEPROM_SB_REVISION_0:
9277                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9278                                 break;
9279                         case TG3_EEPROM_SB_REVISION_2:
9280                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9281                                 break;
9282                         case TG3_EEPROM_SB_REVISION_3:
9283                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9284                                 break;
9285                         default:
9286                                 return 0;
9287                         }
9288                 } else
9289                         return 0;
9290         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9291                 size = NVRAM_SELFBOOT_HW_SIZE;
9292         else
9293                 return -EIO;
9294
9295         buf = kmalloc(size, GFP_KERNEL);
9296         if (buf == NULL)
9297                 return -ENOMEM;
9298
9299         err = -EIO;
9300         for (i = 0, j = 0; i < size; i += 4, j++) {
9301                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
9302                 if (err)
9303                         break;
9304         }
9305         if (i < size)
9306                 goto out;
9307
9308         /* Selfboot format */
9309         magic = be32_to_cpu(buf[0]);
9310         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9311             TG3_EEPROM_MAGIC_FW) {
9312                 u8 *buf8 = (u8 *) buf, csum8 = 0;
9313
9314                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9315                     TG3_EEPROM_SB_REVISION_2) {
9316                         /* For rev 2, the csum doesn't include the MBA. */
9317                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9318                                 csum8 += buf8[i];
9319                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9320                                 csum8 += buf8[i];
9321                 } else {
9322                         for (i = 0; i < size; i++)
9323                                 csum8 += buf8[i];
9324                 }
9325
9326                 if (csum8 == 0) {
9327                         err = 0;
9328                         goto out;
9329                 }
9330
9331                 err = -EIO;
9332                 goto out;
9333         }
9334
9335         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9336             TG3_EEPROM_MAGIC_HW) {
9337                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9338                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9339                 u8 *buf8 = (u8 *) buf;
9340
9341                 /* Separate the parity bits and the data bytes.  */
9342                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9343                         if ((i == 0) || (i == 8)) {
9344                                 int l;
9345                                 u8 msk;
9346
9347                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9348                                         parity[k++] = buf8[i] & msk;
9349                                 i++;
9350                         }
9351                         else if (i == 16) {
9352                                 int l;
9353                                 u8 msk;
9354
9355                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9356                                         parity[k++] = buf8[i] & msk;
9357                                 i++;
9358
9359                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9360                                         parity[k++] = buf8[i] & msk;
9361                                 i++;
9362                         }
9363                         data[j++] = buf8[i];
9364                 }
9365
9366                 err = -EIO;
9367                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9368                         u8 hw8 = hweight8(data[i]);
9369
9370                         if ((hw8 & 0x1) && parity[i])
9371                                 goto out;
9372                         else if (!(hw8 & 0x1) && !parity[i])
9373                                 goto out;
9374                 }
9375                 err = 0;
9376                 goto out;
9377         }
9378
9379         /* Bootstrap checksum at offset 0x10 */
9380         csum = calc_crc((unsigned char *) buf, 0x10);
9381         if (csum != be32_to_cpu(buf[0x10/4]))
9382                 goto out;
9383
9384         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9385         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9386         if (csum != be32_to_cpu(buf[0xfc/4]))
9387                 goto out;
9388
9389         err = 0;
9390
9391 out:
9392         kfree(buf);
9393         return err;
9394 }
9395
9396 #define TG3_SERDES_TIMEOUT_SEC  2
9397 #define TG3_COPPER_TIMEOUT_SEC  6
9398
9399 static int tg3_test_link(struct tg3 *tp)
9400 {
9401         int i, max;
9402
9403         if (!netif_running(tp->dev))
9404                 return -ENODEV;
9405
9406         if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9407                 max = TG3_SERDES_TIMEOUT_SEC;
9408         else
9409                 max = TG3_COPPER_TIMEOUT_SEC;
9410
9411         for (i = 0; i < max; i++) {
9412                 if (netif_carrier_ok(tp->dev))
9413                         return 0;
9414
9415                 if (msleep_interruptible(1000))
9416                         break;
9417         }
9418
9419         return -EIO;
9420 }
9421
/* Only test the commonly used registers.
 *
 * For each table entry: save the register, write all-zeros and verify
 * that the read-only bits (read_mask) are unchanged and the read/write
 * bits (write_mask) read back as zero, then write all-ones and verify
 * the read-only bits are still unchanged and the read/write bits read
 * back as ones.  The original value is restored either way.
 * Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;	/* TG3_FL_* bits selecting which chips run this entry */
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;	/* bits that must survive any write */
		u32 write_mask;	/* bits that must follow the written value */
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Sentinel: offset 0xffff terminates the table. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that do not apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original value before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	/* Restore the last register touched before reporting failure. */
	if (netif_msg_hw(tp))
		printk(KERN_ERR PFX "Register test failed at offset %x\n",
		       offset);
	tw32(offset, save_val);
	return -EIO;
}
9642
9643 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9644 {
9645         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9646         int i;
9647         u32 j;
9648
9649         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9650                 for (j = 0; j < len; j += 4) {
9651                         u32 val;
9652
9653                         tg3_write_mem(tp, offset + j, test_pattern[i]);
9654                         tg3_read_mem(tp, offset + j, &val);
9655                         if (val != test_pattern[i])
9656                                 return -EIO;
9657                 }
9658         }
9659         return 0;
9660 }
9661
9662 static int tg3_test_memory(struct tg3 *tp)
9663 {
9664         static struct mem_entry {
9665                 u32 offset;
9666                 u32 len;
9667         } mem_tbl_570x[] = {
9668                 { 0x00000000, 0x00b50},
9669                 { 0x00002000, 0x1c000},
9670                 { 0xffffffff, 0x00000}
9671         }, mem_tbl_5705[] = {
9672                 { 0x00000100, 0x0000c},
9673                 { 0x00000200, 0x00008},
9674                 { 0x00004000, 0x00800},
9675                 { 0x00006000, 0x01000},
9676                 { 0x00008000, 0x02000},
9677                 { 0x00010000, 0x0e000},
9678                 { 0xffffffff, 0x00000}
9679         }, mem_tbl_5755[] = {
9680                 { 0x00000200, 0x00008},
9681                 { 0x00004000, 0x00800},
9682                 { 0x00006000, 0x00800},
9683                 { 0x00008000, 0x02000},
9684                 { 0x00010000, 0x0c000},
9685                 { 0xffffffff, 0x00000}
9686         }, mem_tbl_5906[] = {
9687                 { 0x00000200, 0x00008},
9688                 { 0x00004000, 0x00400},
9689                 { 0x00006000, 0x00400},
9690                 { 0x00008000, 0x01000},
9691                 { 0x00010000, 0x01000},
9692                 { 0xffffffff, 0x00000}
9693         };
9694         struct mem_entry *mem_tbl;
9695         int err = 0;
9696         int i;
9697
9698         if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
9699                 mem_tbl = mem_tbl_5755;
9700         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9701                 mem_tbl = mem_tbl_5906;
9702         else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
9703                 mem_tbl = mem_tbl_5705;
9704         else
9705                 mem_tbl = mem_tbl_570x;
9706
9707         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
9708                 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
9709                     mem_tbl[i].len)) != 0)
9710                         break;
9711         }
9712
9713         return err;
9714 }
9715
9716 #define TG3_MAC_LOOPBACK        0
9717 #define TG3_PHY_LOOPBACK        1
9718
9719 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
9720 {
9721         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
9722         u32 desc_idx;
9723         struct sk_buff *skb, *rx_skb;
9724         u8 *tx_data;
9725         dma_addr_t map;
9726         int num_pkts, tx_len, rx_len, i, err;
9727         struct tg3_rx_buffer_desc *desc;
9728
9729         if (loopback_mode == TG3_MAC_LOOPBACK) {
9730                 /* HW errata - mac loopback fails in some cases on 5780.
9731                  * Normal traffic and PHY loopback are not affected by
9732                  * errata.
9733                  */
9734                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
9735                         return 0;
9736
9737                 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
9738                            MAC_MODE_PORT_INT_LPBACK;
9739                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
9740                         mac_mode |= MAC_MODE_LINK_POLARITY;
9741                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
9742                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9743                 else
9744                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9745                 tw32(MAC_MODE, mac_mode);
9746         } else if (loopback_mode == TG3_PHY_LOOPBACK) {
9747                 u32 val;
9748
9749                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9750                         u32 phytest;
9751
9752                         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
9753                                 u32 phy, reg = MII_TG3_FET_SHDW_AUXSTAT2;
9754
9755                                 tg3_writephy(tp, MII_TG3_FET_TEST,
9756                                              phytest | MII_TG3_FET_SHADOW_EN);
9757                                 if (!tg3_readphy(tp, reg, &phy)) {
9758                                         phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
9759                                         tg3_writephy(tp, reg, phy);
9760                                 }
9761                                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
9762                         }
9763                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
9764                 } else
9765                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
9766
9767                 tg3_phy_toggle_automdix(tp, 0);
9768
9769                 tg3_writephy(tp, MII_BMCR, val);
9770                 udelay(40);
9771
9772                 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
9773                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
9774                         tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
9775                         mac_mode |= MAC_MODE_PORT_MODE_MII;
9776                 } else
9777                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
9778
9779                 /* reset to prevent losing 1st rx packet intermittently */
9780                 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
9781                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9782                         udelay(10);
9783                         tw32_f(MAC_RX_MODE, tp->rx_mode);
9784                 }
9785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
9786                         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
9787                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
9788                         else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
9789                                 mac_mode |= MAC_MODE_LINK_POLARITY;
9790                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
9791                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
9792                 }
9793                 tw32(MAC_MODE, mac_mode);
9794         }
9795         else
9796                 return -EINVAL;
9797
9798         err = -EIO;
9799
9800         tx_len = 1514;
9801         skb = netdev_alloc_skb(tp->dev, tx_len);
9802         if (!skb)
9803                 return -ENOMEM;
9804
9805         tx_data = skb_put(skb, tx_len);
9806         memcpy(tx_data, tp->dev->dev_addr, 6);
9807         memset(tx_data + 6, 0x0, 8);
9808
9809         tw32(MAC_RX_MTU_SIZE, tx_len + 4);
9810
9811         for (i = 14; i < tx_len; i++)
9812                 tx_data[i] = (u8) (i & 0xff);
9813
9814         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
9815
9816         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9817              HOSTCC_MODE_NOW);
9818
9819         udelay(10);
9820
9821         rx_start_idx = tp->hw_status->idx[0].rx_producer;
9822
9823         num_pkts = 0;
9824
9825         tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
9826
9827         tp->tx_prod++;
9828         num_pkts++;
9829
9830         tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
9831                      tp->tx_prod);
9832         tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
9833
9834         udelay(10);
9835
9836         /* 250 usec to allow enough time on some 10/100 Mbps devices.  */
9837         for (i = 0; i < 25; i++) {
9838                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9839                        HOSTCC_MODE_NOW);
9840
9841                 udelay(10);
9842
9843                 tx_idx = tp->hw_status->idx[0].tx_consumer;
9844                 rx_idx = tp->hw_status->idx[0].rx_producer;
9845                 if ((tx_idx == tp->tx_prod) &&
9846                     (rx_idx == (rx_start_idx + num_pkts)))
9847                         break;
9848         }
9849
9850         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
9851         dev_kfree_skb(skb);
9852
9853         if (tx_idx != tp->tx_prod)
9854                 goto out;
9855
9856         if (rx_idx != rx_start_idx + num_pkts)
9857                 goto out;
9858
9859         desc = &tp->rx_rcb[rx_start_idx];
9860         desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
9861         opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
9862         if (opaque_key != RXD_OPAQUE_RING_STD)
9863                 goto out;
9864
9865         if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
9866             (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
9867                 goto out;
9868
9869         rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
9870         if (rx_len != tx_len)
9871                 goto out;
9872
9873         rx_skb = tp->rx_std_buffers[desc_idx].skb;
9874
9875         map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
9876         pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
9877
9878         for (i = 14; i < tx_len; i++) {
9879                 if (*(rx_skb->data + i) != (u8) (i & 0xff))
9880                         goto out;
9881         }
9882         err = 0;
9883
9884         /* tg3_free_rings will unmap and free the rx_skb */
9885 out:
9886         return err;
9887 }
9888
9889 #define TG3_MAC_LOOPBACK_FAILED         1
9890 #define TG3_PHY_LOOPBACK_FAILED         2
9891 #define TG3_LOOPBACK_FAILED             (TG3_MAC_LOOPBACK_FAILED |      \
9892                                          TG3_PHY_LOOPBACK_FAILED)
9893
9894 static int tg3_test_loopback(struct tg3 *tp)
9895 {
9896         int err = 0;
9897         u32 cpmuctrl = 0;
9898
9899         if (!netif_running(tp->dev))
9900                 return TG3_LOOPBACK_FAILED;
9901
9902         err = tg3_reset_hw(tp, 1);
9903         if (err)
9904                 return TG3_LOOPBACK_FAILED;
9905
9906         /* Turn off gphy autopowerdown. */
9907         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9908                 tg3_phy_toggle_apd(tp, false);
9909
9910         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9911                 int i;
9912                 u32 status;
9913
9914                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
9915
9916                 /* Wait for up to 40 microseconds to acquire lock. */
9917                 for (i = 0; i < 4; i++) {
9918                         status = tr32(TG3_CPMU_MUTEX_GNT);
9919                         if (status == CPMU_MUTEX_GNT_DRIVER)
9920                                 break;
9921                         udelay(10);
9922                 }
9923
9924                 if (status != CPMU_MUTEX_GNT_DRIVER)
9925                         return TG3_LOOPBACK_FAILED;
9926
9927                 /* Turn off link-based power management. */
9928                 cpmuctrl = tr32(TG3_CPMU_CTRL);
9929                 tw32(TG3_CPMU_CTRL,
9930                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
9931                                   CPMU_CTRL_LINK_AWARE_MODE));
9932         }
9933
9934         if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
9935                 err |= TG3_MAC_LOOPBACK_FAILED;
9936
9937         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
9938                 tw32(TG3_CPMU_CTRL, cpmuctrl);
9939
9940                 /* Release the mutex */
9941                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
9942         }
9943
9944         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9945             !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
9946                 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
9947                         err |= TG3_PHY_LOOPBACK_FAILED;
9948         }
9949
9950         /* Re-enable gphy autopowerdown. */
9951         if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
9952                 tg3_phy_toggle_apd(tp, true);
9953
9954         return err;
9955 }
9956
/* ethtool self-test entry point.  Runs the NVRAM, link, register,
 * memory, loopback and interrupt tests; per-test failure is recorded
 * in data[0..5] and any failure also sets ETH_TEST_FL_FAILED in
 * etest->flags.  The register/memory/loopback/interrupt tests halt and
 * later restart the chip, so they run only when ETH_TEST_FL_OFFLINE is
 * requested.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake the chip up first if it was left in low-power mode. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its CPUs before poking at
		 * registers and on-chip memory.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		tg3_full_unlock(tp);

		/* The interrupt test runs without the full lock held. */
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reset and, if the interface is up, restart the chip. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Drop back to low-power mode if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
10034
10035 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10036 {
10037         struct mii_ioctl_data *data = if_mii(ifr);
10038         struct tg3 *tp = netdev_priv(dev);
10039         int err;
10040
10041         if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10042                 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10043                         return -EAGAIN;
10044                 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10045         }
10046
10047         switch(cmd) {
10048         case SIOCGMIIPHY:
10049                 data->phy_id = PHY_ADDR;
10050
10051                 /* fallthru */
10052         case SIOCGMIIREG: {
10053                 u32 mii_regval;
10054
10055                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10056                         break;                  /* We have no PHY */
10057
10058                 if (tp->link_config.phy_is_low_power)
10059                         return -EAGAIN;
10060
10061                 spin_lock_bh(&tp->lock);
10062                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10063                 spin_unlock_bh(&tp->lock);
10064
10065                 data->val_out = mii_regval;
10066
10067                 return err;
10068         }
10069
10070         case SIOCSMIIREG:
10071                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10072                         break;                  /* We have no PHY */
10073
10074                 if (!capable(CAP_NET_ADMIN))
10075                         return -EPERM;
10076
10077                 if (tp->link_config.phy_is_low_power)
10078                         return -EAGAIN;
10079
10080                 spin_lock_bh(&tp->lock);
10081                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10082                 spin_unlock_bh(&tp->lock);
10083
10084                 return err;
10085
10086         default:
10087                 /* do nothing */
10088                 break;
10089         }
10090         return -EOPNOTSUPP;
10091 }
10092
10093 #if TG3_VLAN_TAG_USED
10094 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10095 {
10096         struct tg3 *tp = netdev_priv(dev);
10097
10098         if (!netif_running(dev)) {
10099                 tp->vlgrp = grp;
10100                 return;
10101         }
10102
10103         tg3_netif_stop(tp);
10104
10105         tg3_full_lock(tp, 0);
10106
10107         tp->vlgrp = grp;
10108
10109         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10110         __tg3_set_rx_mode(dev);
10111
10112         tg3_netif_start(tp);
10113
10114         tg3_full_unlock(tp);
10115 }
10116 #endif
10117
10118 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10119 {
10120         struct tg3 *tp = netdev_priv(dev);
10121
10122         memcpy(ec, &tp->coal, sizeof(*ec));
10123         return 0;
10124 }
10125
10126 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10127 {
10128         struct tg3 *tp = netdev_priv(dev);
10129         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10130         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10131
10132         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10133                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10134                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10135                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10136                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10137         }
10138
10139         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10140             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10141             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10142             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10143             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10144             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10145             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10146             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10147             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10148             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10149                 return -EINVAL;
10150
10151         /* No rx interrupts will be generated if both are zero */
10152         if ((ec->rx_coalesce_usecs == 0) &&
10153             (ec->rx_max_coalesced_frames == 0))
10154                 return -EINVAL;
10155
10156         /* No tx interrupts will be generated if both are zero */
10157         if ((ec->tx_coalesce_usecs == 0) &&
10158             (ec->tx_max_coalesced_frames == 0))
10159                 return -EINVAL;
10160
10161         /* Only copy relevant parameters, ignore all others. */
10162         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10163         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10164         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10165         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10166         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10167         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10168         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10169         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10170         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
10171
10172         if (netif_running(dev)) {
10173                 tg3_full_lock(tp, 0);
10174                 __tg3_set_coalesce(tp, &tp->coal);
10175                 tg3_full_unlock(tp);
10176         }
10177         return 0;
10178 }
10179
/* ethtool entry points for this driver; handlers are defined above. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
10212
10213 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10214 {
10215         u32 cursize, val, magic;
10216
10217         tp->nvram_size = EEPROM_CHIP_SIZE;
10218
10219         if (tg3_nvram_read(tp, 0, &magic) != 0)
10220                 return;
10221
10222         if ((magic != TG3_EEPROM_MAGIC) &&
10223             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10224             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10225                 return;
10226
10227         /*
10228          * Size the chip by reading offsets at increasing powers of two.
10229          * When we encounter our validation signature, we know the addressing
10230          * has wrapped around, and thus have our chip size.
10231          */
10232         cursize = 0x10;
10233
10234         while (cursize < tp->nvram_size) {
10235                 if (tg3_nvram_read(tp, cursize, &val) != 0)
10236                         return;
10237
10238                 if (val == magic)
10239                         break;
10240
10241                 cursize <<= 1;
10242         }
10243
10244         tp->nvram_size = cursize;
10245 }
10246
10247 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10248 {
10249         u32 val;
10250
10251         if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
10252             tg3_nvram_read(tp, 0, &val) != 0)
10253                 return;
10254
10255         /* Selfboot format */
10256         if (val != TG3_EEPROM_MAGIC) {
10257                 tg3_get_eeprom_size(tp);
10258                 return;
10259         }
10260
10261         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
10262                 if (val != 0) {
10263                         /* This is confusing.  We want to operate on the
10264                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
10265                          * call will read from NVRAM and byteswap the data
10266                          * according to the byteswapping settings for all
10267                          * other register accesses.  This ensures the data we
10268                          * want will always reside in the lower 16-bits.
10269                          * However, the data in NVRAM is in LE format, which
10270                          * means the data from the NVRAM read will always be
10271                          * opposite the endianness of the CPU.  The 16-bit
10272                          * byteswap then brings the data to CPU endianness.
10273                          */
10274                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
10275                         return;
10276                 }
10277         }
10278         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10279 }
10280
10281 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10282 {
10283         u32 nvcfg1;
10284
10285         nvcfg1 = tr32(NVRAM_CFG1);
10286         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10287                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10288         }
10289         else {
10290                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10291                 tw32(NVRAM_CFG1, nvcfg1);
10292         }
10293
10294         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10295             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10296                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10297                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10298                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10299                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10300                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10301                                 break;
10302                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10303                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10304                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10305                                 break;
10306                         case FLASH_VENDOR_ATMEL_EEPROM:
10307                                 tp->nvram_jedecnum = JEDEC_ATMEL;
10308                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10309                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10310                                 break;
10311                         case FLASH_VENDOR_ST:
10312                                 tp->nvram_jedecnum = JEDEC_ST;
10313                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10314                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10315                                 break;
10316                         case FLASH_VENDOR_SAIFUN:
10317                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
10318                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10319                                 break;
10320                         case FLASH_VENDOR_SST_SMALL:
10321                         case FLASH_VENDOR_SST_LARGE:
10322                                 tp->nvram_jedecnum = JEDEC_SST;
10323                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
10324                                 break;
10325                 }
10326         }
10327         else {
10328                 tp->nvram_jedecnum = JEDEC_ATMEL;
10329                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10330                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10331         }
10332 }
10333
10334 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10335 {
10336         u32 nvcfg1;
10337
10338         nvcfg1 = tr32(NVRAM_CFG1);
10339
10340         /* NVRAM protection for TPM */
10341         if (nvcfg1 & (1 << 27))
10342                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10343
10344         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10345                 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10346                 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10347                         tp->nvram_jedecnum = JEDEC_ATMEL;
10348                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10349                         break;
10350                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10351                         tp->nvram_jedecnum = JEDEC_ATMEL;
10352                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10353                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10354                         break;
10355                 case FLASH_5752VENDOR_ST_M45PE10:
10356                 case FLASH_5752VENDOR_ST_M45PE20:
10357                 case FLASH_5752VENDOR_ST_M45PE40:
10358                         tp->nvram_jedecnum = JEDEC_ST;
10359                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10360                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10361                         break;
10362         }
10363
10364         if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10365                 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10366                         case FLASH_5752PAGE_SIZE_256:
10367                                 tp->nvram_pagesize = 256;
10368                                 break;
10369                         case FLASH_5752PAGE_SIZE_512:
10370                                 tp->nvram_pagesize = 512;
10371                                 break;
10372                         case FLASH_5752PAGE_SIZE_1K:
10373                                 tp->nvram_pagesize = 1024;
10374                                 break;
10375                         case FLASH_5752PAGE_SIZE_2K:
10376                                 tp->nvram_pagesize = 2048;
10377                                 break;
10378                         case FLASH_5752PAGE_SIZE_4K:
10379                                 tp->nvram_pagesize = 4096;
10380                                 break;
10381                         case FLASH_5752PAGE_SIZE_264:
10382                                 tp->nvram_pagesize = 264;
10383                                 break;
10384                 }
10385         }
10386         else {
10387                 /* For eeprom, set pagesize to maximum eeprom size */
10388                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10389
10390                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10391                 tw32(NVRAM_CFG1, nvcfg1);
10392         }
10393 }
10394
10395 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10396 {
10397         u32 nvcfg1, protect = 0;
10398
10399         nvcfg1 = tr32(NVRAM_CFG1);
10400
10401         /* NVRAM protection for TPM */
10402         if (nvcfg1 & (1 << 27)) {
10403                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10404                 protect = 1;
10405         }
10406
10407         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10408         switch (nvcfg1) {
10409                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10410                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10411                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10412                 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10413                         tp->nvram_jedecnum = JEDEC_ATMEL;
10414                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10415                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10416                         tp->nvram_pagesize = 264;
10417                         if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10418                             nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10419                                 tp->nvram_size = (protect ? 0x3e200 :
10420                                                   TG3_NVRAM_SIZE_512KB);
10421                         else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10422                                 tp->nvram_size = (protect ? 0x1f200 :
10423                                                   TG3_NVRAM_SIZE_256KB);
10424                         else
10425                                 tp->nvram_size = (protect ? 0x1f200 :
10426                                                   TG3_NVRAM_SIZE_128KB);
10427                         break;
10428                 case FLASH_5752VENDOR_ST_M45PE10:
10429                 case FLASH_5752VENDOR_ST_M45PE20:
10430                 case FLASH_5752VENDOR_ST_M45PE40:
10431                         tp->nvram_jedecnum = JEDEC_ST;
10432                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10433                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10434                         tp->nvram_pagesize = 256;
10435                         if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10436                                 tp->nvram_size = (protect ?
10437                                                   TG3_NVRAM_SIZE_64KB :
10438                                                   TG3_NVRAM_SIZE_128KB);
10439                         else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10440                                 tp->nvram_size = (protect ?
10441                                                   TG3_NVRAM_SIZE_64KB :
10442                                                   TG3_NVRAM_SIZE_256KB);
10443                         else
10444                                 tp->nvram_size = (protect ?
10445                                                   TG3_NVRAM_SIZE_128KB :
10446                                                   TG3_NVRAM_SIZE_512KB);
10447                         break;
10448         }
10449 }
10450
10451 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10452 {
10453         u32 nvcfg1;
10454
10455         nvcfg1 = tr32(NVRAM_CFG1);
10456
10457         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10458                 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10459                 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10460                 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10461                 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10462                         tp->nvram_jedecnum = JEDEC_ATMEL;
10463                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10464                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10465
10466                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10467                         tw32(NVRAM_CFG1, nvcfg1);
10468                         break;
10469                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10470                 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10471                 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10472                 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10473                         tp->nvram_jedecnum = JEDEC_ATMEL;
10474                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10475                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10476                         tp->nvram_pagesize = 264;
10477                         break;
10478                 case FLASH_5752VENDOR_ST_M45PE10:
10479                 case FLASH_5752VENDOR_ST_M45PE20:
10480                 case FLASH_5752VENDOR_ST_M45PE40:
10481                         tp->nvram_jedecnum = JEDEC_ST;
10482                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10483                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10484                         tp->nvram_pagesize = 256;
10485                         break;
10486         }
10487 }
10488
10489 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10490 {
10491         u32 nvcfg1, protect = 0;
10492
10493         nvcfg1 = tr32(NVRAM_CFG1);
10494
10495         /* NVRAM protection for TPM */
10496         if (nvcfg1 & (1 << 27)) {
10497                 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10498                 protect = 1;
10499         }
10500
10501         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10502         switch (nvcfg1) {
10503                 case FLASH_5761VENDOR_ATMEL_ADB021D:
10504                 case FLASH_5761VENDOR_ATMEL_ADB041D:
10505                 case FLASH_5761VENDOR_ATMEL_ADB081D:
10506                 case FLASH_5761VENDOR_ATMEL_ADB161D:
10507                 case FLASH_5761VENDOR_ATMEL_MDB021D:
10508                 case FLASH_5761VENDOR_ATMEL_MDB041D:
10509                 case FLASH_5761VENDOR_ATMEL_MDB081D:
10510                 case FLASH_5761VENDOR_ATMEL_MDB161D:
10511                         tp->nvram_jedecnum = JEDEC_ATMEL;
10512                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10513                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10514                         tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10515                         tp->nvram_pagesize = 256;
10516                         break;
10517                 case FLASH_5761VENDOR_ST_A_M45PE20:
10518                 case FLASH_5761VENDOR_ST_A_M45PE40:
10519                 case FLASH_5761VENDOR_ST_A_M45PE80:
10520                 case FLASH_5761VENDOR_ST_A_M45PE16:
10521                 case FLASH_5761VENDOR_ST_M_M45PE20:
10522                 case FLASH_5761VENDOR_ST_M_M45PE40:
10523                 case FLASH_5761VENDOR_ST_M_M45PE80:
10524                 case FLASH_5761VENDOR_ST_M_M45PE16:
10525                         tp->nvram_jedecnum = JEDEC_ST;
10526                         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10527                         tp->tg3_flags2 |= TG3_FLG2_FLASH;
10528                         tp->nvram_pagesize = 256;
10529                         break;
10530         }
10531
10532         if (protect) {
10533                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10534         } else {
10535                 switch (nvcfg1) {
10536                         case FLASH_5761VENDOR_ATMEL_ADB161D:
10537                         case FLASH_5761VENDOR_ATMEL_MDB161D:
10538                         case FLASH_5761VENDOR_ST_A_M45PE16:
10539                         case FLASH_5761VENDOR_ST_M_M45PE16:
10540                                 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10541                                 break;
10542                         case FLASH_5761VENDOR_ATMEL_ADB081D:
10543                         case FLASH_5761VENDOR_ATMEL_MDB081D:
10544                         case FLASH_5761VENDOR_ST_A_M45PE80:
10545                         case FLASH_5761VENDOR_ST_M_M45PE80:
10546                                 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10547                                 break;
10548                         case FLASH_5761VENDOR_ATMEL_ADB041D:
10549                         case FLASH_5761VENDOR_ATMEL_MDB041D:
10550                         case FLASH_5761VENDOR_ST_A_M45PE40:
10551                         case FLASH_5761VENDOR_ST_M_M45PE40:
10552                                 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10553                                 break;
10554                         case FLASH_5761VENDOR_ATMEL_ADB021D:
10555                         case FLASH_5761VENDOR_ATMEL_MDB021D:
10556                         case FLASH_5761VENDOR_ST_A_M45PE20:
10557                         case FLASH_5761VENDOR_ST_M_M45PE20:
10558                                 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
10559                                 break;
10560                 }
10561         }
10562 }
10563
10564 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10565 {
10566         tp->nvram_jedecnum = JEDEC_ATMEL;
10567         tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10568         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10569 }
10570
/* Probe NVRAM configuration for 57780-class chips.  Decodes the vendor
 * and page-size fields of NVRAM_CFG1 into tp->nvram_* state; if the
 * vendor code is unrecognized the device is flagged as having no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		/* EEPROM parts: use the legacy access path and return
		 * early -- the page-size decode below is flash-only.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		/* Size depends on the specific Atmel device code. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		/* Size depends on the specific ST device code. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}

	/* Flash-only: decode the page size; page sizes that are powers of
	 * two also disable NVRAM address translation.
	 */
	switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}
10667
10668 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10669 static void __devinit tg3_nvram_init(struct tg3 *tp)
10670 {
10671         tw32_f(GRC_EEPROM_ADDR,
10672              (EEPROM_ADDR_FSM_RESET |
10673               (EEPROM_DEFAULT_CLOCK_PERIOD <<
10674                EEPROM_ADDR_CLKPERD_SHIFT)));
10675
10676         msleep(1);
10677
10678         /* Enable seeprom accesses. */
10679         tw32_f(GRC_LOCAL_CTRL,
10680              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10681         udelay(100);
10682
10683         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10684             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10685                 tp->tg3_flags |= TG3_FLAG_NVRAM;
10686
10687                 if (tg3_nvram_lock(tp)) {
10688                         printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10689                                "tg3_nvram_init failed.\n", tp->dev->name);
10690                         return;
10691                 }
10692                 tg3_enable_nvram_access(tp);
10693
10694                 tp->nvram_size = 0;
10695
10696                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10697                         tg3_get_5752_nvram_info(tp);
10698                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10699                         tg3_get_5755_nvram_info(tp);
10700                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10701                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10702                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10703                         tg3_get_5787_nvram_info(tp);
10704                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10705                         tg3_get_5761_nvram_info(tp);
10706                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10707                         tg3_get_5906_nvram_info(tp);
10708                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
10709                         tg3_get_57780_nvram_info(tp);
10710                 else
10711                         tg3_get_nvram_info(tp);
10712
10713                 if (tp->nvram_size == 0)
10714                         tg3_get_nvram_size(tp);
10715
10716                 tg3_disable_nvram_access(tp);
10717                 tg3_nvram_unlock(tp);
10718
10719         } else {
10720                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10721
10722                 tg3_get_eeprom_size(tp);
10723         }
10724 }
10725
/* Write a block to NVRAM through the legacy SEEPROM interface, one
 * 32-bit word at a time.  Returns 0 on success or -EBUSY when the
 * controller never signals completion for a word.  Callers pass
 * dword-aligned offset/len per the surrounding write path -- NOTE
 * (review): alignment is not validated here.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Clear any stale completion status before starting. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		/* Kick off the write of this word at the target address. */
		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll up to ~1000 ms for the word write to complete. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
10774
10775 /* offset and length are dword aligned */
10776 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
10777                 u8 *buf)
10778 {
10779         int ret = 0;
10780         u32 pagesize = tp->nvram_pagesize;
10781         u32 pagemask = pagesize - 1;
10782         u32 nvram_cmd;
10783         u8 *tmp;
10784
10785         tmp = kmalloc(pagesize, GFP_KERNEL);
10786         if (tmp == NULL)
10787                 return -ENOMEM;
10788
10789         while (len) {
10790                 int j;
10791                 u32 phy_addr, page_off, size;
10792
10793                 phy_addr = offset & ~pagemask;
10794
10795                 for (j = 0; j < pagesize; j += 4) {
10796                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
10797                                                   (__be32 *) (tmp + j));
10798                         if (ret)
10799                                 break;
10800                 }
10801                 if (ret)
10802                         break;
10803
10804                 page_off = offset & pagemask;
10805                 size = pagesize;
10806                 if (len < size)
10807                         size = len;
10808
10809                 len -= size;
10810
10811                 memcpy(tmp + page_off, buf, size);
10812
10813                 offset = offset + (pagesize - page_off);
10814
10815                 tg3_enable_nvram_access(tp);
10816
10817                 /*
10818                  * Before we can erase the flash page, we need
10819                  * to issue a special "write enable" command.
10820                  */
10821                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10822
10823                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10824                         break;
10825
10826                 /* Erase the target page */
10827                 tw32(NVRAM_ADDR, phy_addr);
10828
10829                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
10830                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
10831
10832                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10833                         break;
10834
10835                 /* Issue another write enable to start the write. */
10836                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10837
10838                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
10839                         break;
10840
10841                 for (j = 0; j < pagesize; j += 4) {
10842                         __be32 data;
10843
10844                         data = *((__be32 *) (tmp + j));
10845
10846                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
10847
10848                         tw32(NVRAM_ADDR, phy_addr + j);
10849
10850                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
10851                                 NVRAM_CMD_WR;
10852
10853                         if (j == 0)
10854                                 nvram_cmd |= NVRAM_CMD_FIRST;
10855                         else if (j == (pagesize - 4))
10856                                 nvram_cmd |= NVRAM_CMD_LAST;
10857
10858                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
10859                                 break;
10860                 }
10861                 if (ret)
10862                         break;
10863         }
10864
10865         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
10866         tg3_nvram_exec_cmd(tp, nvram_cmd);
10867
10868         kfree(tmp);
10869
10870         return ret;
10871 }
10872
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	/* Push the data to buffered flash one dword at a time; the
	 * controller tracks page boundaries via the FIRST/LAST flags.
	 */
	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		/* buf holds NVRAM byte order; convert for the WRDATA
		 * register, which takes a CPU-order value.
		 */
		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		/* Translate the linear offset to the flash physical
		 * address (handles interleaved/remapped layouts).
		 */
		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		/* Mark the first word of each page (or of the whole
		 * transfer) and the last word of each page/transfer.
		 */
		if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* Older ST parts need an explicit write-enable command
		 * before each page burst starts.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
10923
10924 /* offset and length are dword aligned */
10925 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
10926 {
10927         int ret;
10928
10929         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10930                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
10931                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
10932                 udelay(40);
10933         }
10934
10935         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
10936                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
10937         }
10938         else {
10939                 u32 grc_mode;
10940
10941                 ret = tg3_nvram_lock(tp);
10942                 if (ret)
10943                         return ret;
10944
10945                 tg3_enable_nvram_access(tp);
10946                 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
10947                     !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
10948                         tw32(NVRAM_WRITE1, 0x406);
10949
10950                 grc_mode = tr32(GRC_MODE);
10951                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
10952
10953                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
10954                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
10955
10956                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
10957                                 buf);
10958                 }
10959                 else {
10960                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
10961                                 buf);
10962                 }
10963
10964                 grc_mode = tr32(GRC_MODE);
10965                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
10966
10967                 tg3_disable_nvram_access(tp);
10968                 tg3_nvram_unlock(tp);
10969         }
10970
10971         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
10972                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10973                 udelay(40);
10974         }
10975
10976         return ret;
10977 }
10978
/* Maps a PCI (subsystem vendor, subsystem device) pair to the PHY id
 * expected on that board.  A phy_id of 0 marks a serdes/fiber board
 * (see tg3_phy_probe(), which sets TG3_FLG2_PHY_SERDES for it).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
10983
/* Hardcoded board table used as a last resort by tg3_phy_probe() when
 * neither the MII ID registers nor the NVRAM config yield a PHY id.
 * Entries with phy_id == 0 are serdes (fiber) boards.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
11021
11022 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11023 {
11024         int i;
11025
11026         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11027                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11028                      tp->pdev->subsystem_vendor) &&
11029                     (subsys_id_to_phy_id[i].subsys_devid ==
11030                      tp->pdev->subsystem_device))
11031                         return &subsys_id_to_phy_id[i];
11032         }
11033         return NULL;
11034 }
11035
/* Read the boot-time hardware configuration that the bootcode left in
 * NIC SRAM (or, on the 5906, in the VCPU shadow config register) and
 * translate it into tp->phy_id, tp->led_ctrl and the tg3_flags* bits
 * (WOL capability/enable, ASF/APE, serdes vs copper, RGMII options).
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* The 5906 exposes its config through the VCPU shadow register
	 * rather than NIC SRAM; handle it and skip the SRAM path.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}

	/* A valid signature means the bootcode populated the SRAM config
	 * area; without it, the defaults chosen above are kept.
	 */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on newer ASICs with a bootcode
		 * version in the (0, 0x100) range.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY id fields into the driver's
		 * internal phy_id layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ parts carry the LED mode (including the extended
		 * Shasta modes) in CFG_2; older ones use CFG.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Dell 5700/5701 boards always use PHY_2 LED mode. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		/* WP bit set: keep write protect (except on two known
		 * Arima boards).  WP clear: treat as a plug-in NIC.
		 */
		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		/* Serdes boards are only WOL-capable if the config
		 * explicitly says so.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
done:
	/* Propagate the WOL capability and enable state to the core. */
	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
	device_set_wakeup_enable(&tp->pdev->dev,
				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
}
11248
11249 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
11250 {
11251         int i;
11252         u32 val;
11253
11254         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11255         tw32(OTP_CTRL, cmd);
11256
11257         /* Wait for up to 1 ms for command to execute. */
11258         for (i = 0; i < 100; i++) {
11259                 val = tr32(OTP_STATUS);
11260                 if (val & OTP_STATUS_CMD_DONE)
11261                         break;
11262                 udelay(10);
11263         }
11264
11265         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11266 }
11267
11268 /* Read the gphy configuration from the OTP region of the chip.  The gphy
11269  * configuration is a 32-bit value that straddles the alignment boundary.
11270  * We do two 32-bit reads and then shift and merge the results.
11271  */
11272 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11273 {
11274         u32 bhalf_otp, thalf_otp;
11275
11276         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11277
11278         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
11279                 return 0;
11280
11281         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11282
11283         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11284                 return 0;
11285
11286         thalf_otp = tr32(OTP_READ_DATA);
11287
11288         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11289
11290         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11291                 return 0;
11292
11293         bhalf_otp = tr32(OTP_READ_DATA);
11294
11295         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
11296 }
11297
/* Determine which PHY is attached and prepare it for use.
 *
 * Precedence for the PHY id: the physical MII ID registers (unless
 * ASF/APE firmware owns the PHY), then the id tg3_get_eeprom_hw_cfg()
 * stored in tp->phy_id, then the hardcoded subsystem-id table.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* When phylib manages the PHY, defer entirely to it. */
	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID words into the driver's internal
		 * phy_id layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
	}

	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == PHY_ID_BCM8002)
			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		else
			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
	} else {
		if (tp->phy_id != PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			/* A zero table entry denotes a serdes board. */
			if (!tp->phy_id ||
			    tp->phy_id == PHY_ID_BCM8002)
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}
	}

	/* For copper PHYs not owned by firmware: if the link is not
	 * already up, reset the PHY and (re)program autonegotiation
	 * advertisements.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		/* BMSR latches link-down events; read it twice so the
		 * second value reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			/* 5701 A0/B0 revs additionally request autoneg
			 * master mode.
			 */
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		/* If the PHY is not already advertising the full set of
		 * modes, rewrite the advertisement and restart autoneg.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;
	}

	/* NOTE(review): when the call above succeeded (err == 0), this
	 * runs tg3_init_5401phy_dsp() a second time.  It looks deliberate
	 * (5401 DSP quirk) rather than a copy/paste error -- confirm
	 * before simplifying.
	 */
	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
		err = tg3_init_5401phy_dsp(tp);
	}

	/* Serdes links advertise fiber gigabit; 10/100-only parts must
	 * not advertise gigabit at all.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}
11428
/* Extract the board part number from the VPD (Vital Product Data)
 * area -- read from NVRAM when the tg3 signature is present, otherwise
 * through the PCI VPD capability -- and store it in
 * tp->board_part_number.  Falls back to a canned per-device string
 * when no part number can be found.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];   /* in little-endian format */
	unsigned int i;
	u32 magic;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* VPD lives at NVRAM offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, 0x100 + i, &tmp))
				goto out_not_found;

			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
		}
	} else {
		int vpd_cap;

		/* No tg3 NVRAM signature: fetch the VPD dwords through
		 * the PCI VPD capability instead.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			__le32 v;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll bit 15 of VPD_ADDR, which the device sets
			 * once the requested dword is available.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			v = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &v, sizeof(v));
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Skip identifier-string (0x82) and read-write (0x91)
		 * VPD resources; the 16-bit little-endian length follows
		 * the tag byte.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Only a read-only resource (0x90) can hold the part
		 * number; anything else means malformed VPD.
		 */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the keyword/length/value triples looking for
		 * the "PN" (part number) keyword.
		 */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* Bound the copy by both the field width
				 * and the end of vpd_data.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	/* Fall back to a canned name for the devices we can identify. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
		strcpy(tp->board_part_number, "BCM57780");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
		strcpy(tp->board_part_number, "BCM57760");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
		strcpy(tp->board_part_number, "BCM57790");
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
		strcpy(tp->board_part_number, "BCM57788");
	else
		strcpy(tp->board_part_number, "none");
}
11543
11544 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11545 {
11546         u32 val;
11547
11548         if (tg3_nvram_read(tp, offset, &val) ||
11549             (val & 0xfc000000) != 0x0c000000 ||
11550             tg3_nvram_read(tp, offset + 4, &val) ||
11551             val != 0)
11552                 return 0;
11553
11554         return 1;
11555 }
11556
/* Read the bootcode firmware version from NVRAM into tp->fw_ver.
 *
 * Newer images embed an explicit version string inside the image;
 * older ones only carry packed major/minor fields at a fixed NVRAM
 * location.  On any NVRAM read failure the function returns silently,
 * leaving tp->fw_ver untouched.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i;
	bool newver = false;

	/* Word 0xc presumably points at the bootcode image and word 0x4
	 * at the NVRAM data start — TODO confirm against the NVRAM map.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* A 0x0c000000 signature word followed by a zero word marks the
	 * newer image layout that embeds a version string (same check as
	 * tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	if (newver) {
		/* Word 2 of the image header holds the version string
		 * offset, relative to the NVRAM data start.
		 */
		if (tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		/* Copy up to 16 bytes of the version string in 4-byte
		 * big-endian chunks, preserving on-media byte order.
		 */
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy layout: major/minor packed into one word. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor);
	}
}
11604
11605 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
11606 {
11607         u32 val, major, minor;
11608
11609         /* Use native endian representation */
11610         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
11611                 return;
11612
11613         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
11614                 TG3_NVM_HWSB_CFG1_MAJSFT;
11615         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
11616                 TG3_NVM_HWSB_CFG1_MINSFT;
11617
11618         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
11619 }
11620
/* Build the selfboot firmware version string in tp->fw_ver from the
 * EEPROM magic word @val and the format-1 revision header in NVRAM.
 *
 * Always writes at least "sb"; appends " vM.mm" and an optional build
 * letter only when the format/revision are recognized and the fields
 * are in range.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	/* Default to a bare "sb" in case we bail out early. */
	tp->fw_ver[0] = 's';
	tp->fw_ver[1] = 'b';
	tp->fw_ver[2] = '\0';

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* The edition header offset differs per format-1 revision. */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	default:
		/* Unknown revision: keep the plain "sb" string. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity-limit: minor is two decimal digits and build maps onto
	 * 'a'..'z' below, so reject anything out of range.
	 */
	if (minor > 99 || build > 26)
		return;

	snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor);

	/* Encode build 1..26 as a trailing letter 'a'..'z'.  NOTE: the
	 * fixed index 8 assumes major is a single digit, so the string
	 * so far is exactly "sb vM.mm" (8 chars) — verify for multi-digit
	 * majors.
	 */
	if (build > 0) {
		tp->fw_ver[8] = 'a' + build - 1;
		tp->fw_ver[9] = '\0';
	}
}
11665
/* Locate the ASF/management firmware image via the NVRAM directory and
 * append its version string (", <ver>") to whatever is already in
 * tp->fw_ver.  Returns silently (fw_ver unchanged) if no ASFINI
 * directory entry exists, the image is invalid, or any NVRAM read
 * fails.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the fixed-size NVRAM directory for the ASF init entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	/* Base load address: fixed for pre-5705 chips, otherwise read
	 * from the word preceding the directory entry.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* Note: the first read below overwrites 'offset' with the image
	 * location taken from the directory entry's second word.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	/* val is the version string's load address; convert it to an
	 * NVRAM offset relative to the image base.
	 */
	offset += val - start;

	vlen = strlen(tp->fw_ver);

	/* Separate from any version text already present. */
	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Append up to 16 bytes of version string in big-endian 4-byte
	 * chunks, truncating at the TG3_VER_SIZE buffer boundary.
	 */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial final chunk: copy only what fits, then stop. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
11717
11718 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
11719 {
11720         int vlen;
11721         u32 apedata;
11722
11723         if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
11724             !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
11725                 return;
11726
11727         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
11728         if (apedata != APE_SEG_SIG_MAGIC)
11729                 return;
11730
11731         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
11732         if (!(apedata & APE_FW_STATUS_READY))
11733                 return;
11734
11735         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
11736
11737         vlen = strlen(tp->fw_ver);
11738
11739         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
11740                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
11741                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
11742                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
11743                  (apedata & APE_FW_VERSION_BLDMSK));
11744 }
11745
11746 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11747 {
11748         u32 val;
11749
11750         if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
11751                 tp->fw_ver[0] = 's';
11752                 tp->fw_ver[1] = 'b';
11753                 tp->fw_ver[2] = '\0';
11754
11755                 return;
11756         }
11757
11758         if (tg3_nvram_read(tp, 0, &val))
11759                 return;
11760
11761         if (val == TG3_EEPROM_MAGIC)
11762                 tg3_read_bc_ver(tp);
11763         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
11764                 tg3_read_sb_ver(tp, val);
11765         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11766                 tg3_read_hwsb_ver(tp);
11767         else
11768                 return;
11769
11770         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11771              (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
11772                 return;
11773
11774         tg3_read_mgmtfw_ver(tp);
11775
11776         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11777 }
11778
11779 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11780
11781 static int __devinit tg3_get_invariants(struct tg3 *tp)
11782 {
11783         static struct pci_device_id write_reorder_chipsets[] = {
11784                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11785                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11786                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11787                              PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11788                 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11789                              PCI_DEVICE_ID_VIA_8385_0) },
11790                 { },
11791         };
11792         u32 misc_ctrl_reg;
11793         u32 pci_state_reg, grc_misc_cfg;
11794         u32 val;
11795         u16 pci_cmd;
11796         int err;
11797
11798         /* Force memory write invalidate off.  If we leave it on,
11799          * then on 5700_BX chips we have to enable a workaround.
11800          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11801          * to match the cacheline size.  The Broadcom driver have this
11802          * workaround but turns MWI off all the times so never uses
11803          * it.  This seems to suggest that the workaround is insufficient.
11804          */
11805         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11806         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11807         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11808
11809         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11810          * has the register indirect write enable bit set before
11811          * we try to access any of the MMIO registers.  It is also
11812          * critical that the PCI-X hw workaround situation is decided
11813          * before that as well.
11814          */
11815         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11816                               &misc_ctrl_reg);
11817
11818         tp->pci_chip_rev_id = (misc_ctrl_reg >>
11819                                MISC_HOST_CTRL_CHIPREV_SHIFT);
11820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11821                 u32 prod_id_asic_rev;
11822
11823                 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11824                                       &prod_id_asic_rev);
11825                 tp->pci_chip_rev_id = prod_id_asic_rev;
11826         }
11827
11828         /* Wrong chip ID in 5752 A0. This code can be removed later
11829          * as A0 is not in production.
11830          */
11831         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
11832                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
11833
11834         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
11835          * we need to disable memory and use config. cycles
11836          * only to access all registers. The 5702/03 chips
11837          * can mistakenly decode the special cycles from the
11838          * ICH chipsets as memory write cycles, causing corruption
11839          * of register and memory space. Only certain ICH bridges
11840          * will drive special cycles with non-zero data during the
11841          * address phase which can fall within the 5703's address
11842          * range. This is not an ICH bug as the PCI spec allows
11843          * non-zero address during special cycles. However, only
11844          * these ICH bridges are known to drive non-zero addresses
11845          * during special cycles.
11846          *
11847          * Since special cycles do not cross PCI bridges, we only
11848          * enable this workaround if the 5703 is on the secondary
11849          * bus of these ICH bridges.
11850          */
11851         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
11852             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
11853                 static struct tg3_dev_id {
11854                         u32     vendor;
11855                         u32     device;
11856                         u32     rev;
11857                 } ich_chipsets[] = {
11858                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
11859                           PCI_ANY_ID },
11860                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
11861                           PCI_ANY_ID },
11862                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
11863                           0xa },
11864                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
11865                           PCI_ANY_ID },
11866                         { },
11867                 };
11868                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
11869                 struct pci_dev *bridge = NULL;
11870
11871                 while (pci_id->vendor != 0) {
11872                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
11873                                                 bridge);
11874                         if (!bridge) {
11875                                 pci_id++;
11876                                 continue;
11877                         }
11878                         if (pci_id->rev != PCI_ANY_ID) {
11879                                 if (bridge->revision > pci_id->rev)
11880                                         continue;
11881                         }
11882                         if (bridge->subordinate &&
11883                             (bridge->subordinate->number ==
11884                              tp->pdev->bus->number)) {
11885
11886                                 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
11887                                 pci_dev_put(bridge);
11888                                 break;
11889                         }
11890                 }
11891         }
11892
11893         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
11894                 static struct tg3_dev_id {
11895                         u32     vendor;
11896                         u32     device;
11897                 } bridge_chipsets[] = {
11898                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
11899                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
11900                         { },
11901                 };
11902                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
11903                 struct pci_dev *bridge = NULL;
11904
11905                 while (pci_id->vendor != 0) {
11906                         bridge = pci_get_device(pci_id->vendor,
11907                                                 pci_id->device,
11908                                                 bridge);
11909                         if (!bridge) {
11910                                 pci_id++;
11911                                 continue;
11912                         }
11913                         if (bridge->subordinate &&
11914                             (bridge->subordinate->number <=
11915                              tp->pdev->bus->number) &&
11916                             (bridge->subordinate->subordinate >=
11917                              tp->pdev->bus->number)) {
11918                                 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
11919                                 pci_dev_put(bridge);
11920                                 break;
11921                         }
11922                 }
11923         }
11924
11925         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
11926          * DMA addresses > 40-bit. This bridge may have other additional
11927          * 57xx devices behind it in some 4-port NIC designs for example.
11928          * Any tg3 device found behind the bridge will also need the 40-bit
11929          * DMA workaround.
11930          */
11931         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
11933                 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
11934                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11935                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
11936         }
11937         else {
11938                 struct pci_dev *bridge = NULL;
11939
11940                 do {
11941                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
11942                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
11943                                                 bridge);
11944                         if (bridge && bridge->subordinate &&
11945                             (bridge->subordinate->number <=
11946                              tp->pdev->bus->number) &&
11947                             (bridge->subordinate->subordinate >=
11948                              tp->pdev->bus->number)) {
11949                                 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
11950                                 pci_dev_put(bridge);
11951                                 break;
11952                         }
11953                 } while (bridge);
11954         }
11955
11956         /* Initialize misc host control in PCI block. */
11957         tp->misc_host_ctrl |= (misc_ctrl_reg &
11958                                MISC_HOST_CTRL_CHIPREV);
11959         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11960                                tp->misc_host_ctrl);
11961
11962         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
11963             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
11964                 tp->pdev_peer = tg3_find_peer(tp);
11965
11966         /* Intentionally exclude ASIC_REV_5906 */
11967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11969             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
11971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
11972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
11973                 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
11974
11975         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
11977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11978             (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
11979             (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
11980                 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
11981
11982         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
11983             (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11984                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
11985
11986         /* 5700 B0 chips do not support checksumming correctly due
11987          * to hardware bugs.
11988          */
11989         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
11990                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
11991         else {
11992                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11993                 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
11994                 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
11995                         tp->dev->features |= NETIF_F_IPV6_CSUM;
11996         }
11997
11998         if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
11999                 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12000                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12001                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12002                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12003                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12004                      tp->pdev_peer == tp->pdev))
12005                         tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12006
12007                 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12008                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12009                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12010                         tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12011                 } else {
12012                         tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12013                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12014                                 ASIC_REV_5750 &&
12015                             tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12016                                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12017                 }
12018         }
12019
12020         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12021              (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12022                 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12023
12024         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12025                               &pci_state_reg);
12026
12027         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12028         if (tp->pcie_cap != 0) {
12029                 u16 lnkctl;
12030
12031                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12032
12033                 pcie_set_readrq(tp->pdev, 4096);
12034
12035                 pci_read_config_word(tp->pdev,
12036                                      tp->pcie_cap + PCI_EXP_LNKCTL,
12037                                      &lnkctl);
12038                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
12039                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12040                                 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12041                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12042                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12043                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
12044                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
12045                                 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
12046                 }
12047         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
12048                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12049         } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12050                    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12051                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12052                 if (!tp->pcix_cap) {
12053                         printk(KERN_ERR PFX "Cannot find PCI-X "
12054                                             "capability, aborting.\n");
12055                         return -EIO;
12056                 }
12057
12058                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
12059                         tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12060         }
12061
12062         /* If we have an AMD 762 or VIA K8T800 chipset, write
12063          * reordering to the mailbox registers done by the host
12064          * controller can cause major troubles.  We read back from
12065          * every mailbox register write to force the writes to be
12066          * posted to the chip in order.
12067          */
12068         if (pci_dev_present(write_reorder_chipsets) &&
12069             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12070                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12071
12072         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
12073                              &tp->pci_cacheline_sz);
12074         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12075                              &tp->pci_lat_timer);
12076         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12077             tp->pci_lat_timer < 64) {
12078                 tp->pci_lat_timer = 64;
12079                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
12080                                       tp->pci_lat_timer);
12081         }
12082
12083         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12084                 /* 5700 BX chips need to have their TX producer index
12085                  * mailboxes written twice to workaround a bug.
12086                  */
12087                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12088
12089                 /* If we are in PCI-X mode, enable register write workaround.
12090                  *
12091                  * The workaround is to use indirect register accesses
12092                  * for all chip writes not to mailbox registers.
12093                  */
12094                 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
12095                         u32 pm_reg;
12096
12097                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12098
12099                         /* The chip can have it's power management PCI config
12100                          * space registers clobbered due to this bug.
12101                          * So explicitly force the chip into D0 here.
12102                          */
12103                         pci_read_config_dword(tp->pdev,
12104                                               tp->pm_cap + PCI_PM_CTRL,
12105                                               &pm_reg);
12106                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12107                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12108                         pci_write_config_dword(tp->pdev,
12109                                                tp->pm_cap + PCI_PM_CTRL,
12110                                                pm_reg);
12111
12112                         /* Also, force SERR#/PERR# in PCI command. */
12113                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12114                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12115                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12116                 }
12117         }
12118
12119         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12120                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12121         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12122                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12123
12124         /* Chip-specific fixup from Broadcom driver */
12125         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12126             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12127                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12128                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12129         }
12130
12131         /* Default fast path register access methods */
12132         tp->read32 = tg3_read32;
12133         tp->write32 = tg3_write32;
12134         tp->read32_mbox = tg3_read32;
12135         tp->write32_mbox = tg3_write32;
12136         tp->write32_tx_mbox = tg3_write32;
12137         tp->write32_rx_mbox = tg3_write32;
12138
12139         /* Various workaround register access methods */
12140         if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12141                 tp->write32 = tg3_write_indirect_reg32;
12142         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12143                  ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12144                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12145                 /*
12146                  * Back to back register writes can cause problems on these
12147                  * chips, the workaround is to read back all reg writes
12148                  * except those to mailbox regs.
12149                  *
12150                  * See tg3_write_indirect_reg32().
12151                  */
12152                 tp->write32 = tg3_write_flush_reg32;
12153         }
12154
12155
12156         if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12157             (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12158                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12159                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12160                         tp->write32_rx_mbox = tg3_write_flush_reg32;
12161         }
12162
12163         if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12164                 tp->read32 = tg3_read_indirect_reg32;
12165                 tp->write32 = tg3_write_indirect_reg32;
12166                 tp->read32_mbox = tg3_read_indirect_mbox;
12167                 tp->write32_mbox = tg3_write_indirect_mbox;
12168                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12169                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12170
12171                 iounmap(tp->regs);
12172                 tp->regs = NULL;
12173
12174                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12175                 pci_cmd &= ~PCI_COMMAND_MEMORY;
12176                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12177         }
12178         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12179                 tp->read32_mbox = tg3_read32_mbox_5906;
12180                 tp->write32_mbox = tg3_write32_mbox_5906;
12181                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12182                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12183         }
12184
12185         if (tp->write32 == tg3_write_indirect_reg32 ||
12186             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12187              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12188               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12189                 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12190
12191         /* Get eeprom hw config before calling tg3_set_power_state().
12192          * In particular, the TG3_FLG2_IS_NIC flag must be
12193          * determined before calling tg3_set_power_state() so that
12194          * we know whether or not to switch out of Vaux power.
12195          * When the flag is set, it means that GPIO1 is used for eeprom
12196          * write protect and also implies that it is a LOM where GPIOs
12197          * are not used to switch power.
12198          */
12199         tg3_get_eeprom_hw_cfg(tp);
12200
12201         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12202                 /* Allow reads and writes to the
12203                  * APE register and memory space.
12204                  */
12205                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12206                                  PCISTATE_ALLOW_APE_SHMEM_WR;
12207                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12208                                        pci_state_reg);
12209         }
12210
12211         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12212             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12213             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12214             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12215                 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12216
12217         /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12218          * GPIO1 driven high will bring 5700's external PHY out of reset.
12219          * It is also used as eeprom write protect on LOMs.
12220          */
12221         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12222         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12223             (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12224                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12225                                        GRC_LCLCTRL_GPIO_OUTPUT1);
12226         /* Unused GPIO3 must be driven as output on 5752 because there
12227          * are no pull-up resistors on unused GPIO pins.
12228          */
12229         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12230                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12231
12232         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12234                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12235
12236         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
12237             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
12238                 /* Turn off the debug UART. */
12239                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12240                 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12241                         /* Keep VMain power. */
12242                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12243                                               GRC_LCLCTRL_GPIO_OUTPUT0;
12244         }
12245
12246         /* Force the chip into D0. */
12247         err = tg3_set_power_state(tp, PCI_D0);
12248         if (err) {
12249                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12250                        pci_name(tp->pdev));
12251                 return err;
12252         }
12253
12254         /* Derive initial jumbo mode from MTU assigned in
12255          * ether_setup() via the alloc_etherdev() call
12256          */
12257         if (tp->dev->mtu > ETH_DATA_LEN &&
12258             !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12259                 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12260
12261         /* Determine WakeOnLan speed to use. */
12262         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12263             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12264             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12265             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12266                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12267         } else {
12268                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12269         }
12270
12271         /* A few boards don't want Ethernet@WireSpeed phy feature */
12272         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12273             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12274              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12275              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12276             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12277             (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12278                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12279
12280         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12281             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12282                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12283         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12284                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12285
12286         if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
12287             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12288             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12289             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780) {
12290                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12291                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12292                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12293                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12294                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12295                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12296                                 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12297                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12298                                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12299                 } else
12300                         tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12301         }
12302
12303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12304             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12305                 tp->phy_otp = tg3_read_otp_phycfg(tp);
12306                 if (tp->phy_otp == 0)
12307                         tp->phy_otp = TG3_OTP_DEFAULT;
12308         }
12309
12310         if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12311                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12312         else
12313                 tp->mi_mode = MAC_MI_MODE_BASE;
12314
12315         tp->coalesce_mode = 0;
12316         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12317             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12318                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12319
12320         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12322                 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12323
12324         if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12325              tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12326             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12327                 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12328
12329         err = tg3_mdio_init(tp);
12330         if (err)
12331                 return err;
12332
12333         /* Initialize data/descriptor byte/word swapping. */
12334         val = tr32(GRC_MODE);
12335         val &= GRC_MODE_HOST_STACKUP;
12336         tw32(GRC_MODE, val | tp->grc_mode);
12337
12338         tg3_switch_clocks(tp);
12339
12340         /* Clear this out for sanity. */
12341         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12342
12343         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12344                               &pci_state_reg);
12345         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12346             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12347                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12348
12349                 if (chiprevid == CHIPREV_ID_5701_A0 ||
12350                     chiprevid == CHIPREV_ID_5701_B0 ||
12351                     chiprevid == CHIPREV_ID_5701_B2 ||
12352                     chiprevid == CHIPREV_ID_5701_B5) {
12353                         void __iomem *sram_base;
12354
12355                         /* Write some dummy words into the SRAM status block
12356                          * area, see if it reads back correctly.  If the return
12357                          * value is bad, force enable the PCIX workaround.
12358                          */
12359                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12360
12361                         writel(0x00000000, sram_base);
12362                         writel(0x00000000, sram_base + 4);
12363                         writel(0xffffffff, sram_base + 4);
12364                         if (readl(sram_base) != 0x00000000)
12365                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12366                 }
12367         }
12368
12369         udelay(50);
12370         tg3_nvram_init(tp);
12371
12372         grc_misc_cfg = tr32(GRC_MISC_CFG);
12373         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12374
12375         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12376             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12377              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12378                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12379
12380         if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12381             (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12382                 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12383         if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12384                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12385                                       HOSTCC_MODE_CLRTICK_TXBD);
12386
12387                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12388                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12389                                        tp->misc_host_ctrl);
12390         }
12391
12392         /* Preserve the APE MAC_MODE bits */
12393         if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12394                 tp->mac_mode = tr32(MAC_MODE) |
12395                                MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12396         else
12397                 tp->mac_mode = TG3_DEF_MAC_MODE;
12398
12399         /* these are limited to 10/100 only */
12400         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12401              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12402             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12403              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12404              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12405               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12406               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12407             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12408              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12409               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12410               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12411             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
12412             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12413                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12414
12415         err = tg3_phy_probe(tp);
12416         if (err) {
12417                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12418                        pci_name(tp->pdev), err);
12419                 /* ... but do not return immediately ... */
12420                 tg3_mdio_fini(tp);
12421         }
12422
12423         tg3_read_partno(tp);
12424         tg3_read_fw_ver(tp);
12425
12426         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12427                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12428         } else {
12429                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12430                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12431                 else
12432                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12433         }
12434
12435         /* 5700 {AX,BX} chips have a broken status block link
12436          * change bit implementation, so we must use the
12437          * status register in those cases.
12438          */
12439         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12440                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12441         else
12442                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12443
12444         /* The led_ctrl is set during tg3_phy_probe, here we might
12445          * have to force the link status polling mechanism based
12446          * upon subsystem IDs.
12447          */
12448         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12449             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12450             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12451                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12452                                   TG3_FLAG_USE_LINKCHG_REG);
12453         }
12454
12455         /* For all SERDES we poll the MAC status register. */
12456         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12457                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12458         else
12459                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12460
12461         tp->rx_offset = NET_IP_ALIGN;
12462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12463             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12464                 tp->rx_offset = 0;
12465
12466         tp->rx_std_max_post = TG3_RX_RING_SIZE;
12467
12468         /* Increment the rx prod index on the rx std ring by at most
12469          * 8 for these chips to workaround hw errata.
12470          */
12471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12472             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12473             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12474                 tp->rx_std_max_post = 8;
12475
12476         if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12477                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12478                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
12479
12480         return err;
12481 }
12482
12483 #ifdef CONFIG_SPARC
12484 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12485 {
12486         struct net_device *dev = tp->dev;
12487         struct pci_dev *pdev = tp->pdev;
12488         struct device_node *dp = pci_device_to_OF_node(pdev);
12489         const unsigned char *addr;
12490         int len;
12491
12492         addr = of_get_property(dp, "local-mac-address", &len);
12493         if (addr && len == 6) {
12494                 memcpy(dev->dev_addr, addr, 6);
12495                 memcpy(dev->perm_addr, dev->dev_addr, 6);
12496                 return 0;
12497         }
12498         return -ENODEV;
12499 }
12500
12501 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12502 {
12503         struct net_device *dev = tp->dev;
12504
12505         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12506         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
12507         return 0;
12508 }
12509 #endif
12510
/* Establish the device MAC address, trying sources in decreasing order
 * of preference:
 *   1. (sparc only) the OF "local-mac-address" property,
 *   2. the bootcode MAC address mailbox in NIC SRAM,
 *   3. the address stored in NVRAM,
 *   4. whatever is currently latched in the MAC address registers,
 *   5. (sparc only) the machine IDPROM as a last resort.
 * Fills dev->dev_addr and dev->perm_addr and returns 0 on success,
 * or -EINVAL if no valid (non-zero, unicast) address was found.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Default NVRAM offset of the MAC address. */
	mac_offset = 0x7c;
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		/* Dual-MAC parts store the second port's address at 0xcc. */
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* NOTE(review): on lock failure the NVRAM state machine is
		 * reset rather than unlocked -- presumably to recover a
		 * wedged arbitration left by the other port's firmware;
		 * confirm against bootcode documentation.
		 */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	}
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	if ((hi >> 16) == 0x484b) {	/* mailbox signature (ASCII "HK") */
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* hi carries address bytes 0-1 in its low half,
			 * lo carries bytes 2-5 (big-endian NVRAM words).
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
12582
12583 #define BOUNDARY_SINGLE_CACHELINE       1
12584 #define BOUNDARY_MULTI_CACHELINE        2
12585
/* Fold DMA read/write boundary settings into @val (the DMA_RW_CTRL
 * register value under construction), chosen from the host cache line
 * size and the bus type (plain PCI, PCI-X, or PCI Express).  Returns
 * the possibly-updated value; @val is returned unchanged on chips
 * where the boundary bits have no effect or when no boundary goal is
 * configured for this architecture.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, hence *4
	 * to get bytes.  A zero register means the size was never
	 * programmed; assume a large 1024-byte line in that case.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture burst-boundary policy; platforms not listed
	 * below leave the boundary bits alone (goal == 0).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: only 128/256/384-byte boundary encodings exist,
		 * so small cache lines are rounded up to 128 bytes.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only the write boundary is controllable, and only
		 * 64- or 128-byte settings are available.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: each case falls through to the next larger
		 * boundary when the goal is multi-cacheline bursts.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
12722
/* Run one DMA transfer of @size bytes between the host buffer
 * @buf/@buf_dma and NIC SRAM using a hand-built internal buffer
 * descriptor, in the direction selected by @to_device (non-zero:
 * host -> NIC via the read-DMA engine; zero: NIC -> host via the
 * write-DMA engine).  Used by tg3_test_dma() to probe for DMA bugs.
 *
 * Returns 0 when the completion FIFO reports the descriptor finished
 * within the ~4ms poll window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Quiesce the completion FIFOs and DMA engine status before
	 * the test so stale state cannot satisfy the poll below.
	 */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Point the descriptor at the host buffer; 0x2100 is the NIC
	 * SRAM mbuf offset used as the on-chip end of the transfer.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		/* NOTE(review): cqid/sqid values select the completion and
		 * send queues used by each engine -- magic numbers from the
		 * hardware spec; confirm against Broadcom documentation.
		 */
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word-by-word into NIC SRAM through the
	 * PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the completion FIFO (up to 40 * 100us) for our
	 * descriptor address to come back.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
12803
12804 #define TEST_BUFFER_SIZE        0x2000
12805
12806 static int __devinit tg3_test_dma(struct tg3 *tp)
12807 {
12808         dma_addr_t buf_dma;
12809         u32 *buf, saved_dma_rwctrl;
12810         int ret;
12811
12812         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
12813         if (!buf) {
12814                 ret = -ENOMEM;
12815                 goto out_nofree;
12816         }
12817
12818         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12819                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12820
12821         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
12822
12823         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12824                 /* DMA read watermark not used on PCIE */
12825                 tp->dma_rwctrl |= 0x00180000;
12826         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
12827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
12828                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
12829                         tp->dma_rwctrl |= 0x003f0000;
12830                 else
12831                         tp->dma_rwctrl |= 0x003f000f;
12832         } else {
12833                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12834                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
12835                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
12836                         u32 read_water = 0x7;
12837
12838                         /* If the 5704 is behind the EPB bridge, we can
12839                          * do the less restrictive ONE_DMA workaround for
12840                          * better performance.
12841                          */
12842                         if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
12843                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12844                                 tp->dma_rwctrl |= 0x8000;
12845                         else if (ccval == 0x6 || ccval == 0x7)
12846                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
12847
12848                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
12849                                 read_water = 4;
12850                         /* Set bit 23 to enable PCIX hw bug fix */
12851                         tp->dma_rwctrl |=
12852                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
12853                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
12854                                 (1 << 23);
12855                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
12856                         /* 5780 always in PCIX mode */
12857                         tp->dma_rwctrl |= 0x00144000;
12858                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12859                         /* 5714 always in PCIX mode */
12860                         tp->dma_rwctrl |= 0x00148000;
12861                 } else {
12862                         tp->dma_rwctrl |= 0x001b000f;
12863                 }
12864         }
12865
12866         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
12867             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
12868                 tp->dma_rwctrl &= 0xfffffff0;
12869
12870         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12871             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12872                 /* Remove this if it causes problems for some boards. */
12873                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
12874
12875                 /* On 5700/5701 chips, we need to set this bit.
12876                  * Otherwise the chip will issue cacheline transactions
12877                  * to streamable DMA memory with not all the byte
12878                  * enables turned on.  This is an error on several
12879                  * RISC PCI controllers, in particular sparc64.
12880                  *
12881                  * On 5703/5704 chips, this bit has been reassigned
12882                  * a different meaning.  In particular, it is used
12883                  * on those chips to enable a PCI-X workaround.
12884                  */
12885                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
12886         }
12887
12888         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12889
12890 #if 0
12891         /* Unneeded, already done by tg3_get_invariants.  */
12892         tg3_switch_clocks(tp);
12893 #endif
12894
12895         ret = 0;
12896         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12897             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
12898                 goto out;
12899
12900         /* It is best to perform DMA test with maximum write burst size
12901          * to expose the 5700/5701 write DMA bug.
12902          */
12903         saved_dma_rwctrl = tp->dma_rwctrl;
12904         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12905         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12906
12907         while (1) {
12908                 u32 *p = buf, i;
12909
12910                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
12911                         p[i] = i;
12912
12913                 /* Send the buffer to the chip. */
12914                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
12915                 if (ret) {
12916                         printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
12917                         break;
12918                 }
12919
12920 #if 0
12921                 /* validate data reached card RAM correctly. */
12922                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12923                         u32 val;
12924                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
12925                         if (le32_to_cpu(val) != p[i]) {
12926                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
12927                                 /* ret = -ENODEV here? */
12928                         }
12929                         p[i] = 0;
12930                 }
12931 #endif
12932                 /* Now read it back. */
12933                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
12934                 if (ret) {
12935                         printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
12936
12937                         break;
12938                 }
12939
12940                 /* Verify it. */
12941                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
12942                         if (p[i] == i)
12943                                 continue;
12944
12945                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12946                             DMA_RWCTRL_WRITE_BNDRY_16) {
12947                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12948                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12949                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12950                                 break;
12951                         } else {
12952                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
12953                                 ret = -ENODEV;
12954                                 goto out;
12955                         }
12956                 }
12957
12958                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
12959                         /* Success. */
12960                         ret = 0;
12961                         break;
12962                 }
12963         }
12964         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
12965             DMA_RWCTRL_WRITE_BNDRY_16) {
12966                 static struct pci_device_id dma_wait_state_chipsets[] = {
12967                         { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
12968                                      PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
12969                         { },
12970                 };
12971
12972                 /* DMA test passed without adjusting DMA boundary,
12973                  * now look for chipsets that are known to expose the
12974                  * DMA bug without failing the test.
12975                  */
12976                 if (pci_dev_present(dma_wait_state_chipsets)) {
12977                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
12978                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
12979                 }
12980                 else
12981                         /* Safe to use the calculated DMA boundary. */
12982                         tp->dma_rwctrl = saved_dma_rwctrl;
12983
12984                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
12985         }
12986
12987 out:
12988         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
12989 out_nofree:
12990         return ret;
12991 }
12992
12993 static void __devinit tg3_init_link_config(struct tg3 *tp)
12994 {
12995         tp->link_config.advertising =
12996                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12997                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12998                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
12999                  ADVERTISED_Autoneg | ADVERTISED_MII);
13000         tp->link_config.speed = SPEED_INVALID;
13001         tp->link_config.duplex = DUPLEX_INVALID;
13002         tp->link_config.autoneg = AUTONEG_ENABLE;
13003         tp->link_config.active_speed = SPEED_INVALID;
13004         tp->link_config.active_duplex = DUPLEX_INVALID;
13005         tp->link_config.phy_is_low_power = 0;
13006         tp->link_config.orig_speed = SPEED_INVALID;
13007         tp->link_config.orig_duplex = DUPLEX_INVALID;
13008         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13009 }
13010
13011 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13012 {
13013         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13014                 tp->bufmgr_config.mbuf_read_dma_low_water =
13015                         DEFAULT_MB_RDMA_LOW_WATER_5705;
13016                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13017                         DEFAULT_MB_MACRX_LOW_WATER_5705;
13018                 tp->bufmgr_config.mbuf_high_water =
13019                         DEFAULT_MB_HIGH_WATER_5705;
13020                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13021                         tp->bufmgr_config.mbuf_mac_rx_low_water =
13022                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
13023                         tp->bufmgr_config.mbuf_high_water =
13024                                 DEFAULT_MB_HIGH_WATER_5906;
13025                 }
13026
13027                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13028                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13029                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13030                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13031                 tp->bufmgr_config.mbuf_high_water_jumbo =
13032                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13033         } else {
13034                 tp->bufmgr_config.mbuf_read_dma_low_water =
13035                         DEFAULT_MB_RDMA_LOW_WATER;
13036                 tp->bufmgr_config.mbuf_mac_rx_low_water =
13037                         DEFAULT_MB_MACRX_LOW_WATER;
13038                 tp->bufmgr_config.mbuf_high_water =
13039                         DEFAULT_MB_HIGH_WATER;
13040
13041                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13042                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13043                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13044                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13045                 tp->bufmgr_config.mbuf_high_water_jumbo =
13046                         DEFAULT_MB_HIGH_WATER_JUMBO;
13047         }
13048
13049         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13050         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
13051 }
13052
13053 static char * __devinit tg3_phy_string(struct tg3 *tp)
13054 {
13055         switch (tp->phy_id & PHY_ID_MASK) {
13056         case PHY_ID_BCM5400:    return "5400";
13057         case PHY_ID_BCM5401:    return "5401";
13058         case PHY_ID_BCM5411:    return "5411";
13059         case PHY_ID_BCM5701:    return "5701";
13060         case PHY_ID_BCM5703:    return "5703";
13061         case PHY_ID_BCM5704:    return "5704";
13062         case PHY_ID_BCM5705:    return "5705";
13063         case PHY_ID_BCM5750:    return "5750";
13064         case PHY_ID_BCM5752:    return "5752";
13065         case PHY_ID_BCM5714:    return "5714";
13066         case PHY_ID_BCM5780:    return "5780";
13067         case PHY_ID_BCM5755:    return "5755";
13068         case PHY_ID_BCM5787:    return "5787";
13069         case PHY_ID_BCM5784:    return "5784";
13070         case PHY_ID_BCM5756:    return "5722/5756";
13071         case PHY_ID_BCM5906:    return "5906";
13072         case PHY_ID_BCM5761:    return "5761";
13073         case PHY_ID_BCM8002:    return "8002/serdes";
13074         case 0:                 return "serdes";
13075         default:                return "unknown";
13076         }
13077 }
13078
13079 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13080 {
13081         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13082                 strcpy(str, "PCI Express");
13083                 return str;
13084         } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13085                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13086
13087                 strcpy(str, "PCIX:");
13088
13089                 if ((clock_ctrl == 7) ||
13090                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13091                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13092                         strcat(str, "133MHz");
13093                 else if (clock_ctrl == 0)
13094                         strcat(str, "33MHz");
13095                 else if (clock_ctrl == 2)
13096                         strcat(str, "50MHz");
13097                 else if (clock_ctrl == 4)
13098                         strcat(str, "66MHz");
13099                 else if (clock_ctrl == 6)
13100                         strcat(str, "100MHz");
13101         } else {
13102                 strcpy(str, "PCI:");
13103                 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13104                         strcat(str, "66MHz");
13105                 else
13106                         strcat(str, "33MHz");
13107         }
13108         if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13109                 strcat(str, ":32-bit");
13110         else
13111                 strcat(str, ":64-bit");
13112         return str;
13113 }
13114
/* Locate the other PCI function of a dual-port NIC sharing this slot.
 * Returns the peer's pci_dev, or tp->pdev itself when no peer exists
 * (e.g. a 5704 configured in single-port mode).  The returned pointer
 * carries no extra reference; see the comment below.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	/* devnr is the slot's base devfn with the function bits cleared. */
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Probe all 8 functions of this slot; pci_get_slot() takes a
	 * reference, which we drop immediately for non-matches.
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	/* NOTE(review): if the loop falls through, peer holds the
	 * already-released result of the last probe; the !peer test
	 * assumes that last probe returned NULL.  If the last slot is
	 * tp->pdev itself, the pci_dev_put() below would drop one
	 * reference too many — confirm the devfn layout rules this out.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13142
13143 static void __devinit tg3_init_coal(struct tg3 *tp)
13144 {
13145         struct ethtool_coalesce *ec = &tp->coal;
13146
13147         memset(ec, 0, sizeof(*ec));
13148         ec->cmd = ETHTOOL_GCOALESCE;
13149         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13150         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13151         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13152         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13153         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13154         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13155         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13156         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13157         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
13158
13159         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13160                                  HOSTCC_MODE_CLRTICK_TXBD)) {
13161                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13162                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13163                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13164                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13165         }
13166
13167         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13168                 ec->rx_coalesce_usecs_irq = 0;
13169                 ec->tx_coalesce_usecs_irq = 0;
13170                 ec->stats_block_coalesce_usecs = 0;
13171         }
13172 }
13173
/* net_device_ops installed by tg3_init_one() for 5755-plus and 5906
 * chips; uses the plain tg3_start_xmit() transmit path.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13192
/* net_device_ops installed by tg3_init_one() for all other chips;
 * identical to tg3_netdev_ops except that transmit goes through
 * tg3_start_xmit_dma_bug().
 */
static const struct net_device_ops tg3_netdev_ops_dma_bug = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
	.ndo_get_stats		= tg3_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
#if TG3_VLAN_TAG_USED
	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
13211
/* PCI probe routine: enable and map the device, discover its chip
 * variant and capabilities, run the DMA engine self-test, and register
 * the resulting net_device.  On any failure the goto ladder at the
 * bottom unwinds exactly the resources acquired so far.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	static int tg3_version_printed = 0;
	struct net_device *dev;
	struct tg3 *tp;
	int err, pm_cap;
	char str[40];
	u64 dma_mask, persist_dma_mask;

	/* Print the driver banner once, on the first probed device. */
	if (tg3_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, "
		       "aborting.\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_disable_pdev;
	}

	pci_set_master(pdev);

	/* Find power-management capability. */
	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
		       "aborting.\n");
		err = -EIO;
		goto err_out_free_res;
	}

	dev = alloc_etherdev(sizeof(*tp));
	if (!dev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

#if TG3_VLAN_TAG_USED
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

	/* Initialize the driver-private state embedded in the netdev. */
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
	tp->pm_cap = pm_cap;
	tp->rx_mode = TG3_DEF_RX_MODE;
	tp->tx_mode = TG3_DEF_TX_MODE;

	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;

	/* The word/byte swap controls here control register access byte
	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
	tp->misc_host_ctrl =
		MISC_HOST_CTRL_MASK_PCI_INT |
		MISC_HOST_CTRL_WORD_SWAP |
		MISC_HOST_CTRL_INDIR_ACCESS |
		MISC_HOST_CTRL_PCISTATE_RW;

	/* The NONFRM (non-frame) byte/word swap controls take effect
	 * on descriptor entries, anything which isn't packet data.
	 *
	 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
			GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->indirect_lock);
	INIT_WORK(&tp->reset_task, tg3_reset_task);

	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		printk(KERN_ERR PFX "Cannot map device registers, "
		       "aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}

	tg3_init_link_config(tp);

	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
	tp->tx_pending = TG3_DEF_TX_RING_PENDING;

	netif_napi_add(dev, &tp->napi, tg3_poll, 64);
	dev->ethtool_ops = &tg3_ethtool_ops;
	dev->watchdog_timeo = TG3_TX_TIMEOUT;
	dev->irq = pdev->irq;

	/* Identify the chip and fill in all capability flags. */
	err = tg3_get_invariants(tp);
	if (err) {
		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	/* 5755-plus and 5906 use the plain transmit path; everything
	 * else needs the DMA-bug-aware transmit handler.
	 */
	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		dev->netdev_ops = &tg3_netdev_ops;
	else
		dev->netdev_ops = &tg3_netdev_ops_dma_bug;


	/* The EPB bridge inside 5714, 5715, and 5780 and any
	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (!err) {
			dev->features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				printk(KERN_ERR PFX "Unable to obtain 64 bit "
				       "DMA for consistent allocations\n");
				goto err_out_iounmap;
			}
		}
	}
	/* Fall back to a 32-bit mask if the wide mask was refused. */
	if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			printk(KERN_ERR PFX "No usable DMA configuration, "
			       "aborting.\n");
			goto err_out_iounmap;
		}
	}

	tg3_init_bufmgr_config(tp);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
		tp->fw_needed = FIRMWARE_TG3;

	/* Decide TSO capability: hardware TSO where available,
	 * firmware TSO (with the TSO bug workaround) otherwise, and
	 * none at all on the chips/configurations listed below.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
	}
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
	} else {
		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
			tp->fw_needed = FIRMWARE_TG3TSO5;
		else
			tp->fw_needed = FIRMWARE_TG3TSO;
	}

	/* TSO is on by default on chips that support hardware TSO.
	 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
		if (dev->features & NETIF_F_IP_CSUM)
			dev->features |= NETIF_F_TSO;
		if ((dev->features & NETIF_F_IPV6_CSUM) &&
		    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2))
			dev->features |= NETIF_F_TSO6;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
			dev->features |= NETIF_F_TSO_ECN;
	}


	/* 5705 A1 on a slow bus without TSO gets a smaller rx ring. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
		tp->rx_pending = 63;
	}

	err = tg3_get_device_address(tp);
	if (err) {
		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
		       "aborting.\n");
		goto err_out_fw;
	}

	/* Map and initialize the APE registers where present. */
	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
		if (!tp->aperegs) {
			printk(KERN_ERR PFX "Cannot map APE registers, "
			       "aborting.\n");
			err = -ENOMEM;
			goto err_out_fw;
		}

		tg3_ape_lock_init(tp);

		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
			tg3_read_dash_ver(tp);
	}

	/*
	 * Reset chip in case UNDI or EFI driver did not shutdown
	 * DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}

	err = tg3_test_dma(tp);
	if (err) {
		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
		goto err_out_apeunmap;
	}

	/* flow control autonegotiation is default behavior */
	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	tg3_init_coal(tp);

	pci_set_drvdata(pdev, dev);

	err = register_netdev(dev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, "
		       "aborting.\n");
		goto err_out_apeunmap;
	}

	/* Log the probed configuration. */
	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
	       dev->name,
	       tp->board_part_number,
	       tp->pci_chip_rev_id,
	       tg3_bus_string(tp, str),
	       dev->dev_addr);

	if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
		printk(KERN_INFO
		       "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
		       tp->dev->name,
		       tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
		       dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev));
	else
		printk(KERN_INFO
		       "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
		       tp->dev->name, tg3_phy_string(tp),
		       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
			((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
			 "10/100/1000Base-T")),
		       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);

	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
	       dev->name,
	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
	       dev->name, tp->dma_rwctrl,
	       (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
	        (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));

	return 0;

	/* Error unwind: each label releases what was acquired before
	 * the corresponding failure point, in reverse order.
	 */
err_out_apeunmap:
	if (tp->aperegs) {
		iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_fw:
	if (tp->fw)
		release_firmware(tp->fw);

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
13537
13538 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13539 {
13540         struct net_device *dev = pci_get_drvdata(pdev);
13541
13542         if (dev) {
13543                 struct tg3 *tp = netdev_priv(dev);
13544
13545                 if (tp->fw)
13546                         release_firmware(tp->fw);
13547
13548                 flush_scheduled_work();
13549
13550                 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13551                         tg3_phy_fini(tp);
13552                         tg3_mdio_fini(tp);
13553                 }
13554
13555                 unregister_netdev(dev);
13556                 if (tp->aperegs) {
13557                         iounmap(tp->aperegs);
13558                         tp->aperegs = NULL;
13559                 }
13560                 if (tp->regs) {
13561                         iounmap(tp->regs);
13562                         tp->regs = NULL;
13563                 }
13564                 free_netdev(dev);
13565                 pci_release_regions(pdev);
13566                 pci_disable_device(pdev);
13567                 pci_set_drvdata(pdev, NULL);
13568         }
13569 }
13570
/* PCI suspend handler: quiesce the interface, halt the chip and drop
 * it to the target power state.  If the power transition fails, the
 * device is restarted so it stays usable.  Returns 0 or a negative
 * errno from tg3_set_power_state().
 */
static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	pci_power_t target_state;
	int err;

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);

	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, the PHY and the data path before
	 * touching the hardware.
	 */
	flush_scheduled_work();
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	/* Halt the chip and mark initialization as undone. */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
	tg3_full_unlock(tp);

	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;

	err = tg3_set_power_state(tp, target_state);
	if (err) {
		int err2;

		/* Power transition failed: restart the hardware and
		 * re-attach the interface so the device keeps working.
		 */
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
13632
/* PCI resume handler: restore config space, bring the chip back to
 * D0, restart the hardware and re-attach the interface.  Returns 0 or
 * a negative errno from the power/restart steps.
 */
static int tg3_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	pci_restore_state(tp->pdev);

	/* Nothing else to do if the interface was down at suspend. */
	if (!netif_running(dev))
		return 0;

	err = tg3_set_power_state(tp, PCI_D0);
	if (err)
		return err;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
13670
/* PCI driver glue tying the probe/remove/PM entry points above to the
 * device table.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
13679
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
13684
/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
13689
/* Hook the entry/exit points into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);