tg3: Check transitions to D0 power state
drivers/net/tg3.c (linux-2.6-microblaze.git)
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
74 {
75         set_bit(flag, bits);
76 }
77
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 {
80         clear_bit(flag, bits);
81 }
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
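/* Illustrative usage (editorial sketch, not part of the driver): the helpers
 * above make flag tests compile down to plain test_bit()/set_bit() calls on
 * the tp->tg3_flags bitmap, e.g. (the pairing of flags is hypothetical):
 */
#if 0
	if (tg3_flag(tp, JUMBO_CAPABLE))
		tg3_flag_set(tp, LRG_PROD_RING_CAP);
	tg3_flag_clear(tp, LRG_PROD_RING_CAP);
#endif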
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     119
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "May 18, 2011"
96
97 #define TG3_DEF_MAC_MODE        0
98 #define TG3_DEF_RX_MODE         0
99 #define TG3_DEF_TX_MODE         0
100 #define TG3_DEF_MSG_ENABLE        \
101         (NETIF_MSG_DRV          | \
102          NETIF_MSG_PROBE        | \
103          NETIF_MSG_LINK         | \
104          NETIF_MSG_TIMER        | \
105          NETIF_MSG_IFDOWN       | \
106          NETIF_MSG_IFUP         | \
107          NETIF_MSG_RX_ERR       | \
108          NETIF_MSG_TX_ERR)
109
110 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
111
112 /* Length of time before we decide the hardware is borked and
113  * dev->tx_timeout() should be called to fix the problem.
114  */
115
116 #define TG3_TX_TIMEOUT                  (5 * HZ)
117
118 /* hardware minimum and maximum for a single frame's data payload */
119 #define TG3_MIN_MTU                     60
120 #define TG3_MAX_MTU(tp) \
121         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
122
123 /* These numbers seem to be hard coded in the NIC firmware somehow.
124  * You can't change the ring sizes, but you can change where you place
125  * them in the NIC onboard memory.
126  */
127 #define TG3_RX_STD_RING_SIZE(tp) \
128         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
129          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
130 #define TG3_DEF_RX_RING_PENDING         200
131 #define TG3_RX_JMB_RING_SIZE(tp) \
132         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
133          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
134 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
135 #define TG3_RSS_INDIR_TBL_SIZE          128
136
137 /* Do not place these n-ring-entries values into the tp struct itself;
138  * we really want to expose these constants to GCC so that modulo et
139  * al. operations are done with shifts and masks instead of with
140  * hw multiply/modulo instructions.  Another solution would be to
141  * replace things like '% foo' with '& (foo - 1)'.
142  */
143
144 #define TG3_TX_RING_SIZE                512
145 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
146
147 #define TG3_RX_STD_RING_BYTES(tp) \
148         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
149 #define TG3_RX_JMB_RING_BYTES(tp) \
150         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
151 #define TG3_RX_RCB_RING_BYTES(tp) \
152         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
153 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
154                                  TG3_TX_RING_SIZE)
155 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
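/* Editorial sketch: because TG3_TX_RING_SIZE is a power of two, the mask in
 * NEXT_TX() is exactly the '% TG3_TX_RING_SIZE' wrap described in the
 * comment above, with no hardware divide:
 */
#if 0
	u32 entry = TG3_TX_RING_SIZE - 1;	/* last descriptor */
	BUILD_BUG_ON(TG3_TX_RING_SIZE & (TG3_TX_RING_SIZE - 1));
	entry = NEXT_TX(entry);			/* (511 + 1) & 511 == 0 */
#endif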
156
157 #define TG3_DMA_BYTE_ENAB               64
158
159 #define TG3_RX_STD_DMA_SZ               1536
160 #define TG3_RX_JMB_DMA_SZ               9046
161
162 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
163
164 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
165 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
166
167 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
169
170 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
172
173 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
174  * that are at least dword aligned when used in PCIX mode.  The driver
175  * works around this bug by double copying the packet.  This workaround
176  * is built into the normal double copy length check for efficiency.
177  *
178  * However, the double copy is only necessary on those architectures
179  * where unaligned memory accesses are inefficient.  For those architectures
180  * where unaligned memory accesses incur little penalty, we can reintegrate
181  * the 5701 in the normal rx path.  Doing so saves a device structure
182  * dereference by hardcoding the double copy threshold in place.
183  */
184 #define TG3_RX_COPY_THRESHOLD           256
185 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
186         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
187 #else
188         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
189 #endif
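/* Editorial sketch (the real decision lives in the rx path further down
 * this file): packets shorter than TG3_RX_COPY_THRESH() are copied into a
 * fresh skb so the DMA buffer can stay on the ring, which also satisfies
 * the 5701 PCIX alignment erratum described above:
 */
#if 0
	if (len < TG3_RX_COPY_THRESH(tp))
		/* copy into a small skb, recycle the mapped ring buffer */;
	else
		/* unmap the ring buffer and pass it up directly */;
#endif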
190
191 /* minimum number of free TX descriptors required to wake up TX process */
192 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
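/* Editorial sketch: the queue is restarted once at least a quarter of the
 * ring is free again.  tg3_tx_avail() is defined later in this file; the
 * exact pairing shown here is an assumption for illustration:
 */
#if 0
	if (netif_queue_stopped(tp->dev) &&
	    tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
		netif_wake_queue(tp->dev);
#endif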
193
194 #define TG3_RAW_IP_ALIGN 2
195
196 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
197
198 #define FIRMWARE_TG3            "tigon/tg3.bin"
199 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
200 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
201
202 static char version[] __devinitdata =
203         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
204
205 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
206 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
207 MODULE_LICENSE("GPL");
208 MODULE_VERSION(DRV_MODULE_VERSION);
209 MODULE_FIRMWARE(FIRMWARE_TG3);
210 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
211 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
212
213 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
214 module_param(tg3_debug, int, 0);
215 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
216
217 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
218         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
219         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
220         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
221         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
222         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
223         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
224         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
225         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
226         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
227         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
228         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
229         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
230         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
231         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
232         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
233         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
234         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
235         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
236         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
237         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
238         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
239         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
240         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
241         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
242         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
243         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
244         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
245         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
246         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
247         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
248         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
249         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
250         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
251         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
252         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
253         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
254         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
255         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
256         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
257         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
258         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
259         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
260         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
261         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
262         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
263         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
264         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
265         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
266         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
267         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
268         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
269         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
270         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
271         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
272         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
273         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
274         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
275         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
276         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
277         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
278         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
279         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
280         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
281         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
282         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
283         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
284         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
285         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
286         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
287         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
288         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
289         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
290         {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
291         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
292         {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
293         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
294         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
295         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
296         {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
297         {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
298         {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
299         {}
300 };
301
302 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
303
304 static const struct {
305         const char string[ETH_GSTRING_LEN];
306 } ethtool_stats_keys[] = {
307         { "rx_octets" },
308         { "rx_fragments" },
309         { "rx_ucast_packets" },
310         { "rx_mcast_packets" },
311         { "rx_bcast_packets" },
312         { "rx_fcs_errors" },
313         { "rx_align_errors" },
314         { "rx_xon_pause_rcvd" },
315         { "rx_xoff_pause_rcvd" },
316         { "rx_mac_ctrl_rcvd" },
317         { "rx_xoff_entered" },
318         { "rx_frame_too_long_errors" },
319         { "rx_jabbers" },
320         { "rx_undersize_packets" },
321         { "rx_in_length_errors" },
322         { "rx_out_length_errors" },
323         { "rx_64_or_less_octet_packets" },
324         { "rx_65_to_127_octet_packets" },
325         { "rx_128_to_255_octet_packets" },
326         { "rx_256_to_511_octet_packets" },
327         { "rx_512_to_1023_octet_packets" },
328         { "rx_1024_to_1522_octet_packets" },
329         { "rx_1523_to_2047_octet_packets" },
330         { "rx_2048_to_4095_octet_packets" },
331         { "rx_4096_to_8191_octet_packets" },
332         { "rx_8192_to_9022_octet_packets" },
333
334         { "tx_octets" },
335         { "tx_collisions" },
336
337         { "tx_xon_sent" },
338         { "tx_xoff_sent" },
339         { "tx_flow_control" },
340         { "tx_mac_errors" },
341         { "tx_single_collisions" },
342         { "tx_mult_collisions" },
343         { "tx_deferred" },
344         { "tx_excessive_collisions" },
345         { "tx_late_collisions" },
346         { "tx_collide_2times" },
347         { "tx_collide_3times" },
348         { "tx_collide_4times" },
349         { "tx_collide_5times" },
350         { "tx_collide_6times" },
351         { "tx_collide_7times" },
352         { "tx_collide_8times" },
353         { "tx_collide_9times" },
354         { "tx_collide_10times" },
355         { "tx_collide_11times" },
356         { "tx_collide_12times" },
357         { "tx_collide_13times" },
358         { "tx_collide_14times" },
359         { "tx_collide_15times" },
360         { "tx_ucast_packets" },
361         { "tx_mcast_packets" },
362         { "tx_bcast_packets" },
363         { "tx_carrier_sense_errors" },
364         { "tx_discards" },
365         { "tx_errors" },
366
367         { "dma_writeq_full" },
368         { "dma_write_prioq_full" },
369         { "rxbds_empty" },
370         { "rx_discards" },
371         { "rx_errors" },
372         { "rx_threshold_hit" },
373
374         { "dma_readq_full" },
375         { "dma_read_prioq_full" },
376         { "tx_comp_queue_full" },
377
378         { "ring_set_send_prod_index" },
379         { "ring_status_update" },
380         { "nic_irqs" },
381         { "nic_avoided_irqs" },
382         { "nic_tx_threshold_hit" },
383
384         { "mbuf_lwm_thresh_hit" },
385 };
386
387 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
388
389
390 static const struct {
391         const char string[ETH_GSTRING_LEN];
392 } ethtool_test_keys[] = {
393         { "nvram test     (online) " },
394         { "link test      (online) " },
395         { "register test  (offline)" },
396         { "memory test    (offline)" },
397         { "loopback test  (offline)" },
398         { "interrupt test (offline)" },
399 };
400
401 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
402
403
404 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
405 {
406         writel(val, tp->regs + off);
407 }
408
409 static u32 tg3_read32(struct tg3 *tp, u32 off)
410 {
411         return readl(tp->regs + off);
412 }
413
414 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
415 {
416         writel(val, tp->aperegs + off);
417 }
418
419 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
420 {
421         return readl(tp->aperegs + off);
422 }
423
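/* Editorial comment on the "indirect" accessors below: they never touch the
 * memory-mapped BAR.  Instead they tunnel register accesses through two PCI
 * config-space registers: the target offset goes into TG3PCI_REG_BASE_ADDR
 * and the data moves through TG3PCI_REG_DATA, all under indirect_lock so
 * the address/data pair cannot be interleaved by another CPU.
 */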
424 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
425 {
426         unsigned long flags;
427
428         spin_lock_irqsave(&tp->indirect_lock, flags);
429         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
430         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
431         spin_unlock_irqrestore(&tp->indirect_lock, flags);
432 }
433
434 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
435 {
436         writel(val, tp->regs + off);
437         readl(tp->regs + off);
438 }
439
440 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
441 {
442         unsigned long flags;
443         u32 val;
444
445         spin_lock_irqsave(&tp->indirect_lock, flags);
446         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
448         spin_unlock_irqrestore(&tp->indirect_lock, flags);
449         return val;
450 }
451
452 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
453 {
454         unsigned long flags;
455
456         if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
457                 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
458                                        TG3_64BIT_REG_LOW, val);
459                 return;
460         }
461         if (off == TG3_RX_STD_PROD_IDX_REG) {
462                 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
463                                        TG3_64BIT_REG_LOW, val);
464                 return;
465         }
466
467         spin_lock_irqsave(&tp->indirect_lock, flags);
468         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
469         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
470         spin_unlock_irqrestore(&tp->indirect_lock, flags);
471
472         /* In indirect mode when disabling interrupts, we also need
473          * to clear the interrupt bit in the GRC local ctrl register.
474          */
475         if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
476             (val == 0x1)) {
477                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
478                                        tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
479         }
480 }
481
482 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
483 {
484         unsigned long flags;
485         u32 val;
486
487         spin_lock_irqsave(&tp->indirect_lock, flags);
488         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
489         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
490         spin_unlock_irqrestore(&tp->indirect_lock, flags);
491         return val;
492 }
493
494 /* usec_wait specifies the wait time in usec when writing to certain registers
495  * where it is unsafe to read back the register without some delay.
496  * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
497  * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
498  */
499 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
500 {
501         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
502                 /* Non-posted methods */
503                 tp->write32(tp, off, val);
504         else {
505                 /* Posted method */
506                 tg3_write32(tp, off, val);
507                 if (usec_wait)
508                         udelay(usec_wait);
509                 tp->read32(tp, off);
510         }
511         /* Wait again after the read for the posted method to guarantee that
512          * the wait time is met.
513          */
514         if (usec_wait)
515                 udelay(usec_wait);
516 }
517
518 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
519 {
520         tp->write32_mbox(tp, off, val);
521         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
522                 tp->read32_mbox(tp, off);
523 }
524
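/* Editorial comment: on chips flagged TXD_MBOX_HWBUG the producer-index
 * mailbox is written twice, apparently to work around an erratum where a
 * single posted write can be lost; MBOX_WRITE_REORDER then forces a read
 * back so the write is flushed before any later MMIO.
 */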
525 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
526 {
527         void __iomem *mbox = tp->regs + off;
528         writel(val, mbox);
529         if (tg3_flag(tp, TXD_MBOX_HWBUG))
530                 writel(val, mbox);
531         if (tg3_flag(tp, MBOX_WRITE_REORDER))
532                 readl(mbox);
533 }
534
535 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
536 {
537         return readl(tp->regs + off + GRCMBOX_BASE);
538 }
539
540 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
541 {
542         writel(val, tp->regs + off + GRCMBOX_BASE);
543 }
544
545 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
546 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
547 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
548 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
549 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
550
551 #define tw32(reg, val)                  tp->write32(tp, reg, val)
552 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
553 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
554 #define tr32(reg)                       tp->read32(tp, reg)
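/* Editorial sketch of how the accessor macros above differ: tw32() is a
 * plain posted write, tw32_f() flushes it with a read back, and
 * tw32_wait_f() additionally honors the usec_wait rule documented before
 * _tw32_flush().  The registers here are chosen purely as examples:
 */
#if 0
	tw32(MAC_MODE, tp->mac_mode);			/* posted */
	tw32_f(MAC_MODE, tp->mac_mode);			/* posted + flushed */
	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);	/* flushed + delayed */
#endif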
555
556 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
557 {
558         unsigned long flags;
559
560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
561             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
562                 return;
563
564         spin_lock_irqsave(&tp->indirect_lock, flags);
565         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
566                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
567                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
568
569                 /* Always leave this as zero. */
570                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
571         } else {
572                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
573                 tw32_f(TG3PCI_MEM_WIN_DATA, val);
574
575                 /* Always leave this as zero. */
576                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
577         }
578         spin_unlock_irqrestore(&tp->indirect_lock, flags);
579 }
580
581 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
582 {
583         unsigned long flags;
584
585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
586             (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
587                 *val = 0;
588                 return;
589         }
590
591         spin_lock_irqsave(&tp->indirect_lock, flags);
592         if (tg3_flag(tp, SRAM_USE_CONFIG)) {
593                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
594                 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
595
596                 /* Always leave this as zero. */
597                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
598         } else {
599                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
600                 *val = tr32(TG3PCI_MEM_WIN_DATA);
601
602                 /* Always leave this as zero. */
603                 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
604         }
605         spin_unlock_irqrestore(&tp->indirect_lock, flags);
606 }
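/* Editorial sketch: tg3_write_mem()/tg3_read_mem() move one word at a time
 * through the NIC SRAM window; a round trip looks like this (the offset is
 * borrowed from the firmware mailboxes used later in this file):
 */
#if 0
	u32 val;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_read_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, &val);	/* val == 14 */
#endif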
607
608 static void tg3_ape_lock_init(struct tg3 *tp)
609 {
610         int i;
611         u32 regbase;
612
613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
614                 regbase = TG3_APE_LOCK_GRANT;
615         else
616                 regbase = TG3_APE_PER_LOCK_GRANT;
617
618         /* Make sure the driver isn't holding any stale locks. */
619         for (i = 0; i < 8; i++)
620                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
621 }
622
623 static int tg3_ape_lock(struct tg3 *tp, int locknum)
624 {
625         int i, off;
626         int ret = 0;
627         u32 status, req, gnt;
628
629         if (!tg3_flag(tp, ENABLE_APE))
630                 return 0;
631
632         switch (locknum) {
633         case TG3_APE_LOCK_GRC:
634         case TG3_APE_LOCK_MEM:
635                 break;
636         default:
637                 return -EINVAL;
638         }
639
640         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
641                 req = TG3_APE_LOCK_REQ;
642                 gnt = TG3_APE_LOCK_GRANT;
643         } else {
644                 req = TG3_APE_PER_LOCK_REQ;
645                 gnt = TG3_APE_PER_LOCK_GRANT;
646         }
647
648         off = 4 * locknum;
649
650         tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
651
652         /* Wait for up to 1 millisecond to acquire lock. */
653         for (i = 0; i < 100; i++) {
654                 status = tg3_ape_read32(tp, gnt + off);
655                 if (status == APE_LOCK_GRANT_DRIVER)
656                         break;
657                 udelay(10);
658         }
659
660         if (status != APE_LOCK_GRANT_DRIVER) {
661                 /* Revoke the lock request. */
662                 tg3_ape_write32(tp, gnt + off,
663                                 APE_LOCK_GRANT_DRIVER);
664
665                 ret = -EBUSY;
666         }
667
668         return ret;
669 }
670
671 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
672 {
673         u32 gnt;
674
675         if (!tg3_flag(tp, ENABLE_APE))
676                 return;
677
678         switch (locknum) {
679         case TG3_APE_LOCK_GRC:
680         case TG3_APE_LOCK_MEM:
681                 break;
682         default:
683                 return;
684         }
685
686         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
687                 gnt = TG3_APE_LOCK_GRANT;
688         else
689                 gnt = TG3_APE_PER_LOCK_GRANT;
690
691         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
692 }
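/* Editorial sketch: the APE lock calls bracket accesses to resources shared
 * with the APE firmware; callers are expected to bail out if the grant
 * never arrives:
 */
#if 0
	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
		return -EBUSY;		/* firmware kept the lock */
	/* ... touch the shared resource ... */
	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
#endif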
693
694 static void tg3_disable_ints(struct tg3 *tp)
695 {
696         int i;
697
698         tw32(TG3PCI_MISC_HOST_CTRL,
699              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
700         for (i = 0; i < tp->irq_max; i++)
701                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
702 }
703
704 static void tg3_enable_ints(struct tg3 *tp)
705 {
706         int i;
707
708         tp->irq_sync = 0;
709         wmb();
710
711         tw32(TG3PCI_MISC_HOST_CTRL,
712              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
713
714         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
715         for (i = 0; i < tp->irq_cnt; i++) {
716                 struct tg3_napi *tnapi = &tp->napi[i];
717
718                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
719                 if (tg3_flag(tp, 1SHOT_MSI))
720                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
721
722                 tp->coal_now |= tnapi->coal_now;
723         }
724
725         /* Force an initial interrupt */
726         if (!tg3_flag(tp, TAGGED_STATUS) &&
727             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
728                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
729         else
730                 tw32(HOSTCC_MODE, tp->coal_now);
731
732         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
733 }
734
735 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
736 {
737         struct tg3 *tp = tnapi->tp;
738         struct tg3_hw_status *sblk = tnapi->hw_status;
739         unsigned int work_exists = 0;
740
741         /* check for phy events */
742         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
743                 if (sblk->status & SD_STATUS_LINK_CHG)
744                         work_exists = 1;
745         }
746         /* check for RX/TX work to do */
747         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
748             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
749                 work_exists = 1;
750
751         return work_exists;
752 }
753
754 /* tg3_int_reenable
755  *  Similar to tg3_enable_ints(), but it accurately determines whether
756  *  there is new work pending and can return without flushing the PIO
757  *  write which re-enables interrupts.
758  */
759 static void tg3_int_reenable(struct tg3_napi *tnapi)
760 {
761         struct tg3 *tp = tnapi->tp;
762
763         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
764         mmiowb();
765
766         /* When doing tagged status, this work check is unnecessary.
767          * The last_tag we write above tells the chip which piece of
768          * work we've completed.
769          */
770         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
771                 tw32(HOSTCC_MODE, tp->coalesce_mode |
772                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
773 }
774
775 static void tg3_switch_clocks(struct tg3 *tp)
776 {
777         u32 clock_ctrl;
778         u32 orig_clock_ctrl;
779
780         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
781                 return;
782
783         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
784
785         orig_clock_ctrl = clock_ctrl;
786         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
787                        CLOCK_CTRL_CLKRUN_OENABLE |
788                        0x1f);
789         tp->pci_clock_ctrl = clock_ctrl;
790
791         if (tg3_flag(tp, 5705_PLUS)) {
792                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
793                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
794                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
795                 }
796         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
797                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
798                             clock_ctrl |
799                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
800                             40);
801                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
802                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
803                             40);
804         }
805         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
806 }
807
808 #define PHY_BUSY_LOOPS  5000
809
810 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
811 {
812         u32 frame_val;
813         unsigned int loops;
814         int ret;
815
816         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
817                 tw32_f(MAC_MI_MODE,
818                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
819                 udelay(80);
820         }
821
822         *val = 0x0;
823
824         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
825                       MI_COM_PHY_ADDR_MASK);
826         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
827                       MI_COM_REG_ADDR_MASK);
828         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
829
830         tw32_f(MAC_MI_COM, frame_val);
831
832         loops = PHY_BUSY_LOOPS;
833         while (loops != 0) {
834                 udelay(10);
835                 frame_val = tr32(MAC_MI_COM);
836
837                 if ((frame_val & MI_COM_BUSY) == 0) {
838                         udelay(5);
839                         frame_val = tr32(MAC_MI_COM);
840                         break;
841                 }
842                 loops -= 1;
843         }
844
845         ret = -EBUSY;
846         if (loops != 0) {
847                 *val = frame_val & MI_COM_DATA_MASK;
848                 ret = 0;
849         }
850
851         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
852                 tw32_f(MAC_MI_MODE, tp->mi_mode);
853                 udelay(80);
854         }
855
856         return ret;
857 }
858
859 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
860 {
861         u32 frame_val;
862         unsigned int loops;
863         int ret;
864
865         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
866             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
867                 return 0;
868
869         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
870                 tw32_f(MAC_MI_MODE,
871                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
872                 udelay(80);
873         }
874
875         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
876                       MI_COM_PHY_ADDR_MASK);
877         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
878                       MI_COM_REG_ADDR_MASK);
879         frame_val |= (val & MI_COM_DATA_MASK);
880         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
881
882         tw32_f(MAC_MI_COM, frame_val);
883
884         loops = PHY_BUSY_LOOPS;
885         while (loops != 0) {
886                 udelay(10);
887                 frame_val = tr32(MAC_MI_COM);
888                 if ((frame_val & MI_COM_BUSY) == 0) {
889                         udelay(5);
890                         frame_val = tr32(MAC_MI_COM);
891                         break;
892                 }
893                 loops -= 1;
894         }
895
896         ret = -EBUSY;
897         if (loops != 0)
898                 ret = 0;
899
900         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
901                 tw32_f(MAC_MI_MODE, tp->mi_mode);
902                 udelay(80);
903         }
904
905         return ret;
906 }
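/* Editorial sketch: a typical read-modify-write through the MI interface,
 * using the two helpers above (BMCR is chosen as an example register):
 */
#if 0
	u32 val;
	if (!tg3_readphy(tp, MII_BMCR, &val))
		tg3_writephy(tp, MII_BMCR, val | BMCR_ANENABLE);
#endif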
907
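/* Editorial comment on the two helpers below: they implement Clause-45 MMD
 * register access over the Clause-22 MII registers.  The sequence is:
 * select the MMD device in MII_TG3_MMD_CTRL, latch the register address via
 * MII_TG3_MMD_ADDRESS, switch MII_TG3_MMD_CTRL to no-increment data mode,
 * then move the data word through MII_TG3_MMD_ADDRESS.
 */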
908 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
909 {
910         int err;
911
912         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
913         if (err)
914                 goto done;
915
916         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
917         if (err)
918                 goto done;
919
920         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
921                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
922         if (err)
923                 goto done;
924
925         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
926
927 done:
928         return err;
929 }
930
931 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
932 {
933         int err;
934
935         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
936         if (err)
937                 goto done;
938
939         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
940         if (err)
941                 goto done;
942
943         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
944                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
945         if (err)
946                 goto done;
947
948         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
949
950 done:
951         return err;
952 }
953
954 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
955 {
956         int err;
957
958         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
959         if (!err)
960                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
961
962         return err;
963 }
964
965 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
966 {
967         int err;
968
969         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
970         if (!err)
971                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
972
973         return err;
974 }
975
976 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
977 {
978         int err;
979
980         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
981                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
982                            MII_TG3_AUXCTL_SHDWSEL_MISC);
983         if (!err)
984                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
985
986         return err;
987 }
988
989 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
990 {
991         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
992                 set |= MII_TG3_AUXCTL_MISC_WREN;
993
994         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
995 }
996
997 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
998         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
999                              MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1000                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1001
1002 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1003         tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1004                              MII_TG3_AUXCTL_ACTL_TX_6DB)
1005
1006 static int tg3_bmcr_reset(struct tg3 *tp)
1007 {
1008         u32 phy_control;
1009         int limit, err;
1010
1011         /* OK, reset it, and poll the BMCR_RESET bit until it
1012          * clears or we time out.
1013          */
1014         phy_control = BMCR_RESET;
1015         err = tg3_writephy(tp, MII_BMCR, phy_control);
1016         if (err != 0)
1017                 return -EBUSY;
1018
1019         limit = 5000;
1020         while (limit--) {
1021                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1022                 if (err != 0)
1023                         return -EBUSY;
1024
1025                 if ((phy_control & BMCR_RESET) == 0) {
1026                         udelay(40);
1027                         break;
1028                 }
1029                 udelay(10);
1030         }
1031         if (limit < 0)
1032                 return -EBUSY;
1033
1034         return 0;
1035 }
1036
1037 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1038 {
1039         struct tg3 *tp = bp->priv;
1040         u32 val;
1041
1042         spin_lock_bh(&tp->lock);
1043
1044         if (tg3_readphy(tp, reg, &val))
1045                 val = -EIO;
1046
1047         spin_unlock_bh(&tp->lock);
1048
1049         return val;
1050 }
1051
1052 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1053 {
1054         struct tg3 *tp = bp->priv;
1055         u32 ret = 0;
1056
1057         spin_lock_bh(&tp->lock);
1058
1059         if (tg3_writephy(tp, reg, val))
1060                 ret = -EIO;
1061
1062         spin_unlock_bh(&tp->lock);
1063
1064         return ret;
1065 }
1066
1067 static int tg3_mdio_reset(struct mii_bus *bp)
1068 {
1069         return 0;
1070 }
1071
1072 static void tg3_mdio_config_5785(struct tg3 *tp)
1073 {
1074         u32 val;
1075         struct phy_device *phydev;
1076
1077         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1078         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1079         case PHY_ID_BCM50610:
1080         case PHY_ID_BCM50610M:
1081                 val = MAC_PHYCFG2_50610_LED_MODES;
1082                 break;
1083         case PHY_ID_BCMAC131:
1084                 val = MAC_PHYCFG2_AC131_LED_MODES;
1085                 break;
1086         case PHY_ID_RTL8211C:
1087                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1088                 break;
1089         case PHY_ID_RTL8201E:
1090                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1091                 break;
1092         default:
1093                 return;
1094         }
1095
1096         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1097                 tw32(MAC_PHYCFG2, val);
1098
1099                 val = tr32(MAC_PHYCFG1);
1100                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1101                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1102                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1103                 tw32(MAC_PHYCFG1, val);
1104
1105                 return;
1106         }
1107
1108         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1109                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1110                        MAC_PHYCFG2_FMODE_MASK_MASK |
1111                        MAC_PHYCFG2_GMODE_MASK_MASK |
1112                        MAC_PHYCFG2_ACT_MASK_MASK   |
1113                        MAC_PHYCFG2_QUAL_MASK_MASK |
1114                        MAC_PHYCFG2_INBAND_ENABLE;
1115
1116         tw32(MAC_PHYCFG2, val);
1117
1118         val = tr32(MAC_PHYCFG1);
1119         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1120                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1121         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1122                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1123                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1124                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1125                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1126         }
1127         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1128                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1129         tw32(MAC_PHYCFG1, val);
1130
1131         val = tr32(MAC_EXT_RGMII_MODE);
1132         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1133                  MAC_RGMII_MODE_RX_QUALITY |
1134                  MAC_RGMII_MODE_RX_ACTIVITY |
1135                  MAC_RGMII_MODE_RX_ENG_DET |
1136                  MAC_RGMII_MODE_TX_ENABLE |
1137                  MAC_RGMII_MODE_TX_LOWPWR |
1138                  MAC_RGMII_MODE_TX_RESET);
1139         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1140                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1141                         val |= MAC_RGMII_MODE_RX_INT_B |
1142                                MAC_RGMII_MODE_RX_QUALITY |
1143                                MAC_RGMII_MODE_RX_ACTIVITY |
1144                                MAC_RGMII_MODE_RX_ENG_DET;
1145                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1146                         val |= MAC_RGMII_MODE_TX_ENABLE |
1147                                MAC_RGMII_MODE_TX_LOWPWR |
1148                                MAC_RGMII_MODE_TX_RESET;
1149         }
1150         tw32(MAC_EXT_RGMII_MODE, val);
1151 }
1152
1153 static void tg3_mdio_start(struct tg3 *tp)
1154 {
1155         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1156         tw32_f(MAC_MI_MODE, tp->mi_mode);
1157         udelay(80);
1158
1159         if (tg3_flag(tp, MDIOBUS_INITED) &&
1160             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1161                 tg3_mdio_config_5785(tp);
1162 }
1163
1164 static int tg3_mdio_init(struct tg3 *tp)
1165 {
1166         int i;
1167         u32 reg;
1168         struct phy_device *phydev;
1169
1170         if (tg3_flag(tp, 5717_PLUS)) {
1171                 u32 is_serdes;
1172
1173                 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1174
1175                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1176                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1177                 else
1178                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1179                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1180                 if (is_serdes)
1181                         tp->phy_addr += 7;
1182         } else
1183                 tp->phy_addr = TG3_PHY_MII_ADDR;
1184
1185         tg3_mdio_start(tp);
1186
1187         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1188                 return 0;
1189
1190         tp->mdio_bus = mdiobus_alloc();
1191         if (tp->mdio_bus == NULL)
1192                 return -ENOMEM;
1193
1194         tp->mdio_bus->name     = "tg3 mdio bus";
1195         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1196                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1197         tp->mdio_bus->priv     = tp;
1198         tp->mdio_bus->parent   = &tp->pdev->dev;
1199         tp->mdio_bus->read     = &tg3_mdio_read;
1200         tp->mdio_bus->write    = &tg3_mdio_write;
1201         tp->mdio_bus->reset    = &tg3_mdio_reset;
1202         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1203         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1204
1205         for (i = 0; i < PHY_MAX_ADDR; i++)
1206                 tp->mdio_bus->irq[i] = PHY_POLL;
1207
1208         /* The bus registration will look for all the PHYs on the mdio bus.
1209          * Unfortunately, it does not ensure the PHY is powered up before
1210          * accessing the PHY ID registers.  A chip reset is the
1211          * quickest way to bring the device back to an operational state.
1212          */
1213         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1214                 tg3_bmcr_reset(tp);
1215
1216         i = mdiobus_register(tp->mdio_bus);
1217         if (i) {
1218                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1219                 mdiobus_free(tp->mdio_bus);
1220                 return i;
1221         }
1222
1223         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1224
1225         if (!phydev || !phydev->drv) {
1226                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1227                 mdiobus_unregister(tp->mdio_bus);
1228                 mdiobus_free(tp->mdio_bus);
1229                 return -ENODEV;
1230         }
1231
1232         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1233         case PHY_ID_BCM57780:
1234                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1235                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1236                 break;
1237         case PHY_ID_BCM50610:
1238         case PHY_ID_BCM50610M:
1239                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1240                                      PHY_BRCM_RX_REFCLK_UNUSED |
1241                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1242                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1243                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1244                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1245                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1246                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1247                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1248                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1249                 /* fallthru */
1250         case PHY_ID_RTL8211C:
1251                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1252                 break;
1253         case PHY_ID_RTL8201E:
1254         case PHY_ID_BCMAC131:
1255                 phydev->interface = PHY_INTERFACE_MODE_MII;
1256                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1257                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1258                 break;
1259         }
1260
1261         tg3_flag_set(tp, MDIOBUS_INITED);
1262
1263         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1264                 tg3_mdio_config_5785(tp);
1265
1266         return 0;
1267 }
1268
1269 static void tg3_mdio_fini(struct tg3 *tp)
1270 {
1271         if (tg3_flag(tp, MDIOBUS_INITED)) {
1272                 tg3_flag_clear(tp, MDIOBUS_INITED);
1273                 mdiobus_unregister(tp->mdio_bus);
1274                 mdiobus_free(tp->mdio_bus);
1275         }
1276 }
1277
1278 /* tp->lock is held. */
1279 static inline void tg3_generate_fw_event(struct tg3 *tp)
1280 {
1281         u32 val;
1282
1283         val = tr32(GRC_RX_CPU_EVENT);
1284         val |= GRC_RX_CPU_DRIVER_EVENT;
1285         tw32_f(GRC_RX_CPU_EVENT, val);
1286
1287         tp->last_event_jiffies = jiffies;
1288 }
1289
1290 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1291
1292 /* tp->lock is held. */
1293 static void tg3_wait_for_event_ack(struct tg3 *tp)
1294 {
1295         int i;
1296         unsigned int delay_cnt;
1297         long time_remain;
1298
1299         /* If enough time has passed, no wait is necessary. */
1300         time_remain = (long)(tp->last_event_jiffies + 1 +
1301                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1302                       (long)jiffies;
1303         if (time_remain < 0)
1304                 return;
1305
1306         /* Check if we can shorten the wait time. */
1307         delay_cnt = jiffies_to_usecs(time_remain);
1308         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1309                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1310         delay_cnt = (delay_cnt >> 3) + 1;
1311
1312         for (i = 0; i < delay_cnt; i++) {
1313                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1314                         break;
1315                 udelay(8);
1316         }
1317 }
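/* Editorial sketch of the driver->firmware event handshake the two helpers
 * above implement; tg3_ump_link_report() below is a real instance:
 */
#if 0
	tg3_wait_for_event_ack(tp);		/* prior event consumed */
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	/* ... fill NIC_SRAM_FW_CMD_LEN_MBOX / DATA_MBOX ... */
	tg3_generate_fw_event(tp);		/* ring the doorbell */
#endif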
1318
1319 /* tp->lock is held. */
1320 static void tg3_ump_link_report(struct tg3 *tp)
1321 {
1322         u32 reg;
1323         u32 val;
1324
1325         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1326                 return;
1327
1328         tg3_wait_for_event_ack(tp);
1329
1330         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1331
1332         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1333
1334         val = 0;
1335         if (!tg3_readphy(tp, MII_BMCR, &reg))
1336                 val = reg << 16;
1337         if (!tg3_readphy(tp, MII_BMSR, &reg))
1338                 val |= (reg & 0xffff);
1339         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1340
1341         val = 0;
1342         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1343                 val = reg << 16;
1344         if (!tg3_readphy(tp, MII_LPA, &reg))
1345                 val |= (reg & 0xffff);
1346         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1347
1348         val = 0;
1349         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1350                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1351                         val = reg << 16;
1352                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1353                         val |= (reg & 0xffff);
1354         }
1355         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1356
1357         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1358                 val = reg << 16;
1359         else
1360                 val = 0;
1361         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1362
1363         tg3_generate_fw_event(tp);
1364 }
1365
1366 static void tg3_link_report(struct tg3 *tp)
1367 {
1368         if (!netif_carrier_ok(tp->dev)) {
1369                 netif_info(tp, link, tp->dev, "Link is down\n");
1370                 tg3_ump_link_report(tp);
1371         } else if (netif_msg_link(tp)) {
1372                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1373                             (tp->link_config.active_speed == SPEED_1000 ?
1374                              1000 :
1375                              (tp->link_config.active_speed == SPEED_100 ?
1376                               100 : 10)),
1377                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1378                              "full" : "half"));
1379
1380                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1381                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1382                             "on" : "off",
1383                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1384                             "on" : "off");
1385
1386                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1387                         netdev_info(tp->dev, "EEE is %s\n",
1388                                     tp->setlpicnt ? "enabled" : "disabled");
1389
1390                 tg3_ump_link_report(tp);
1391         }
1392 }
1393
1394 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1395 {
1396         u16 miireg;
1397
1398         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1399                 miireg = ADVERTISE_PAUSE_CAP;
1400         else if (flow_ctrl & FLOW_CTRL_TX)
1401                 miireg = ADVERTISE_PAUSE_ASYM;
1402         else if (flow_ctrl & FLOW_CTRL_RX)
1403                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1404         else
1405                 miireg = 0;
1406
1407         return miireg;
1408 }
1409
1410 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1411 {
1412         u16 miireg;
1413
1414         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1415                 miireg = ADVERTISE_1000XPAUSE;
1416         else if (flow_ctrl & FLOW_CTRL_TX)
1417                 miireg = ADVERTISE_1000XPSE_ASYM;
1418         else if (flow_ctrl & FLOW_CTRL_RX)
1419                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1420         else
1421                 miireg = 0;
1422
1423         return miireg;
1424 }
1425
1426 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1427 {
1428         u8 cap = 0;
1429
1430         if (lcladv & ADVERTISE_1000XPAUSE) {
1431                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1432                         if (rmtadv & LPA_1000XPAUSE)
1433                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1434                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1435                                 cap = FLOW_CTRL_RX;
1436                 } else {
1437                         if (rmtadv & LPA_1000XPAUSE)
1438                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1439                 }
1440         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1441                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1442                         cap = FLOW_CTRL_TX;
1443         }
1444
1445         return cap;
1446 }
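/* Editorial summary of the resolution above (IEEE 802.3 Annex 28B style):
 *
 *	local PAUSE  local ASYM  remote PAUSE  remote ASYM  =>  result
 *	     1            x           1             x           TX | RX
 *	     1            1           0             1           RX
 *	     0            1           1             1           TX
 *	  otherwise                                             none
 */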
1447
1448 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1449 {
1450         u8 autoneg;
1451         u8 flowctrl = 0;
1452         u32 old_rx_mode = tp->rx_mode;
1453         u32 old_tx_mode = tp->tx_mode;
1454
1455         if (tg3_flag(tp, USE_PHYLIB))
1456                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1457         else
1458                 autoneg = tp->link_config.autoneg;
1459
1460         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1461                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1462                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1463                 else
1464                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1465         } else
1466                 flowctrl = tp->link_config.flowctrl;
1467
1468         tp->link_config.active_flowctrl = flowctrl;
1469
1470         if (flowctrl & FLOW_CTRL_RX)
1471                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1472         else
1473                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1474
1475         if (old_rx_mode != tp->rx_mode)
1476                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1477
1478         if (flowctrl & FLOW_CTRL_TX)
1479                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1480         else
1481                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1482
1483         if (old_tx_mode != tp->tx_mode)
1484                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1485 }
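
     /* MAC_RX_MODE and MAC_TX_MODE are rewritten above only when the
      * resolved flow-control configuration actually changed, and the
      * tw32_f() flushing write variant is used, so each mode change is
      * posted to the chip before the caller proceeds.
      */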
1486
1487 static void tg3_adjust_link(struct net_device *dev)
1488 {
1489         u8 oldflowctrl, linkmesg = 0;
1490         u32 mac_mode, lcl_adv, rmt_adv;
1491         struct tg3 *tp = netdev_priv(dev);
1492         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1493
1494         spin_lock_bh(&tp->lock);
1495
1496         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1497                                     MAC_MODE_HALF_DUPLEX);
1498
1499         oldflowctrl = tp->link_config.active_flowctrl;
1500
1501         if (phydev->link) {
1502                 lcl_adv = 0;
1503                 rmt_adv = 0;
1504
1505                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1506                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1507                 else if (phydev->speed == SPEED_1000 ||
1508                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1509                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1510                 else
1511                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1512
1513                 if (phydev->duplex == DUPLEX_HALF)
1514                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1515                 else {
1516                         lcl_adv = tg3_advert_flowctrl_1000T(
1517                                   tp->link_config.flowctrl);
1518
1519                         if (phydev->pause)
1520                                 rmt_adv = LPA_PAUSE_CAP;
1521                         if (phydev->asym_pause)
1522                                 rmt_adv |= LPA_PAUSE_ASYM;
1523                 }
1524
1525                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1526         } else
1527                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1528
1529         if (mac_mode != tp->mac_mode) {
1530                 tp->mac_mode = mac_mode;
1531                 tw32_f(MAC_MODE, tp->mac_mode);
1532                 udelay(40);
1533         }
1534
1535         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1536                 if (phydev->speed == SPEED_10)
1537                         tw32(MAC_MI_STAT,
1538                              MAC_MI_STAT_10MBPS_MODE |
1539                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1540                 else
1541                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1542         }
1543
1544         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1545                 tw32(MAC_TX_LENGTHS,
1546                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1547                       (6 << TX_LENGTHS_IPG_SHIFT) |
1548                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1549         else
1550                 tw32(MAC_TX_LENGTHS,
1551                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1552                       (6 << TX_LENGTHS_IPG_SHIFT) |
1553                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1554
1555         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1556             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1557             phydev->speed != tp->link_config.active_speed ||
1558             phydev->duplex != tp->link_config.active_duplex ||
1559             oldflowctrl != tp->link_config.active_flowctrl)
1560                 linkmesg = 1;
1561
1562         tp->link_config.active_speed = phydev->speed;
1563         tp->link_config.active_duplex = phydev->duplex;
1564
1565         spin_unlock_bh(&tp->lock);
1566
1567         if (linkmesg)
1568                 tg3_link_report(tp);
1569 }
1570
1571 static int tg3_phy_init(struct tg3 *tp)
1572 {
1573         struct phy_device *phydev;
1574
1575         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1576                 return 0;
1577
1578         /* Bring the PHY back to a known state. */
1579         tg3_bmcr_reset(tp);
1580
1581         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1582
1583         /* Attach the MAC to the PHY. */
1584         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1585                              phydev->dev_flags, phydev->interface);
1586         if (IS_ERR(phydev)) {
1587                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1588                 return PTR_ERR(phydev);
1589         }
1590
1591         /* Mask with MAC supported features. */
1592         switch (phydev->interface) {
1593         case PHY_INTERFACE_MODE_GMII:
1594         case PHY_INTERFACE_MODE_RGMII:
1595                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1596                         phydev->supported &= (PHY_GBIT_FEATURES |
1597                                               SUPPORTED_Pause |
1598                                               SUPPORTED_Asym_Pause);
1599                         break;
1600                 }
1601                 /* fallthru */
1602         case PHY_INTERFACE_MODE_MII:
1603                 phydev->supported &= (PHY_BASIC_FEATURES |
1604                                       SUPPORTED_Pause |
1605                                       SUPPORTED_Asym_Pause);
1606                 break;
1607         default:
1608                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1609                 return -EINVAL;
1610         }
1611
1612         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1613
1614         phydev->advertising = phydev->supported;
1615
1616         return 0;
1617 }
1618
1619 static void tg3_phy_start(struct tg3 *tp)
1620 {
1621         struct phy_device *phydev;
1622
1623         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1624                 return;
1625
1626         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1627
1628         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1629                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1630                 phydev->speed = tp->link_config.orig_speed;
1631                 phydev->duplex = tp->link_config.orig_duplex;
1632                 phydev->autoneg = tp->link_config.orig_autoneg;
1633                 phydev->advertising = tp->link_config.orig_advertising;
1634         }
1635
1636         phy_start(phydev);
1637
1638         phy_start_aneg(phydev);
1639 }
1640
1641 static void tg3_phy_stop(struct tg3 *tp)
1642 {
1643         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1644                 return;
1645
1646         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1647 }
1648
1649 static void tg3_phy_fini(struct tg3 *tp)
1650 {
1651         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1652                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1653                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1654         }
1655 }
1656
1657 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1658 {
1659         u32 phytest;
1660
1661         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1662                 u32 phy;
1663
1664                 tg3_writephy(tp, MII_TG3_FET_TEST,
1665                              phytest | MII_TG3_FET_SHADOW_EN);
1666                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1667                         if (enable)
1668                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1669                         else
1670                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1672                 }
1673                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1674         }
1675 }
1676
1677 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1678 {
1679         u32 reg;
1680
1681         if (!tg3_flag(tp, 5705_PLUS) ||
1682             (tg3_flag(tp, 5717_PLUS) &&
1683              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1684                 return;
1685
1686         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1687                 tg3_phy_fet_toggle_apd(tp, enable);
1688                 return;
1689         }
1690
1691         reg = MII_TG3_MISC_SHDW_WREN |
1692               MII_TG3_MISC_SHDW_SCR5_SEL |
1693               MII_TG3_MISC_SHDW_SCR5_LPED |
1694               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1695               MII_TG3_MISC_SHDW_SCR5_SDTL |
1696               MII_TG3_MISC_SHDW_SCR5_C125OE;
1697         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1698                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1699
1700         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1701
1702
1703         reg = MII_TG3_MISC_SHDW_WREN |
1704               MII_TG3_MISC_SHDW_APD_SEL |
1705               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1706         if (enable)
1707                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1708
1709         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1710 }
1711
1712 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1713 {
1714         u32 phy;
1715
1716         if (!tg3_flag(tp, 5705_PLUS) ||
1717             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1718                 return;
1719
1720         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1721                 u32 ephy;
1722
1723                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1724                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1725
1726                         tg3_writephy(tp, MII_TG3_FET_TEST,
1727                                      ephy | MII_TG3_FET_SHADOW_EN);
1728                         if (!tg3_readphy(tp, reg, &phy)) {
1729                                 if (enable)
1730                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1731                                 else
1732                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1733                                 tg3_writephy(tp, reg, phy);
1734                         }
1735                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1736                 }
1737         } else {
1738                 int ret;
1739
1740                 ret = tg3_phy_auxctl_read(tp,
1741                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
1742                 if (!ret) {
1743                         if (enable)
1744                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1745                         else
1746                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1747                         tg3_phy_auxctl_write(tp,
1748                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
1749                 }
1750         }
1751 }
1752
1753 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1754 {
1755         int ret;
1756         u32 val;
1757
1758         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1759                 return;
1760
1761         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1762         if (!ret)
1763                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1764                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1765 }
1766
1767 static void tg3_phy_apply_otp(struct tg3 *tp)
1768 {
1769         u32 otp, phy;
1770
1771         if (!tp->phy_otp)
1772                 return;
1773
1774         otp = tp->phy_otp;
1775
1776         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
1777                 return;
1778
1779         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1780         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1781         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1782
1783         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1784               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1785         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1786
1787         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1788         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1789         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1790
1791         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1792         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1793
1794         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1795         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1796
1797         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1798               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1799         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1800
1801         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1802 }
1803
1804 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1805 {
1806         u32 val;
1807
1808         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
1809                 return;
1810
1811         tp->setlpicnt = 0;
1812
1813         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1814             current_link_up == 1 &&
1815             tp->link_config.active_duplex == DUPLEX_FULL &&
1816             (tp->link_config.active_speed == SPEED_100 ||
1817              tp->link_config.active_speed == SPEED_1000)) {
1818                 u32 eeectl;
1819
1820                 if (tp->link_config.active_speed == SPEED_1000)
1821                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
1822                 else
1823                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
1824
1825                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1826
1827                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1828                                   TG3_CL45_D7_EEERES_STAT, &val);
1829
1830                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1831                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
1832                         tp->setlpicnt = 2;
1833         }
1834
1835         if (!tp->setlpicnt) {
1836                 val = tr32(TG3_CPMU_EEE_MODE);
1837                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
1838         }
1839 }
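
     /* Summary of the EEE logic above: setlpicnt is armed (set to 2) only
      * when autoneg produced a full-duplex 100 or 1000 Mbps link and the
      * clause-45 EEE resolution status shows the link partner agreed to
      * EEE; tg3_link_report() treats a non-zero setlpicnt as "EEE enabled".
      * In every other case, LPI is forced off here.
      */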
1840
1841 static void tg3_phy_eee_enable(struct tg3 *tp)
1842 {
1843         u32 val;
1844
1845         if (tp->link_config.active_speed == SPEED_1000 &&
1846             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1847              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
1848              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
1849             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
1850                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
1851                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
1852         }
1853
1854         val = tr32(TG3_CPMU_EEE_MODE);
1855         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
1856 }
1857
1858 static int tg3_wait_macro_done(struct tg3 *tp)
1859 {
1860         int limit = 100;
1861
1862         while (limit--) {
1863                 u32 tmp32;
1864
1865                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1866                         if ((tmp32 & 0x1000) == 0)
1867                                 break;
1868                 }
1869         }
1870         if (limit < 0)
1871                 return -EBUSY;
1872
1873         return 0;
1874 }
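
     /* The post-decrement above is deliberate: if the DSP busy bit (0x1000)
      * never clears, the loop exits with limit == -1, which the (limit < 0)
      * test turns into -EBUSY; breaking out on success leaves limit >= 0.
      */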
1875
1876 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1877 {
1878         static const u32 test_pat[4][6] = {
1879         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1880         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1881         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1882         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1883         };
1884         int chan;
1885
1886         for (chan = 0; chan < 4; chan++) {
1887                 int i;
1888
1889                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1890                              (chan * 0x2000) | 0x0200);
1891                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1892
1893                 for (i = 0; i < 6; i++)
1894                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1895                                      test_pat[chan][i]);
1896
1897                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1898                 if (tg3_wait_macro_done(tp)) {
1899                         *resetp = 1;
1900                         return -EBUSY;
1901                 }
1902
1903                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1904                              (chan * 0x2000) | 0x0200);
1905                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1906                 if (tg3_wait_macro_done(tp)) {
1907                         *resetp = 1;
1908                         return -EBUSY;
1909                 }
1910
1911                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1912                 if (tg3_wait_macro_done(tp)) {
1913                         *resetp = 1;
1914                         return -EBUSY;
1915                 }
1916
1917                 for (i = 0; i < 6; i += 2) {
1918                         u32 low, high;
1919
1920                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1921                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1922                             tg3_wait_macro_done(tp)) {
1923                                 *resetp = 1;
1924                                 return -EBUSY;
1925                         }
1926                         low &= 0x7fff;
1927                         high &= 0x000f;
1928                         if (low != test_pat[chan][i] ||
1929                             high != test_pat[chan][i+1]) {
1930                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1931                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1932                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1933
1934                                 return -EBUSY;
1935                         }
1936                 }
1937         }
1938
1939         return 0;
1940 }
1941
1942 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1943 {
1944         int chan;
1945
1946         for (chan = 0; chan < 4; chan++) {
1947                 int i;
1948
1949                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1950                              (chan * 0x2000) | 0x0200);
1951                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1952                 for (i = 0; i < 6; i++)
1953                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1954                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1955                 if (tg3_wait_macro_done(tp))
1956                         return -EBUSY;
1957         }
1958
1959         return 0;
1960 }
1961
1962 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1963 {
1964         u32 reg32, phy9_orig;
1965         int retries, do_phy_reset, err;
1966
1967         retries = 10;
1968         do_phy_reset = 1;
1969         do {
1970                 if (do_phy_reset) {
1971                         err = tg3_bmcr_reset(tp);
1972                         if (err)
1973                                 return err;
1974                         do_phy_reset = 0;
1975                 }
1976
1977                 /* Disable transmitter and interrupt.  */
1978                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1979                         continue;
1980
1981                 reg32 |= 0x3000;
1982                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1983
1984                 /* Set full-duplex, 1000 Mbps. */
1985                 tg3_writephy(tp, MII_BMCR,
1986                              BMCR_FULLDPLX | BMCR_SPEED1000);
1987
1988                 /* Set to master mode.  */
1989                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
1990                         continue;
1991
1992                 tg3_writephy(tp, MII_CTRL1000,
1993                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
1994
1995                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
1996                 if (err)
1997                         return err;
1998
1999                 /* Block the PHY control access.  */
2000                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2001
2002                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2003                 if (!err)
2004                         break;
2005         } while (--retries);
2006
2007         err = tg3_phy_reset_chanpat(tp);
2008         if (err)
2009                 return err;
2010
2011         tg3_phydsp_write(tp, 0x8005, 0x0000);
2012
2013         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2014         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2015
2016         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2017
2018         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2019
2020         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2021                 reg32 &= ~0x3000;
2022                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2023         } else if (!err)
2024                 err = -EBUSY;
2025
2026         return err;
2027 }
2028
2029 /* Reset the tigon3 PHY and reapply all chip-specific workarounds
2030  * needed to bring it back to a working state.
2031  */
2032 static int tg3_phy_reset(struct tg3 *tp)
2033 {
2034         u32 val, cpmuctrl;
2035         int err;
2036
2037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2038                 val = tr32(GRC_MISC_CFG);
2039                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2040                 udelay(40);
2041         }
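             /* MII_BMSR is read back-to-back because its link-status bit is
              * latched low; the double read also verifies that the PHY is
              * responding before the reset proceeds.
              */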
2042         err  = tg3_readphy(tp, MII_BMSR, &val);
2043         err |= tg3_readphy(tp, MII_BMSR, &val);
2044         if (err != 0)
2045                 return -EBUSY;
2046
2047         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2048                 netif_carrier_off(tp->dev);
2049                 tg3_link_report(tp);
2050         }
2051
2052         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2053             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2054             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2055                 err = tg3_phy_reset_5703_4_5(tp);
2056                 if (err)
2057                         return err;
2058                 goto out;
2059         }
2060
2061         cpmuctrl = 0;
2062         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2063             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2064                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2065                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2066                         tw32(TG3_CPMU_CTRL,
2067                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2068         }
2069
2070         err = tg3_bmcr_reset(tp);
2071         if (err)
2072                 return err;
2073
2074         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2075                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2076                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2077
2078                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2079         }
2080
2081         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2082             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2083                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2084                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2085                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2086                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2087                         udelay(40);
2088                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2089                 }
2090         }
2091
2092         if (tg3_flag(tp, 5717_PLUS) &&
2093             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2094                 return 0;
2095
2096         tg3_phy_apply_otp(tp);
2097
2098         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2099                 tg3_phy_toggle_apd(tp, true);
2100         else
2101                 tg3_phy_toggle_apd(tp, false);
2102
2103 out:
2104         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2105             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2106                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2107                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2108                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2109         }
2110
2111         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2112                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2113                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2114         }
2115
2116         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2117                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2118                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2119                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2120                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2121                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2122                 }
2123         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2124                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2125                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2126                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2127                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2128                                 tg3_writephy(tp, MII_TG3_TEST1,
2129                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2130                         } else
2131                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2132
2133                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2134                 }
2135         }
2136
2137         /* Set the extended packet length bit (bit 14) on all chips
2138          * that support jumbo frames. */
2139         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2140                 /* Cannot do read-modify-write on 5401 */
2141                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2142         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2143                 /* Set bit 14 with read-modify-write to preserve other bits */
2144                 err = tg3_phy_auxctl_read(tp,
2145                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2146                 if (!err)
2147                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2148                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2149         }
2150
2151         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2152          * support transmission of jumbo frames.
2153          */
2154         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2155                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2156                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2157                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2158         }
2159
2160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2161                 /* adjust output voltage */
2162                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2163         }
2164
2165         tg3_phy_toggle_automdix(tp, 1);
2166         tg3_phy_set_wirespeed(tp);
2167         return 0;
2168 }
2169
2170 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2171 {
2172         if (!tg3_flag(tp, IS_NIC))
2173                 return 0;
2174
2175         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2176                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2177
2178         return 0;
2179 }
2180
2181 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2182 {
2183         u32 grc_local_ctrl;
2184
2185         if (!tg3_flag(tp, IS_NIC) ||
2186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2188                 return;
2189
2190         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2191
2192         tw32_wait_f(GRC_LOCAL_CTRL,
2193                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2194                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2195
2196         tw32_wait_f(GRC_LOCAL_CTRL,
2197                     grc_local_ctrl,
2198                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2199
2200         tw32_wait_f(GRC_LOCAL_CTRL,
2201                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2202                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2203 }
2204
2205 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2206 {
2207         if (!tg3_flag(tp, IS_NIC))
2208                 return;
2209
2210         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2211             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2212                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213                             (GRC_LCLCTRL_GPIO_OE0 |
2214                              GRC_LCLCTRL_GPIO_OE1 |
2215                              GRC_LCLCTRL_GPIO_OE2 |
2216                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2217                              GRC_LCLCTRL_GPIO_OUTPUT1),
2218                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2219         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2220                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2221                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2222                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2223                                      GRC_LCLCTRL_GPIO_OE1 |
2224                                      GRC_LCLCTRL_GPIO_OE2 |
2225                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2226                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2227                                      tp->grc_local_ctrl;
2228                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2229                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2230
2231                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2232                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2233                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2234
2235                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2236                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2237                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2238         } else {
2239                 u32 no_gpio2;
2240                 u32 grc_local_ctrl = 0;
2241
2242                 /* Workaround to prevent drawing too much current. */
2243                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2244                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2245                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2246                                     grc_local_ctrl,
2247                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2248                 }
2249
2250                 /* On 5753 and variants, GPIO2 cannot be used. */
2251                 no_gpio2 = tp->nic_sram_data_cfg &
2252                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2253
2254                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2255                                   GRC_LCLCTRL_GPIO_OE1 |
2256                                   GRC_LCLCTRL_GPIO_OE2 |
2257                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2258                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2259                 if (no_gpio2) {
2260                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2261                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2262                 }
2263                 tw32_wait_f(GRC_LOCAL_CTRL,
2264                             tp->grc_local_ctrl | grc_local_ctrl,
2265                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2266
2267                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2268
2269                 tw32_wait_f(GRC_LOCAL_CTRL,
2270                             tp->grc_local_ctrl | grc_local_ctrl,
2271                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2272
2273                 if (!no_gpio2) {
2274                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2275                         tw32_wait_f(GRC_LOCAL_CTRL,
2276                                     tp->grc_local_ctrl | grc_local_ctrl,
2277                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2278                 }
2279         }
2280 }
2281
2282 static void tg3_frob_aux_power(struct tg3 *tp)
2283 {
2284         bool need_vaux = false;
2285
2286         /* The GPIOs do something completely different on 57765. */
2287         if (!tg3_flag(tp, IS_NIC) ||
2288             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2290                 return;
2291
2292         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2293              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2294              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2295              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
2296             tp->pdev_peer != tp->pdev) {
2297                 struct net_device *dev_peer;
2298
2299                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2300
2301                 /* remove_one() may have been run on the peer. */
2302                 if (dev_peer) {
2303                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2304
2305                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2306                                 return;
2307
2308                         if (tg3_flag(tp_peer, WOL_ENABLE) ||
2309                             tg3_flag(tp_peer, ENABLE_ASF))
2310                                 need_vaux = true;
2311                 }
2312         }
2313
2314         if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
2315                 need_vaux = true;
2316
2317         if (need_vaux)
2318                 tg3_pwrsrc_switch_to_vaux(tp);
2319         else
2320                 tg3_pwrsrc_die_with_vmain(tp);
2321 }
2322
2323 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2324 {
2325         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2326                 return 1;
2327         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2328                 if (speed != SPEED_10)
2329                         return 1;
2330         } else if (speed == SPEED_10)
2331                 return 1;
2332
2333         return 0;
2334 }
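
     /* Condensed truth table for the helper above: it returns 1 (caller
      * should set MAC_MODE_LINK_POLARITY) when the LED mode is
      * LED_CTRL_MODE_PHY_2, when a BCM5411 PHY is running at any speed
      * other than 10 Mbps, or when any other PHY is running at 10 Mbps.
      */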
2335
2336 static int tg3_setup_phy(struct tg3 *, int);
2337
2338 #define RESET_KIND_SHUTDOWN     0
2339 #define RESET_KIND_INIT         1
2340 #define RESET_KIND_SUSPEND      2
2341
2342 static void tg3_write_sig_post_reset(struct tg3 *, int);
2343 static int tg3_halt_cpu(struct tg3 *, u32);
2344
2345 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2346 {
2347         u32 val;
2348
2349         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2351                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2352                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2353
2354                         sg_dig_ctrl |=
2355                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2356                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2357                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2358                 }
2359                 return;
2360         }
2361
2362         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2363                 tg3_bmcr_reset(tp);
2364                 val = tr32(GRC_MISC_CFG);
2365                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2366                 udelay(40);
2367                 return;
2368         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2369                 u32 phytest;
2370                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2371                         u32 phy;
2372
2373                         tg3_writephy(tp, MII_ADVERTISE, 0);
2374                         tg3_writephy(tp, MII_BMCR,
2375                                      BMCR_ANENABLE | BMCR_ANRESTART);
2376
2377                         tg3_writephy(tp, MII_TG3_FET_TEST,
2378                                      phytest | MII_TG3_FET_SHADOW_EN);
2379                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2380                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2381                                 tg3_writephy(tp,
2382                                              MII_TG3_FET_SHDW_AUXMODE4,
2383                                              phy);
2384                         }
2385                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2386                 }
2387                 return;
2388         } else if (do_low_power) {
2389                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2390                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2391
2392                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2393                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2394                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2395                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2396         }
2397
2398         /* The PHY must not be powered down on the 5700, 5704, or the
2399          * 5780 with an MII serdes PHY, because of hardware bugs.
2400          */
2401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2402             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2403             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2404              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2405                 return;
2406
2407         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2408             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2409                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2410                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2411                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2412                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2413         }
2414
2415         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2416 }
2417
2418 /* tp->lock is held. */
2419 static int tg3_nvram_lock(struct tg3 *tp)
2420 {
2421         if (tg3_flag(tp, NVRAM)) {
2422                 int i;
2423
2424                 if (tp->nvram_lock_cnt == 0) {
2425                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2426                         for (i = 0; i < 8000; i++) {
2427                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2428                                         break;
2429                                 udelay(20);
2430                         }
2431                         if (i == 8000) {
2432                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2433                                 return -ENODEV;
2434                         }
2435                 }
2436                 tp->nvram_lock_cnt++;
2437         }
2438         return 0;
2439 }
2440
2441 /* tp->lock is held. */
2442 static void tg3_nvram_unlock(struct tg3 *tp)
2443 {
2444         if (tg3_flag(tp, NVRAM)) {
2445                 if (tp->nvram_lock_cnt > 0)
2446                         tp->nvram_lock_cnt--;
2447                 if (tp->nvram_lock_cnt == 0)
2448                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2449         }
2450 }
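
     /* The NVRAM lock nests via tp->nvram_lock_cnt: only the outermost
      * tg3_nvram_lock() acquires the NVRAM_SWARB hardware arbitration, and
      * only the final tg3_nvram_unlock() releases it.  The usual calling
      * pattern, as in tg3_nvram_read() below, is:
      *
      *     ret = tg3_nvram_lock(tp);       (tp->lock already held)
      *     if (!ret) {
      *             tg3_enable_nvram_access(tp);
      *             ... NVRAM_ADDR / NVRAM_CMD accesses ...
      *             tg3_disable_nvram_access(tp);
      *             tg3_nvram_unlock(tp);
      *     }
      */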
2451
2452 /* tp->lock is held. */
2453 static void tg3_enable_nvram_access(struct tg3 *tp)
2454 {
2455         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2456                 u32 nvaccess = tr32(NVRAM_ACCESS);
2457
2458                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2459         }
2460 }
2461
2462 /* tp->lock is held. */
2463 static void tg3_disable_nvram_access(struct tg3 *tp)
2464 {
2465         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2466                 u32 nvaccess = tr32(NVRAM_ACCESS);
2467
2468                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2469         }
2470 }
2471
2472 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2473                                         u32 offset, u32 *val)
2474 {
2475         u32 tmp;
2476         int i;
2477
2478         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2479                 return -EINVAL;
2480
2481         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2482                                         EEPROM_ADDR_DEVID_MASK |
2483                                         EEPROM_ADDR_READ);
2484         tw32(GRC_EEPROM_ADDR,
2485              tmp |
2486              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2487              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2488               EEPROM_ADDR_ADDR_MASK) |
2489              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2490
2491         for (i = 0; i < 1000; i++) {
2492                 tmp = tr32(GRC_EEPROM_ADDR);
2493
2494                 if (tmp & EEPROM_ADDR_COMPLETE)
2495                         break;
2496                 msleep(1);
2497         }
2498         if (!(tmp & EEPROM_ADDR_COMPLETE))
2499                 return -EBUSY;
2500
2501         tmp = tr32(GRC_EEPROM_DATA);
2502
2503         /*
2504          * The data will always be opposite the native endian
2505          * format.  Perform a blind byteswap to compensate.
2506          */
2507         *val = swab32(tmp);
2508
2509         return 0;
2510 }
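
     /* Example of the blind byteswap above: a raw GRC_EEPROM_DATA value of
      * 0x12345678 reaches the caller as 0x78563412 on both little- and
      * big-endian hosts, compensating for the EEPROM data always arriving
      * in the opposite of native byte order.
      */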
2511
2512 #define NVRAM_CMD_TIMEOUT 10000
2513
2514 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2515 {
2516         int i;
2517
2518         tw32(NVRAM_CMD, nvram_cmd);
2519         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2520                 udelay(10);
2521                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2522                         udelay(10);
2523                         break;
2524                 }
2525         }
2526
2527         if (i == NVRAM_CMD_TIMEOUT)
2528                 return -EBUSY;
2529
2530         return 0;
2531 }
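
     /* Rough timeout arithmetic for the loop above: one udelay(10) per
      * iteration times NVRAM_CMD_TIMEOUT (10000) bounds a command at about
      * 100 ms before -EBUSY is returned.
      */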
2532
2533 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2534 {
2535         if (tg3_flag(tp, NVRAM) &&
2536             tg3_flag(tp, NVRAM_BUFFERED) &&
2537             tg3_flag(tp, FLASH) &&
2538             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2539             (tp->nvram_jedecnum == JEDEC_ATMEL))
2540
2541                 addr = ((addr / tp->nvram_pagesize) <<
2542                         ATMEL_AT45DB0X1B_PAGE_POS) +
2543                        (addr % tp->nvram_pagesize);
2544
2545         return addr;
2546 }
2547
2548 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2549 {
2550         if (tg3_flag(tp, NVRAM) &&
2551             tg3_flag(tp, NVRAM_BUFFERED) &&
2552             tg3_flag(tp, FLASH) &&
2553             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2554             (tp->nvram_jedecnum == JEDEC_ATMEL))
2555
2556                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2557                         tp->nvram_pagesize) +
2558                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2559
2560         return addr;
2561 }
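
     /* Worked example of the two translations above, assuming the usual
      * Atmel AT45DB0x1B geometry of 264-byte pages and a 9-bit page-offset
      * field (ATMEL_AT45DB0X1B_PAGE_POS == 9): linear address 1000 falls in
      * page 3 at offset 208, so tg3_nvram_phys_addr() yields
      * (3 << 9) + 208 == 1744, and tg3_nvram_logical_addr() maps 1744 back
      * to 3 * 264 + 208 == 1000.
      */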
2562
2563 /* NOTE: Data read in from NVRAM is byteswapped according to
2564  * the byteswapping settings for all other register accesses.
2565  * tg3 devices are BE devices, so on a BE machine, the data
2566  * returned will be exactly as it is seen in NVRAM.  On a LE
2567  * machine, the 32-bit value will be byteswapped.
2568  */
2569 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2570 {
2571         int ret;
2572
2573         if (!tg3_flag(tp, NVRAM))
2574                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2575
2576         offset = tg3_nvram_phys_addr(tp, offset);
2577
2578         if (offset > NVRAM_ADDR_MSK)
2579                 return -EINVAL;
2580
2581         ret = tg3_nvram_lock(tp);
2582         if (ret)
2583                 return ret;
2584
2585         tg3_enable_nvram_access(tp);
2586
2587         tw32(NVRAM_ADDR, offset);
2588         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2589                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2590
2591         if (ret == 0)
2592                 *val = tr32(NVRAM_RDDATA);
2593
2594         tg3_disable_nvram_access(tp);
2595
2596         tg3_nvram_unlock(tp);
2597
2598         return ret;
2599 }
2600
2601 /* Ensures NVRAM data is in bytestream format. */
2602 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2603 {
2604         u32 v;
2605         int res = tg3_nvram_read(tp, offset, &v);
2606         if (!res)
2607                 *val = cpu_to_be32(v);
2608         return res;
2609 }
2610
2611 /* tp->lock is held. */
2612 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2613 {
2614         u32 addr_high, addr_low;
2615         int i;
2616
2617         addr_high = ((tp->dev->dev_addr[0] << 8) |
2618                      tp->dev->dev_addr[1]);
2619         addr_low = ((tp->dev->dev_addr[2] << 24) |
2620                     (tp->dev->dev_addr[3] << 16) |
2621                     (tp->dev->dev_addr[4] <<  8) |
2622                     (tp->dev->dev_addr[5] <<  0));
2623         for (i = 0; i < 4; i++) {
2624                 if (i == 1 && skip_mac_1)
2625                         continue;
2626                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2627                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2631             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2632                 for (i = 0; i < 12; i++) {
2633                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2634                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2635                 }
2636         }
2637
2638         addr_high = (tp->dev->dev_addr[0] +
2639                      tp->dev->dev_addr[1] +
2640                      tp->dev->dev_addr[2] +
2641                      tp->dev->dev_addr[3] +
2642                      tp->dev->dev_addr[4] +
2643                      tp->dev->dev_addr[5]) &
2644                 TX_BACKOFF_SEED_MASK;
2645         tw32(MAC_TX_BACKOFF_SEED, addr_high);
2646 }
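
     /* Register packing illustrated: for a MAC address of 00:11:22:33:44:55
      * the loop above writes addr_high == 0x0011 and addr_low == 0x22334455
      * into each MAC_ADDR_n_HIGH/LOW pair, and the backoff seed becomes
      * (0x00 + 0x11 + 0x22 + 0x33 + 0x44 + 0x55) & TX_BACKOFF_SEED_MASK,
      * i.e. 0xff masked.
      */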
2647
2648 static void tg3_enable_register_access(struct tg3 *tp)
2649 {
2650         /*
2651          * Make sure register accesses (indirect or otherwise) will function
2652          * correctly.
2653          */
2654         pci_write_config_dword(tp->pdev,
2655                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2656 }
2657
2658 static int tg3_power_up(struct tg3 *tp)
2659 {
2660         int err;
2661
2662         tg3_enable_register_access(tp);
2663
2664         err = pci_set_power_state(tp->pdev, PCI_D0);
2665         if (!err) {
2666                 /* Switch out of Vaux if it is a NIC */
2667                 tg3_pwrsrc_switch_to_vmain(tp);
2668         } else {
2669                 netdev_err(tp->dev, "Transition to D0 failed\n");
2670         }
2671
2672         return err;
2673 }
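
     /* pci_set_power_state() can legitimately fail here, for instance when
      * the device does not expose usable PCI power-management support, so
      * the result is checked and propagated rather than assumed; the switch
      * back to Vmain happens only once the chip has actually reached D0.
      */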
2674
2675 static int tg3_power_down_prepare(struct tg3 *tp)
2676 {
2677         u32 misc_host_ctrl;
2678         bool device_should_wake, do_low_power;
2679
2680         tg3_enable_register_access(tp);
2681
2682         /* Restore the CLKREQ setting. */
2683         if (tg3_flag(tp, CLKREQ_BUG)) {
2684                 u16 lnkctl;
2685
2686                 pci_read_config_word(tp->pdev,
2687                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2688                                      &lnkctl);
2689                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2690                 pci_write_config_word(tp->pdev,
2691                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
2692                                       lnkctl);
2693         }
2694
2695         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2696         tw32(TG3PCI_MISC_HOST_CTRL,
2697              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2698
2699         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2700                              tg3_flag(tp, WOL_ENABLE);
2701
2702         if (tg3_flag(tp, USE_PHYLIB)) {
2703                 do_low_power = false;
2704                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2705                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2706                         struct phy_device *phydev;
2707                         u32 phyid, advertising;
2708
2709                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2710
2711                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2712
2713                         tp->link_config.orig_speed = phydev->speed;
2714                         tp->link_config.orig_duplex = phydev->duplex;
2715                         tp->link_config.orig_autoneg = phydev->autoneg;
2716                         tp->link_config.orig_advertising = phydev->advertising;
2717
2718                         advertising = ADVERTISED_TP |
2719                                       ADVERTISED_Pause |
2720                                       ADVERTISED_Autoneg |
2721                                       ADVERTISED_10baseT_Half;
2722
2723                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
2724                                 if (tg3_flag(tp, WOL_SPEED_100MB))
2725                                         advertising |=
2726                                                 ADVERTISED_100baseT_Half |
2727                                                 ADVERTISED_100baseT_Full |
2728                                                 ADVERTISED_10baseT_Full;
2729                                 else
2730                                         advertising |= ADVERTISED_10baseT_Full;
2731                         }
2732
2733                         phydev->advertising = advertising;
2734
2735                         phy_start_aneg(phydev);
2736
2737                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2738                         if (phyid != PHY_ID_BCMAC131) {
2739                                 phyid &= PHY_BCM_OUI_MASK;
2740                                 if (phyid == PHY_BCM_OUI_1 ||
2741                                     phyid == PHY_BCM_OUI_2 ||
2742                                     phyid == PHY_BCM_OUI_3)
2743                                         do_low_power = true;
2744                         }
2745                 }
2746         } else {
2747                 do_low_power = true;
2748
2749                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2750                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2751                         tp->link_config.orig_speed = tp->link_config.speed;
2752                         tp->link_config.orig_duplex = tp->link_config.duplex;
2753                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
2754                 }
2755
2756                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2757                         tp->link_config.speed = SPEED_10;
2758                         tp->link_config.duplex = DUPLEX_HALF;
2759                         tp->link_config.autoneg = AUTONEG_ENABLE;
2760                         tg3_setup_phy(tp, 0);
2761                 }
2762         }
2763
2764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2765                 u32 val;
2766
2767                 val = tr32(GRC_VCPU_EXT_CTRL);
2768                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2769         } else if (!tg3_flag(tp, ENABLE_ASF)) {
2770                 int i;
2771                 u32 val;
2772
2773                 for (i = 0; i < 200; i++) {
2774                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2775                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2776                                 break;
2777                         msleep(1);
2778                 }
2779         }
2780         if (tg3_flag(tp, WOL_CAP))
2781                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2782                                                      WOL_DRV_STATE_SHUTDOWN |
2783                                                      WOL_DRV_WOL |
2784                                                      WOL_SET_MAGIC_PKT);
2785
2786         if (device_should_wake) {
2787                 u32 mac_mode;
2788
2789                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2790                         if (do_low_power &&
2791                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
2792                                 tg3_phy_auxctl_write(tp,
2793                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
2794                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
2795                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2796                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
2797                                 udelay(40);
2798                         }
2799
2800                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2801                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
2802                         else
2803                                 mac_mode = MAC_MODE_PORT_MODE_MII;
2804
2805                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2806                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2807                             ASIC_REV_5700) {
2808                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
2809                                              SPEED_100 : SPEED_10;
2810                                 if (tg3_5700_link_polarity(tp, speed))
2811                                         mac_mode |= MAC_MODE_LINK_POLARITY;
2812                                 else
2813                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
2814                         }
2815                 } else {
2816                         mac_mode = MAC_MODE_PORT_MODE_TBI;
2817                 }
2818
2819                 if (!tg3_flag(tp, 5750_PLUS))
2820                         tw32(MAC_LED_CTRL, tp->led_ctrl);
2821
2822                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2823                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
2824                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
2825                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2826
2827                 if (tg3_flag(tp, ENABLE_APE))
2828                         mac_mode |= MAC_MODE_APE_TX_EN |
2829                                     MAC_MODE_APE_RX_EN |
2830                                     MAC_MODE_TDE_ENABLE;
2831
2832                 tw32_f(MAC_MODE, mac_mode);
2833                 udelay(100);
2834
2835                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2836                 udelay(10);
2837         }
2838
2839         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
2840             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2841              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2842                 u32 base_val;
2843
2844                 base_val = tp->pci_clock_ctrl;
2845                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2846                              CLOCK_CTRL_TXCLK_DISABLE);
2847
2848                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2849                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
2850         } else if (tg3_flag(tp, 5780_CLASS) ||
2851                    tg3_flag(tp, CPMU_PRESENT) ||
2852                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2853                 /* do nothing */
2854         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
2855                 u32 newbits1, newbits2;
2856
2857                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2858                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2859                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2860                                     CLOCK_CTRL_TXCLK_DISABLE |
2861                                     CLOCK_CTRL_ALTCLK);
2862                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2863                 } else if (tg3_flag(tp, 5705_PLUS)) {
2864                         newbits1 = CLOCK_CTRL_625_CORE;
2865                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2866                 } else {
2867                         newbits1 = CLOCK_CTRL_ALTCLK;
2868                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2869                 }
2870
2871                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2872                             40);
2873
2874                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2875                             40);
2876
2877                 if (!tg3_flag(tp, 5705_PLUS)) {
2878                         u32 newbits3;
2879
2880                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2881                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2882                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2883                                             CLOCK_CTRL_TXCLK_DISABLE |
2884                                             CLOCK_CTRL_44MHZ_CORE);
2885                         } else {
2886                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2887                         }
2888
2889                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
2890                                     tp->pci_clock_ctrl | newbits3, 40);
2891                 }
2892         }
2893
2894         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
2895                 tg3_power_down_phy(tp, do_low_power);
2896
2897         tg3_frob_aux_power(tp);
2898
2899         /* Workaround for the unstable PLL clock on 5750 AX/BX chips. */
2900         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2901             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2902                 u32 val = tr32(0x7d00);
2903
2904                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2905                 tw32(0x7d00, val);
2906                 if (!tg3_flag(tp, ENABLE_ASF)) {
2907                         int err;
2908
2909                         err = tg3_nvram_lock(tp);
2910                         tg3_halt_cpu(tp, RX_CPU_BASE);
2911                         if (!err)
2912                                 tg3_nvram_unlock(tp);
2913                 }
2914         }
2915
2916         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2917
2918         return 0;
2919 }
2920
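/* Final power down: run the D3 preparation above, arm PCI PME
 * according to the WOL_ENABLE flag, then put the device in D3hot.
 */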
2921 static void tg3_power_down(struct tg3 *tp)
2922 {
2923         tg3_power_down_prepare(tp);
2924
2925         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
2926         pci_set_power_state(tp->pdev, PCI_D3hot);
2927 }
2928
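/* Decode the speed/duplex field of the Broadcom auxiliary status
 * register into generic SPEED_xxx / DUPLEX_xxx values.  For example,
 * MII_TG3_AUX_STAT_100FULL decodes to SPEED_100 / DUPLEX_FULL.
 * FET-style PHYs encode speed and duplex differently, hence the
 * special handling in the default case.
 */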
2929 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2930 {
2931         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2932         case MII_TG3_AUX_STAT_10HALF:
2933                 *speed = SPEED_10;
2934                 *duplex = DUPLEX_HALF;
2935                 break;
2936
2937         case MII_TG3_AUX_STAT_10FULL:
2938                 *speed = SPEED_10;
2939                 *duplex = DUPLEX_FULL;
2940                 break;
2941
2942         case MII_TG3_AUX_STAT_100HALF:
2943                 *speed = SPEED_100;
2944                 *duplex = DUPLEX_HALF;
2945                 break;
2946
2947         case MII_TG3_AUX_STAT_100FULL:
2948                 *speed = SPEED_100;
2949                 *duplex = DUPLEX_FULL;
2950                 break;
2951
2952         case MII_TG3_AUX_STAT_1000HALF:
2953                 *speed = SPEED_1000;
2954                 *duplex = DUPLEX_HALF;
2955                 break;
2956
2957         case MII_TG3_AUX_STAT_1000FULL:
2958                 *speed = SPEED_1000;
2959                 *duplex = DUPLEX_FULL;
2960                 break;
2961
2962         default:
2963                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2964                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2965                                  SPEED_10;
2966                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2967                                   DUPLEX_HALF;
2968                         break;
2969                 }
2970                 *speed = SPEED_INVALID;
2971                 *duplex = DUPLEX_INVALID;
2972                 break;
2973         }
2974 }
2975
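/* Program the PHY autoneg advertisement: 10/100 modes and pause bits
 * in MII_ADVERTISE, gigabit modes in MII_CTRL1000 and, where the PHY
 * is EEE-capable, the EEE advertisement via the clause 45 register
 * MDIO_AN_EEE_ADV.  Returns 0 or a PHY access error code.
 */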
2976 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
2977 {
2978         int err = 0;
2979         u32 val, new_adv;
2980
2981         new_adv = ADVERTISE_CSMA;
2982         if (advertise & ADVERTISED_10baseT_Half)
2983                 new_adv |= ADVERTISE_10HALF;
2984         if (advertise & ADVERTISED_10baseT_Full)
2985                 new_adv |= ADVERTISE_10FULL;
2986         if (advertise & ADVERTISED_100baseT_Half)
2987                 new_adv |= ADVERTISE_100HALF;
2988         if (advertise & ADVERTISED_100baseT_Full)
2989                 new_adv |= ADVERTISE_100FULL;
2990
2991         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
2992
2993         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
2994         if (err)
2995                 goto done;
2996
2997         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2998                 goto done;
2999
3000         new_adv = 0;
3001         if (advertise & ADVERTISED_1000baseT_Half)
3002                 new_adv |= ADVERTISE_1000HALF;
3003         if (advertise & ADVERTISED_1000baseT_Full)
3004                 new_adv |= ADVERTISE_1000FULL;
3005
3006         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3007             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3008                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3009
3010         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3011         if (err)
3012                 goto done;
3013
3014         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3015                 goto done;
3016
3017         tw32(TG3_CPMU_EEE_MODE,
3018              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3019
3020         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3021         if (!err) {
3022                 u32 err2;
3023
3024                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3025                 case ASIC_REV_5717:
3026                 case ASIC_REV_57765:
3027                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3028                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3029                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3030                         /* Fall through */
3031                 case ASIC_REV_5719:
3032                         val = MII_TG3_DSP_TAP26_ALNOKO |
3033                               MII_TG3_DSP_TAP26_RMRXSTO |
3034                               MII_TG3_DSP_TAP26_OPCSINPT;
3035                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3036                 }
3037
3038                 val = 0;
3039                 /* Advertise 100BASE-TX EEE ability */
3040                 if (advertise & ADVERTISED_100baseT_Full)
3041                         val |= MDIO_AN_EEE_ADV_100TX;
3042                 /* Advertise 1000BASE-T EEE ability */
3043                 if (advertise & ADVERTISED_1000baseT_Full)
3044                         val |= MDIO_AN_EEE_ADV_1000T;
3045                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3046
3047                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3048                 if (!err)
3049                         err = err2;
3050         }
3051
3052 done:
3053         return err;
3054 }
3055
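/* Start link negotiation on a copper PHY.  Three cases: in low-power
 * mode advertise only the WoL-capable speeds; with autoneg enabled
 * advertise the configured modes; for a forced mode advertise the
 * single matching mode and program BMCR directly.  The BMCR_LOOPBACK
 * write below is presumably there to drop the link cleanly before the
 * forced settings take effect.
 */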
3056 static void tg3_phy_copper_begin(struct tg3 *tp)
3057 {
3058         u32 new_adv;
3059         int i;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3062                 new_adv = ADVERTISED_10baseT_Half |
3063                           ADVERTISED_10baseT_Full;
3064                 if (tg3_flag(tp, WOL_SPEED_100MB))
3065                         new_adv |= ADVERTISED_100baseT_Half |
3066                                    ADVERTISED_100baseT_Full;
3067
3068                 tg3_phy_autoneg_cfg(tp, new_adv,
3069                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3070         } else if (tp->link_config.speed == SPEED_INVALID) {
3071                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3072                         tp->link_config.advertising &=
3073                                 ~(ADVERTISED_1000baseT_Half |
3074                                   ADVERTISED_1000baseT_Full);
3075
3076                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3077                                     tp->link_config.flowctrl);
3078         } else {
3079                 /* Asking for a specific link mode. */
3080                 if (tp->link_config.speed == SPEED_1000) {
3081                         if (tp->link_config.duplex == DUPLEX_FULL)
3082                                 new_adv = ADVERTISED_1000baseT_Full;
3083                         else
3084                                 new_adv = ADVERTISED_1000baseT_Half;
3085                 } else if (tp->link_config.speed == SPEED_100) {
3086                         if (tp->link_config.duplex == DUPLEX_FULL)
3087                                 new_adv = ADVERTISED_100baseT_Full;
3088                         else
3089                                 new_adv = ADVERTISED_100baseT_Half;
3090                 } else {
3091                         if (tp->link_config.duplex == DUPLEX_FULL)
3092                                 new_adv = ADVERTISED_10baseT_Full;
3093                         else
3094                                 new_adv = ADVERTISED_10baseT_Half;
3095                 }
3096
3097                 tg3_phy_autoneg_cfg(tp, new_adv,
3098                                     tp->link_config.flowctrl);
3099         }
3100
3101         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3102             tp->link_config.speed != SPEED_INVALID) {
3103                 u32 bmcr, orig_bmcr;
3104
3105                 tp->link_config.active_speed = tp->link_config.speed;
3106                 tp->link_config.active_duplex = tp->link_config.duplex;
3107
3108                 bmcr = 0;
3109                 switch (tp->link_config.speed) {
3110                 default:
3111                 case SPEED_10:
3112                         break;
3113
3114                 case SPEED_100:
3115                         bmcr |= BMCR_SPEED100;
3116                         break;
3117
3118                 case SPEED_1000:
3119                         bmcr |= BMCR_SPEED1000;
3120                         break;
3121                 }
3122
3123                 if (tp->link_config.duplex == DUPLEX_FULL)
3124                         bmcr |= BMCR_FULLDPLX;
3125
3126                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3127                     (bmcr != orig_bmcr)) {
3128                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3129                         for (i = 0; i < 1500; i++) {
3130                                 u32 tmp;
3131
3132                                 udelay(10);
3133                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3134                                     tg3_readphy(tp, MII_BMSR, &tmp))
3135                                         continue;
3136                                 if (!(tmp & BMSR_LSTATUS)) {
3137                                         udelay(40);
3138                                         break;
3139                                 }
3140                         }
3141                         tg3_writephy(tp, MII_BMCR, bmcr);
3142                         udelay(40);
3143                 }
3144         } else {
3145                 tg3_writephy(tp, MII_BMCR,
3146                              BMCR_ANENABLE | BMCR_ANRESTART);
3147         }
3148 }
3149
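/* DSP setup for the BCM5401 PHY.  The register/value pairs below come
 * from Broadcom and are, as far as we know, not publicly documented.
 */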
3150 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3151 {
3152         int err;
3153
3154         /* Turn off tap power management and set the
3155          * extended packet length bit. */
3156         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3157
3158         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3159         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3160         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3161         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3162         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3163
3164         udelay(40);
3165
3166         return err;
3167 }
3168
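/* Return 1 if the PHY currently advertises at least every mode
 * requested in @mask, 0 otherwise (or on any PHY read failure).
 */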
3169 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3170 {
3171         u32 adv_reg, all_mask = 0;
3172
3173         if (mask & ADVERTISED_10baseT_Half)
3174                 all_mask |= ADVERTISE_10HALF;
3175         if (mask & ADVERTISED_10baseT_Full)
3176                 all_mask |= ADVERTISE_10FULL;
3177         if (mask & ADVERTISED_100baseT_Half)
3178                 all_mask |= ADVERTISE_100HALF;
3179         if (mask & ADVERTISED_100baseT_Full)
3180                 all_mask |= ADVERTISE_100FULL;
3181
3182         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3183                 return 0;
3184
3185         if ((adv_reg & all_mask) != all_mask)
3186                 return 0;
3187         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3188                 u32 tg3_ctrl;
3189
3190                 all_mask = 0;
3191                 if (mask & ADVERTISED_1000baseT_Half)
3192                         all_mask |= ADVERTISE_1000HALF;
3193                 if (mask & ADVERTISED_1000baseT_Full)
3194                         all_mask |= ADVERTISE_1000FULL;
3195
3196                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3197                         return 0;
3198
3199                 if ((tg3_ctrl & all_mask) != all_mask)
3200                         return 0;
3201         }
3202         return 1;
3203 }
3204
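/* Check that the advertised pause bits match the requested flow
 * control settings.  On a full-duplex link a mismatch fails the link
 * check; on half-duplex the advertisement is simply rewritten so the
 * next renegotiation picks it up.  Returns 0 on a full-duplex
 * mismatch, 1 otherwise, filling in *lcladv and, when pause autoneg
 * is enabled, *rmtadv.
 */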
3205 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3206 {
3207         u32 curadv, reqadv;
3208
3209         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3210                 return 1;
3211
3212         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3213         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3214
3215         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3216                 if (curadv != reqadv)
3217                         return 0;
3218
3219                 if (tg3_flag(tp, PAUSE_AUTONEG))
3220                         tg3_readphy(tp, MII_LPA, rmtadv);
3221         } else {
3222                 /* Reprogram the advertisement register, even if it
3223                  * does not affect the current link.  If the link
3224                  * gets renegotiated in the future, we can save an
3225                  * additional renegotiation cycle by advertising
3226                  * it correctly in the first place.
3227                  */
3228                 if (curadv != reqadv) {
3229                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3230                                      ADVERTISE_PAUSE_ASYM);
3231                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3232                 }
3233         }
3234
3235         return 1;
3236 }
3237
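/* Bring up (or re-check) the copper link: clear pending MAC events,
 * reset the PHY if needed, apply the per-chip PHY workarounds, poll
 * BMSR for link, derive speed/duplex from the aux status register and
 * reprogram MAC_MODE and flow control to match.  Carrier changes are
 * reported through tg3_link_report().
 */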
3238 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3239 {
3240         int current_link_up;
3241         u32 bmsr, val;
3242         u32 lcl_adv, rmt_adv;
3243         u16 current_speed;
3244         u8 current_duplex;
3245         int i, err;
3246
3247         tw32(MAC_EVENT, 0);
3248
3249         tw32_f(MAC_STATUS,
3250              (MAC_STATUS_SYNC_CHANGED |
3251               MAC_STATUS_CFG_CHANGED |
3252               MAC_STATUS_MI_COMPLETION |
3253               MAC_STATUS_LNKSTATE_CHANGED));
3254         udelay(40);
3255
3256         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3257                 tw32_f(MAC_MI_MODE,
3258                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3259                 udelay(80);
3260         }
3261
3262         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3263
3264         /* Some third-party PHYs need to be reset on link going
3265          * down.
3266          */
3267         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3268              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3269              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3270             netif_carrier_ok(tp->dev)) {
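                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */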
3271                 tg3_readphy(tp, MII_BMSR, &bmsr);
3272                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3273                     !(bmsr & BMSR_LSTATUS))
3274                         force_reset = 1;
3275         }
3276         if (force_reset)
3277                 tg3_phy_reset(tp);
3278
3279         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3280                 tg3_readphy(tp, MII_BMSR, &bmsr);
3281                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3282                     !tg3_flag(tp, INIT_COMPLETE))
3283                         bmsr = 0;
3284
3285                 if (!(bmsr & BMSR_LSTATUS)) {
3286                         err = tg3_init_5401phy_dsp(tp);
3287                         if (err)
3288                                 return err;
3289
3290                         tg3_readphy(tp, MII_BMSR, &bmsr);
3291                         for (i = 0; i < 1000; i++) {
3292                                 udelay(10);
3293                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3294                                     (bmsr & BMSR_LSTATUS)) {
3295                                         udelay(40);
3296                                         break;
3297                                 }
3298                         }
3299
3300                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3301                             TG3_PHY_REV_BCM5401_B0 &&
3302                             !(bmsr & BMSR_LSTATUS) &&
3303                             tp->link_config.active_speed == SPEED_1000) {
3304                                 err = tg3_phy_reset(tp);
3305                                 if (!err)
3306                                         err = tg3_init_5401phy_dsp(tp);
3307                                 if (err)
3308                                         return err;
3309                         }
3310                 }
3311         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3312                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3313                 /* 5701 {A0,B0} CRC bug workaround */
3314                 tg3_writephy(tp, 0x15, 0x0a75);
3315                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3316                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3317                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3318         }
3319
3320         /* Clear pending interrupts... */
3321         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3322         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3323
3324         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3325                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3326         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3327                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3328
3329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3331                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3332                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3333                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3334                 else
3335                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3336         }
3337
3338         current_link_up = 0;
3339         current_speed = SPEED_INVALID;
3340         current_duplex = DUPLEX_INVALID;
3341
3342         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3343                 err = tg3_phy_auxctl_read(tp,
3344                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3345                                           &val);
3346                 if (!err && !(val & (1 << 10))) {
3347                         tg3_phy_auxctl_write(tp,
3348                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3349                                              val | (1 << 10));
3350                         goto relink;
3351                 }
3352         }
3353
3354         bmsr = 0;
3355         for (i = 0; i < 100; i++) {
3356                 tg3_readphy(tp, MII_BMSR, &bmsr);
3357                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3358                     (bmsr & BMSR_LSTATUS))
3359                         break;
3360                 udelay(40);
3361         }
3362
3363         if (bmsr & BMSR_LSTATUS) {
3364                 u32 aux_stat, bmcr;
3365
3366                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3367                 for (i = 0; i < 2000; i++) {
3368                         udelay(10);
3369                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3370                             aux_stat)
3371                                 break;
3372                 }
3373
3374                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3375                                              &current_speed,
3376                                              &current_duplex);
3377
3378                 bmcr = 0;
3379                 for (i = 0; i < 200; i++) {
3380                         tg3_readphy(tp, MII_BMCR, &bmcr);
3381                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
3382                                 continue;
3383                         if (bmcr && bmcr != 0x7fff)
3384                                 break;
3385                         udelay(10);
3386                 }
3387
3388                 lcl_adv = 0;
3389                 rmt_adv = 0;
3390
3391                 tp->link_config.active_speed = current_speed;
3392                 tp->link_config.active_duplex = current_duplex;
3393
3394                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3395                         if ((bmcr & BMCR_ANENABLE) &&
3396                             tg3_copper_is_advertising_all(tp,
3397                                                 tp->link_config.advertising)) {
3398                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3399                                                                   &rmt_adv))
3400                                         current_link_up = 1;
3401                         }
3402                 } else {
3403                         if (!(bmcr & BMCR_ANENABLE) &&
3404                             tp->link_config.speed == current_speed &&
3405                             tp->link_config.duplex == current_duplex &&
3406                             tp->link_config.flowctrl ==
3407                             tp->link_config.active_flowctrl) {
3408                                 current_link_up = 1;
3409                         }
3410                 }
3411
3412                 if (current_link_up == 1 &&
3413                     tp->link_config.active_duplex == DUPLEX_FULL)
3414                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3415         }
3416
3417 relink:
3418         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3419                 tg3_phy_copper_begin(tp);
3420
3421                 tg3_readphy(tp, MII_BMSR, &bmsr);
3422                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
3423                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
3424                         current_link_up = 1;
3425         }
3426
3427         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3428         if (current_link_up == 1) {
3429                 if (tp->link_config.active_speed == SPEED_100 ||
3430                     tp->link_config.active_speed == SPEED_10)
3431                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3432                 else
3433                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3434         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3435                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3436         else
3437                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3438
3439         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3440         if (tp->link_config.active_duplex == DUPLEX_HALF)
3441                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3442
3443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3444                 if (current_link_up == 1 &&
3445                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3446                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3447                 else
3448                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3449         }
3450
3451         /* For reasons that remain unclear, the Netgear GA302T PHY
3452          * does not send/receive packets without this setting...
3453          */
3454         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3455             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3456                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3457                 tw32_f(MAC_MI_MODE, tp->mi_mode);
3458                 udelay(80);
3459         }
3460
3461         tw32_f(MAC_MODE, tp->mac_mode);
3462         udelay(40);
3463
3464         tg3_phy_eee_adjust(tp, current_link_up);
3465
3466         if (tg3_flag(tp, USE_LINKCHG_REG)) {
3467                 /* Polled via timer. */
3468                 tw32_f(MAC_EVENT, 0);
3469         } else {
3470                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3471         }
3472         udelay(40);
3473
3474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3475             current_link_up == 1 &&
3476             tp->link_config.active_speed == SPEED_1000 &&
3477             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
3478                 udelay(120);
3479                 tw32_f(MAC_STATUS,
3480                      (MAC_STATUS_SYNC_CHANGED |
3481                       MAC_STATUS_CFG_CHANGED));
3482                 udelay(40);
3483                 tg3_write_mem(tp,
3484                               NIC_SRAM_FIRMWARE_MBOX,
3485                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3486         }
3487
3488         /* Prevent send BD corruption. */
3489         if (tg3_flag(tp, CLKREQ_BUG)) {
3490                 u16 oldlnkctl, newlnkctl;
3491
3492                 pci_read_config_word(tp->pdev,
3493                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3494                                      &oldlnkctl);
3495                 if (tp->link_config.active_speed == SPEED_100 ||
3496                     tp->link_config.active_speed == SPEED_10)
3497                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3498                 else
3499                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3500                 if (newlnkctl != oldlnkctl)
3501                         pci_write_config_word(tp->pdev,
3502                                               pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3503                                               newlnkctl);
3504         }
3505
3506         if (current_link_up != netif_carrier_ok(tp->dev)) {
3507                 if (current_link_up)
3508                         netif_carrier_on(tp->dev);
3509                 else
3510                         netif_carrier_off(tp->dev);
3511                 tg3_link_report(tp);
3512         }
3513
3514         return 0;
3515 }
3516
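/* Software state for 1000BASE-X autonegotiation, modeled on the IEEE
 * 802.3 clause 37 arbitration state diagram.  A minimal sketch of the
 * driving loop (fiber_autoneg() below is the real caller):
 *
 *	struct tg3_fiber_aneginfo aninfo = { .flags = MR_AN_ENABLE };
 *	int status;
 *
 *	do {
 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 *		udelay(1);
 *	} while (status != ANEG_DONE && status != ANEG_FAILED);
 */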
3517 struct tg3_fiber_aneginfo {
3518         int state;
3519 #define ANEG_STATE_UNKNOWN              0
3520 #define ANEG_STATE_AN_ENABLE            1
3521 #define ANEG_STATE_RESTART_INIT         2
3522 #define ANEG_STATE_RESTART              3
3523 #define ANEG_STATE_DISABLE_LINK_OK      4
3524 #define ANEG_STATE_ABILITY_DETECT_INIT  5
3525 #define ANEG_STATE_ABILITY_DETECT       6
3526 #define ANEG_STATE_ACK_DETECT_INIT      7
3527 #define ANEG_STATE_ACK_DETECT           8
3528 #define ANEG_STATE_COMPLETE_ACK_INIT    9
3529 #define ANEG_STATE_COMPLETE_ACK         10
3530 #define ANEG_STATE_IDLE_DETECT_INIT     11
3531 #define ANEG_STATE_IDLE_DETECT          12
3532 #define ANEG_STATE_LINK_OK              13
3533 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
3534 #define ANEG_STATE_NEXT_PAGE_WAIT       15
3535
3536         u32 flags;
3537 #define MR_AN_ENABLE            0x00000001
3538 #define MR_RESTART_AN           0x00000002
3539 #define MR_AN_COMPLETE          0x00000004
3540 #define MR_PAGE_RX              0x00000008
3541 #define MR_NP_LOADED            0x00000010
3542 #define MR_TOGGLE_TX            0x00000020
3543 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
3544 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
3545 #define MR_LP_ADV_SYM_PAUSE     0x00000100
3546 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
3547 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3548 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3549 #define MR_LP_ADV_NEXT_PAGE     0x00001000
3550 #define MR_TOGGLE_RX            0x00002000
3551 #define MR_NP_RX                0x00004000
3552
3553 #define MR_LINK_OK              0x80000000
3554
3555         unsigned long link_time, cur_time;
3556
3557         u32 ability_match_cfg;
3558         int ability_match_count;
3559
3560         char ability_match, idle_match, ack_match;
3561
3562         u32 txconfig, rxconfig;
3563 #define ANEG_CFG_NP             0x00000080
3564 #define ANEG_CFG_ACK            0x00000040
3565 #define ANEG_CFG_RF2            0x00000020
3566 #define ANEG_CFG_RF1            0x00000010
3567 #define ANEG_CFG_PS2            0x00000001
3568 #define ANEG_CFG_PS1            0x00008000
3569 #define ANEG_CFG_HD             0x00004000
3570 #define ANEG_CFG_FD             0x00002000
3571 #define ANEG_CFG_INVAL          0x00001f06
3572
3573 };
3574 #define ANEG_OK         0
3575 #define ANEG_DONE       1
3576 #define ANEG_TIMER_ENAB 2
3577 #define ANEG_FAILED     -1
3578
3579 #define ANEG_STATE_SETTLE_TIME  10000
3580
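/* Advance the fiber autoneg state machine by one tick: sample the
 * received config word from MAC_RX_AUTO_NEG, update the match
 * tracking, then run one state transition.  Returns ANEG_OK to keep
 * ticking, ANEG_TIMER_ENAB when the caller should keep polling, and
 * ANEG_DONE or ANEG_FAILED when negotiation ends.
 */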
3581 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3582                                    struct tg3_fiber_aneginfo *ap)
3583 {
3584         u16 flowctrl;
3585         unsigned long delta;
3586         u32 rx_cfg_reg;
3587         int ret;
3588
3589         if (ap->state == ANEG_STATE_UNKNOWN) {
3590                 ap->rxconfig = 0;
3591                 ap->link_time = 0;
3592                 ap->cur_time = 0;
3593                 ap->ability_match_cfg = 0;
3594                 ap->ability_match_count = 0;
3595                 ap->ability_match = 0;
3596                 ap->idle_match = 0;
3597                 ap->ack_match = 0;
3598         }
3599         ap->cur_time++;
3600
3601         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3602                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3603
3604                 if (rx_cfg_reg != ap->ability_match_cfg) {
3605                         ap->ability_match_cfg = rx_cfg_reg;
3606                         ap->ability_match = 0;
3607                         ap->ability_match_count = 0;
3608                 } else {
3609                         if (++ap->ability_match_count > 1) {
3610                                 ap->ability_match = 1;
3611                                 ap->ability_match_cfg = rx_cfg_reg;
3612                         }
3613                 }
3614                 if (rx_cfg_reg & ANEG_CFG_ACK)
3615                         ap->ack_match = 1;
3616                 else
3617                         ap->ack_match = 0;
3618
3619                 ap->idle_match = 0;
3620         } else {
3621                 ap->idle_match = 1;
3622                 ap->ability_match_cfg = 0;
3623                 ap->ability_match_count = 0;
3624                 ap->ability_match = 0;
3625                 ap->ack_match = 0;
3626
3627                 rx_cfg_reg = 0;
3628         }
3629
3630         ap->rxconfig = rx_cfg_reg;
3631         ret = ANEG_OK;
3632
3633         switch (ap->state) {
3634         case ANEG_STATE_UNKNOWN:
3635                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3636                         ap->state = ANEG_STATE_AN_ENABLE;
3637
3638                 /* fallthru */
3639         case ANEG_STATE_AN_ENABLE:
3640                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3641                 if (ap->flags & MR_AN_ENABLE) {
3642                         ap->link_time = 0;
3643                         ap->cur_time = 0;
3644                         ap->ability_match_cfg = 0;
3645                         ap->ability_match_count = 0;
3646                         ap->ability_match = 0;
3647                         ap->idle_match = 0;
3648                         ap->ack_match = 0;
3649
3650                         ap->state = ANEG_STATE_RESTART_INIT;
3651                 } else {
3652                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
3653                 }
3654                 break;
3655
3656         case ANEG_STATE_RESTART_INIT:
3657                 ap->link_time = ap->cur_time;
3658                 ap->flags &= ~(MR_NP_LOADED);
3659                 ap->txconfig = 0;
3660                 tw32(MAC_TX_AUTO_NEG, 0);
3661                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3662                 tw32_f(MAC_MODE, tp->mac_mode);
3663                 udelay(40);
3664
3665                 ret = ANEG_TIMER_ENAB;
3666                 ap->state = ANEG_STATE_RESTART;
3667
3668                 /* fallthru */
3669         case ANEG_STATE_RESTART:
3670                 delta = ap->cur_time - ap->link_time;
3671                 if (delta > ANEG_STATE_SETTLE_TIME)
3672                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3673                 else
3674                         ret = ANEG_TIMER_ENAB;
3675                 break;
3676
3677         case ANEG_STATE_DISABLE_LINK_OK:
3678                 ret = ANEG_DONE;
3679                 break;
3680
3681         case ANEG_STATE_ABILITY_DETECT_INIT:
3682                 ap->flags &= ~(MR_TOGGLE_TX);
3683                 ap->txconfig = ANEG_CFG_FD;
3684                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3685                 if (flowctrl & ADVERTISE_1000XPAUSE)
3686                         ap->txconfig |= ANEG_CFG_PS1;
3687                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3688                         ap->txconfig |= ANEG_CFG_PS2;
3689                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3690                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3691                 tw32_f(MAC_MODE, tp->mac_mode);
3692                 udelay(40);
3693
3694                 ap->state = ANEG_STATE_ABILITY_DETECT;
3695                 break;
3696
3697         case ANEG_STATE_ABILITY_DETECT:
3698                 if (ap->ability_match != 0 && ap->rxconfig != 0)
3699                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
3700                 break;
3701
3702         case ANEG_STATE_ACK_DETECT_INIT:
3703                 ap->txconfig |= ANEG_CFG_ACK;
3704                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3705                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3706                 tw32_f(MAC_MODE, tp->mac_mode);
3707                 udelay(40);
3708
3709                 ap->state = ANEG_STATE_ACK_DETECT;
3710
3711                 /* fallthru */
3712         case ANEG_STATE_ACK_DETECT:
3713                 if (ap->ack_match != 0) {
3714                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3715                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3716                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3717                         } else {
3718                                 ap->state = ANEG_STATE_AN_ENABLE;
3719                         }
3720                 } else if (ap->ability_match != 0 &&
3721                            ap->rxconfig == 0) {
3722                         ap->state = ANEG_STATE_AN_ENABLE;
3723                 }
3724                 break;
3725
3726         case ANEG_STATE_COMPLETE_ACK_INIT:
3727                 if (ap->rxconfig & ANEG_CFG_INVAL) {
3728                         ret = ANEG_FAILED;
3729                         break;
3730                 }
3731                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3732                                MR_LP_ADV_HALF_DUPLEX |
3733                                MR_LP_ADV_SYM_PAUSE |
3734                                MR_LP_ADV_ASYM_PAUSE |
3735                                MR_LP_ADV_REMOTE_FAULT1 |
3736                                MR_LP_ADV_REMOTE_FAULT2 |
3737                                MR_LP_ADV_NEXT_PAGE |
3738                                MR_TOGGLE_RX |
3739                                MR_NP_RX);
3740                 if (ap->rxconfig & ANEG_CFG_FD)
3741                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3742                 if (ap->rxconfig & ANEG_CFG_HD)
3743                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3744                 if (ap->rxconfig & ANEG_CFG_PS1)
3745                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
3746                 if (ap->rxconfig & ANEG_CFG_PS2)
3747                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3748                 if (ap->rxconfig & ANEG_CFG_RF1)
3749                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3750                 if (ap->rxconfig & ANEG_CFG_RF2)
3751                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3752                 if (ap->rxconfig & ANEG_CFG_NP)
3753                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
3754
3755                 ap->link_time = ap->cur_time;
3756
3757                 ap->flags ^= (MR_TOGGLE_TX);
3758                 if (ap->rxconfig & 0x0008)
3759                         ap->flags |= MR_TOGGLE_RX;
3760                 if (ap->rxconfig & ANEG_CFG_NP)
3761                         ap->flags |= MR_NP_RX;
3762                 ap->flags |= MR_PAGE_RX;
3763
3764                 ap->state = ANEG_STATE_COMPLETE_ACK;
3765                 ret = ANEG_TIMER_ENAB;
3766                 break;
3767
3768         case ANEG_STATE_COMPLETE_ACK:
3769                 if (ap->ability_match != 0 &&
3770                     ap->rxconfig == 0) {
3771                         ap->state = ANEG_STATE_AN_ENABLE;
3772                         break;
3773                 }
3774                 delta = ap->cur_time - ap->link_time;
3775                 if (delta > ANEG_STATE_SETTLE_TIME) {
3776                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3777                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3778                         } else {
3779                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3780                                     !(ap->flags & MR_NP_RX)) {
3781                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3782                                 } else {
3783                                         ret = ANEG_FAILED;
3784                                 }
3785                         }
3786                 }
3787                 break;
3788
3789         case ANEG_STATE_IDLE_DETECT_INIT:
3790                 ap->link_time = ap->cur_time;
3791                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3792                 tw32_f(MAC_MODE, tp->mac_mode);
3793                 udelay(40);
3794
3795                 ap->state = ANEG_STATE_IDLE_DETECT;
3796                 ret = ANEG_TIMER_ENAB;
3797                 break;
3798
3799         case ANEG_STATE_IDLE_DETECT:
3800                 if (ap->ability_match != 0 &&
3801                     ap->rxconfig == 0) {
3802                         ap->state = ANEG_STATE_AN_ENABLE;
3803                         break;
3804                 }
3805                 delta = ap->cur_time - ap->link_time;
3806                 if (delta > ANEG_STATE_SETTLE_TIME) {
3807                         /* XXX another gem from the Broadcom driver :( */
3808                         ap->state = ANEG_STATE_LINK_OK;
3809                 }
3810                 break;
3811
3812         case ANEG_STATE_LINK_OK:
3813                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3814                 ret = ANEG_DONE;
3815                 break;
3816
3817         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3818                 /* ??? unimplemented */
3819                 break;
3820
3821         case ANEG_STATE_NEXT_PAGE_WAIT:
3822                 /* ??? unimplemented */
3823                 break;
3824
3825         default:
3826                 ret = ANEG_FAILED;
3827                 break;
3828         }
3829
3830         return ret;
3831 }
3832
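/* Run the autoneg state machine to completion.  The loop below polls
 * roughly once a microsecond for up to 195000 ticks, i.e. about
 * 195 ms plus loop overhead, before giving up.  On success (1) the
 * negotiated tx/rx config words are left in *txflags and *rxflags.
 */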
3833 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3834 {
3835         int res = 0;
3836         struct tg3_fiber_aneginfo aninfo;
3837         int status = ANEG_FAILED;
3838         unsigned int tick;
3839         u32 tmp;
3840
3841         tw32_f(MAC_TX_AUTO_NEG, 0);
3842
3843         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3844         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3845         udelay(40);
3846
3847         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3848         udelay(40);
3849
3850         memset(&aninfo, 0, sizeof(aninfo));
3851         aninfo.flags |= MR_AN_ENABLE;
3852         aninfo.state = ANEG_STATE_UNKNOWN;
3853         aninfo.cur_time = 0;
3854         tick = 0;
3855         while (++tick < 195000) {
3856                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3857                 if (status == ANEG_DONE || status == ANEG_FAILED)
3858                         break;
3859
3860                 udelay(1);
3861         }
3862
3863         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3864         tw32_f(MAC_MODE, tp->mac_mode);
3865         udelay(40);
3866
3867         *txflags = aninfo.txconfig;
3868         *rxflags = aninfo.flags;
3869
3870         if (status == ANEG_DONE &&
3871             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3872                              MR_LP_ADV_FULL_DUPLEX)))
3873                 res = 1;
3874
3875         return res;
3876 }
3877
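/* Initialize the BCM8002 SerDes PHY.  The raw register writes below
 * follow Broadcom's init sequence; most of these registers are not
 * publicly documented, so the inline comments are the best
 * description available.
 */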
3878 static void tg3_init_bcm8002(struct tg3 *tp)
3879 {
3880         u32 mac_status = tr32(MAC_STATUS);
3881         int i;
3882
3883         /* Reset on first-time init or when we have a link. */
3884         if (tg3_flag(tp, INIT_COMPLETE) &&
3885             !(mac_status & MAC_STATUS_PCS_SYNCED))
3886                 return;
3887
3888         /* Set PLL lock range. */
3889         tg3_writephy(tp, 0x16, 0x8007);
3890
3891         /* SW reset */
3892         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3893
3894         /* Wait for reset to complete. */
3895         /* XXX schedule_timeout() ... */
3896         for (i = 0; i < 500; i++)
3897                 udelay(10);
3898
3899         /* Config mode; select PMA/Ch 1 regs. */
3900         tg3_writephy(tp, 0x10, 0x8411);
3901
3902         /* Enable auto-lock and comdet, select txclk for tx. */
3903         tg3_writephy(tp, 0x11, 0x0a10);
3904
3905         tg3_writephy(tp, 0x18, 0x00a0);
3906         tg3_writephy(tp, 0x16, 0x41ff);
3907
3908         /* Assert and deassert POR. */
3909         tg3_writephy(tp, 0x13, 0x0400);
3910         udelay(40);
3911         tg3_writephy(tp, 0x13, 0x0000);
3912
3913         tg3_writephy(tp, 0x11, 0x0a50);
3914         udelay(40);
3915         tg3_writephy(tp, 0x11, 0x0a10);
3916
3917         /* Wait for signal to stabilize */
3918         /* XXX schedule_timeout() ... */
3919         for (i = 0; i < 15000; i++)
3920                 udelay(10);
3921
3922         /* Deselect the channel register so we can read the PHYID
3923          * later.
3924          */
3925         tg3_writephy(tp, 0x10, 0x8011);
3926 }
3927
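/* Fiber link setup using the hardware SG_DIG autoneg engine.  On
 * everything except 5704 A0/A1 this also applies the MAC_SERDES_CFG
 * workaround.  Returns nonzero if the link should be considered up.
 */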
3928 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3929 {
3930         u16 flowctrl;
3931         u32 sg_dig_ctrl, sg_dig_status;
3932         u32 serdes_cfg, expected_sg_dig_ctrl;
3933         int workaround, port_a;
3934         int current_link_up;
3935
3936         serdes_cfg = 0;
3937         expected_sg_dig_ctrl = 0;
3938         workaround = 0;
3939         port_a = 1;
3940         current_link_up = 0;
3941
3942         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3943             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3944                 workaround = 1;
3945                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3946                         port_a = 0;
3947
3948                 /* Preserve bits 0-11, 13 and 14 (signal pre-emphasis)
3949                  * and bits 20-23 (voltage regulator). */
3950                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3951         }
3952
3953         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3954
3955         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3956                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3957                         if (workaround) {
3958                                 u32 val = serdes_cfg;
3959
3960                                 if (port_a)
3961                                         val |= 0xc010000;
3962                                 else
3963                                         val |= 0x4010000;
3964                                 tw32_f(MAC_SERDES_CFG, val);
3965                         }
3966
3967                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3968                 }
3969                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3970                         tg3_setup_flow_control(tp, 0, 0);
3971                         current_link_up = 1;
3972                 }
3973                 goto out;
3974         }
3975
3976         /* Want auto-negotiation. */
3977         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3978
3979         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3980         if (flowctrl & ADVERTISE_1000XPAUSE)
3981                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3982         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3983                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3984
3985         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3986                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3987                     tp->serdes_counter &&
3988                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3989                                     MAC_STATUS_RCVD_CFG)) ==
3990                      MAC_STATUS_PCS_SYNCED)) {
3991                         tp->serdes_counter--;
3992                         current_link_up = 1;
3993                         goto out;
3994                 }
3995 restart_autoneg:
3996                 if (workaround)
3997                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3998                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3999                 udelay(5);
4000                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4001
4002                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4003                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4004         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4005                                  MAC_STATUS_SIGNAL_DET)) {
4006                 sg_dig_status = tr32(SG_DIG_STATUS);
4007                 mac_status = tr32(MAC_STATUS);
4008
4009                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4010                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4011                         u32 local_adv = 0, remote_adv = 0;
4012
4013                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4014                                 local_adv |= ADVERTISE_1000XPAUSE;
4015                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4016                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4017
4018                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4019                                 remote_adv |= LPA_1000XPAUSE;
4020                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4021                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4022
4023                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4024                         current_link_up = 1;
4025                         tp->serdes_counter = 0;
4026                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4027                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4028                         if (tp->serdes_counter)
4029                                 tp->serdes_counter--;
4030                         else {
4031                                 if (workaround) {
4032                                         u32 val = serdes_cfg;
4033
4034                                         if (port_a)
4035                                                 val |= 0xc010000;
4036                                         else
4037                                                 val |= 0x4010000;
4038
4039                                         tw32_f(MAC_SERDES_CFG, val);
4040                                 }
4041
4042                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4043                                 udelay(40);
4044
4045                                 /* Link parallel detection: the link is up
4046                                  * only if we have PCS_SYNC and are not
4047                                  * receiving config code words. */
4048                                 mac_status = tr32(MAC_STATUS);
4049                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4050                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4051                                         tg3_setup_flow_control(tp, 0, 0);
4052                                         current_link_up = 1;
4053                                         tp->phy_flags |=
4054                                                 TG3_PHYFLG_PARALLEL_DETECT;
4055                                         tp->serdes_counter =
4056                                                 SERDES_PARALLEL_DET_TIMEOUT;
4057                                 } else
4058                                         goto restart_autoneg;
4059                         }
4060                 }
4061         } else {
4062                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4063                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4064         }
4065
4066 out:
4067         return current_link_up;
4068 }
4069
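/* Fiber link setup using the software autoneg state machine, or a
 * forced 1000FD link when autoneg is disabled.  Returns nonzero if
 * the link should be considered up.
 */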
4070 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4071 {
4072         int current_link_up = 0;
4073
4074         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4075                 goto out;
4076
4077         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4078                 u32 txflags, rxflags;
4079                 int i;
4080
4081                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4082                         u32 local_adv = 0, remote_adv = 0;
4083
4084                         if (txflags & ANEG_CFG_PS1)
4085                                 local_adv |= ADVERTISE_1000XPAUSE;
4086                         if (txflags & ANEG_CFG_PS2)
4087                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4088
4089                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4090                                 remote_adv |= LPA_1000XPAUSE;
4091                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4092                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4093
4094                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4095
4096                         current_link_up = 1;
4097                 }
4098                 for (i = 0; i < 30; i++) {
4099                         udelay(20);
4100                         tw32_f(MAC_STATUS,
4101                                (MAC_STATUS_SYNC_CHANGED |
4102                                 MAC_STATUS_CFG_CHANGED));
4103                         udelay(40);
4104                         if ((tr32(MAC_STATUS) &
4105                              (MAC_STATUS_SYNC_CHANGED |
4106                               MAC_STATUS_CFG_CHANGED)) == 0)
4107                                 break;
4108                 }
4109
4110                 mac_status = tr32(MAC_STATUS);
4111                 if (current_link_up == 0 &&
4112                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4113                     !(mac_status & MAC_STATUS_RCVD_CFG))
4114                         current_link_up = 1;
4115         } else {
4116                 tg3_setup_flow_control(tp, 0, 0);
4117
4118                 /* Forcing 1000FD link up. */
4119                 current_link_up = 1;
4120
4121                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4122                 udelay(40);
4123
4124                 tw32_f(MAC_MODE, tp->mac_mode);
4125                 udelay(40);
4126         }
4127
4128 out:
4129         return current_link_up;
4130 }
4131
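/* Top-level fiber (TBI) link setup: select hardware or software
 * autoneg, let the MAC status change bits settle, drive the link LED
 * and report carrier or flow control changes.
 */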
4132 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4133 {
4134         u32 orig_pause_cfg;
4135         u16 orig_active_speed;
4136         u8 orig_active_duplex;
4137         u32 mac_status;
4138         int current_link_up;
4139         int i;
4140
4141         orig_pause_cfg = tp->link_config.active_flowctrl;
4142         orig_active_speed = tp->link_config.active_speed;
4143         orig_active_duplex = tp->link_config.active_duplex;
4144
4145         if (!tg3_flag(tp, HW_AUTONEG) &&
4146             netif_carrier_ok(tp->dev) &&
4147             tg3_flag(tp, INIT_COMPLETE)) {
4148                 mac_status = tr32(MAC_STATUS);
4149                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4150                                MAC_STATUS_SIGNAL_DET |
4151                                MAC_STATUS_CFG_CHANGED |
4152                                MAC_STATUS_RCVD_CFG);
4153                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4154                                    MAC_STATUS_SIGNAL_DET)) {
4155                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4156                                             MAC_STATUS_CFG_CHANGED));
4157                         return 0;
4158                 }
4159         }
4160
4161         tw32_f(MAC_TX_AUTO_NEG, 0);
4162
4163         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4164         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4165         tw32_f(MAC_MODE, tp->mac_mode);
4166         udelay(40);
4167
4168         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4169                 tg3_init_bcm8002(tp);
4170
4171         /* Enable link change event even when serdes polling. */
4172         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4173         udelay(40);
4174
4175         current_link_up = 0;
4176         mac_status = tr32(MAC_STATUS);
4177
4178         if (tg3_flag(tp, HW_AUTONEG))
4179                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4180         else
4181                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4182
4183         tp->napi[0].hw_status->status =
4184                 (SD_STATUS_UPDATED |
4185                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4186
4187         for (i = 0; i < 100; i++) {
4188                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4189                                     MAC_STATUS_CFG_CHANGED));
4190                 udelay(5);
4191                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4192                                          MAC_STATUS_CFG_CHANGED |
4193                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4194                         break;
4195         }
4196
4197         mac_status = tr32(MAC_STATUS);
4198         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4199                 current_link_up = 0;
4200                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4201                     tp->serdes_counter == 0) {
4202                         tw32_f(MAC_MODE, (tp->mac_mode |
4203                                           MAC_MODE_SEND_CONFIGS));
4204                         udelay(1);
4205                         tw32_f(MAC_MODE, tp->mac_mode);
4206                 }
4207         }
4208
4209         if (current_link_up == 1) {
4210                 tp->link_config.active_speed = SPEED_1000;
4211                 tp->link_config.active_duplex = DUPLEX_FULL;
4212                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4213                                     LED_CTRL_LNKLED_OVERRIDE |
4214                                     LED_CTRL_1000MBPS_ON));
4215         } else {
4216                 tp->link_config.active_speed = SPEED_INVALID;
4217                 tp->link_config.active_duplex = DUPLEX_INVALID;
4218                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4219                                     LED_CTRL_LNKLED_OVERRIDE |
4220                                     LED_CTRL_TRAFFIC_OVERRIDE));
4221         }
4222
4223         if (current_link_up != netif_carrier_ok(tp->dev)) {
4224                 if (current_link_up)
4225                         netif_carrier_on(tp->dev);
4226                 else
4227                         netif_carrier_off(tp->dev);
4228                 tg3_link_report(tp);
4229         } else {
4230                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4231                 if (orig_pause_cfg != now_pause_cfg ||
4232                     orig_active_speed != tp->link_config.active_speed ||
4233                     orig_active_duplex != tp->link_config.active_duplex)
4234                         tg3_link_report(tp);
4235         }
4236
4237         return 0;
4238 }
4239
4240 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4241 {
4242         int current_link_up, err = 0;
4243         u32 bmsr, bmcr;
4244         u16 current_speed;
4245         u8 current_duplex;
4246         u32 local_adv, remote_adv;
4247
4248         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4249         tw32_f(MAC_MODE, tp->mac_mode);
4250         udelay(40);
4251
4252         tw32(MAC_EVENT, 0);
4253
4254         tw32_f(MAC_STATUS,
4255              (MAC_STATUS_SYNC_CHANGED |
4256               MAC_STATUS_CFG_CHANGED |
4257               MAC_STATUS_MI_COMPLETION |
4258               MAC_STATUS_LNKSTATE_CHANGED));
4259         udelay(40);
4260
4261         if (force_reset)
4262                 tg3_phy_reset(tp);
4263
4264         current_link_up = 0;
4265         current_speed = SPEED_INVALID;
4266         current_duplex = DUPLEX_INVALID;
4267
4268         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4269         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4270         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4271                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4272                         bmsr |= BMSR_LSTATUS;
4273                 else
4274                         bmsr &= ~BMSR_LSTATUS;
4275         }
4276
4277         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4278
4279         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4280             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4281                 /* do nothing, just check for link up at the end */
4282         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4283                 u32 adv, new_adv;
4284
4285                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4286                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4287                                   ADVERTISE_1000XPAUSE |
4288                                   ADVERTISE_1000XPSE_ASYM |
4289                                   ADVERTISE_SLCT);
4290
4291                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4292
4293                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4294                         new_adv |= ADVERTISE_1000XHALF;
4295                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4296                         new_adv |= ADVERTISE_1000XFULL;
4297
4298                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4299                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4300                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4301                         tg3_writephy(tp, MII_BMCR, bmcr);
4302
4303                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4304                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4305                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4306
4307                         return err;
4308                 }
4309         } else {
4310                 u32 new_bmcr;
4311
4312                 bmcr &= ~BMCR_SPEED1000;
4313                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4314
4315                 if (tp->link_config.duplex == DUPLEX_FULL)
4316                         new_bmcr |= BMCR_FULLDPLX;
4317
4318                 if (new_bmcr != bmcr) {
4319                         /* BMCR_SPEED1000 is a reserved bit that needs
4320                          * to be set on write.
4321                          */
4322                         new_bmcr |= BMCR_SPEED1000;
4323
4324                         /* Force a linkdown */
4325                         if (netif_carrier_ok(tp->dev)) {
4326                                 u32 adv;
4327
4328                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4329                                 adv &= ~(ADVERTISE_1000XFULL |
4330                                          ADVERTISE_1000XHALF |
4331                                          ADVERTISE_SLCT);
4332                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4333                                 tg3_writephy(tp, MII_BMCR, bmcr |
4334                                                            BMCR_ANRESTART |
4335                                                            BMCR_ANENABLE);
4336                                 udelay(10);
4337                                 netif_carrier_off(tp->dev);
4338                         }
4339                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4340                         bmcr = new_bmcr;
4341                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4342                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4343                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4344                             ASIC_REV_5714) {
4345                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4346                                         bmsr |= BMSR_LSTATUS;
4347                                 else
4348                                         bmsr &= ~BMSR_LSTATUS;
4349                         }
4350                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4351                 }
4352         }
4353
4354         if (bmsr & BMSR_LSTATUS) {
4355                 current_speed = SPEED_1000;
4356                 current_link_up = 1;
4357                 if (bmcr & BMCR_FULLDPLX)
4358                         current_duplex = DUPLEX_FULL;
4359                 else
4360                         current_duplex = DUPLEX_HALF;
4361
4362                 local_adv = 0;
4363                 remote_adv = 0;
4364
4365                 if (bmcr & BMCR_ANENABLE) {
4366                         u32 common;
4367
4368                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4369                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4370                         common = local_adv & remote_adv;
4371                         if (common & (ADVERTISE_1000XHALF |
4372                                       ADVERTISE_1000XFULL)) {
4373                                 if (common & ADVERTISE_1000XFULL)
4374                                         current_duplex = DUPLEX_FULL;
4375                                 else
4376                                         current_duplex = DUPLEX_HALF;
4377                         } else if (!tg3_flag(tp, 5780_CLASS)) {
4378                                 /* Link is up via parallel detect */
4379                         } else {
4380                                 current_link_up = 0;
4381                         }
4382                 }
4383         }
4384
4385         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4386                 tg3_setup_flow_control(tp, local_adv, remote_adv);
4387
4388         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4389         if (tp->link_config.active_duplex == DUPLEX_HALF)
4390                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4391
4392         tw32_f(MAC_MODE, tp->mac_mode);
4393         udelay(40);
4394
4395         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4396
4397         tp->link_config.active_speed = current_speed;
4398         tp->link_config.active_duplex = current_duplex;
4399
4400         if (current_link_up != netif_carrier_ok(tp->dev)) {
4401                 if (current_link_up)
4402                         netif_carrier_on(tp->dev);
4403                 else {
4404                         netif_carrier_off(tp->dev);
4405                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4406                 }
4407                 tg3_link_report(tp);
4408         }
4409         return err;
4410 }
4411
4412 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4413 {
4414         if (tp->serdes_counter) {
4415                 /* Give autoneg time to complete. */
4416                 tp->serdes_counter--;
4417                 return;
4418         }
4419
4420         if (!netif_carrier_ok(tp->dev) &&
4421             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4422                 u32 bmcr;
4423
4424                 tg3_readphy(tp, MII_BMCR, &bmcr);
4425                 if (bmcr & BMCR_ANENABLE) {
4426                         u32 phy1, phy2;
4427
4428                         /* Select shadow register 0x1f */
4429                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4430                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4431
4432                         /* Select expansion interrupt status register */
4433                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4434                                          MII_TG3_DSP_EXP1_INT_STAT);
4435                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4436                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4437
4438                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4439                                 /* We have signal detect and not receiving
4440                                  * config code words, link is up by parallel
4441                                  * detection.
4442                                  */
4443
4444                                 bmcr &= ~BMCR_ANENABLE;
4445                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4446                                 tg3_writephy(tp, MII_BMCR, bmcr);
4447                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4448                         }
4449                 }
4450         } else if (netif_carrier_ok(tp->dev) &&
4451                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4452                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4453                 u32 phy2;
4454
4455                 /* Select expansion interrupt status register */
4456                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4457                                  MII_TG3_DSP_EXP1_INT_STAT);
4458                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4459                 if (phy2 & 0x20) {
4460                         u32 bmcr;
4461
4462                         /* Config code words received, turn on autoneg. */
4463                         tg3_readphy(tp, MII_BMCR, &bmcr);
4464                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4465
4466                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4467
4468                 }
4469         }
4470 }
4471
4472 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4473 {
4474         u32 val;
4475         int err;
4476
4477         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4478                 err = tg3_setup_fiber_phy(tp, force_reset);
4479         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4480                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4481         else
4482                 err = tg3_setup_copper_phy(tp, force_reset);
4483
4484         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4485                 u32 scale;
4486
4487                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4488                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4489                         scale = 65;
4490                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4491                         scale = 6;
4492                 else
4493                         scale = 12;
4494
4495                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4496                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4497                 tw32(GRC_MISC_CFG, val);
4498         }
4499
4500         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4501               (6 << TX_LENGTHS_IPG_SHIFT);
4502         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
4503                 val |= tr32(MAC_TX_LENGTHS) &
4504                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
4505                         TX_LENGTHS_CNT_DWN_VAL_MSK);
4506
4507         if (tp->link_config.active_speed == SPEED_1000 &&
4508             tp->link_config.active_duplex == DUPLEX_HALF)
4509                 tw32(MAC_TX_LENGTHS, val |
4510                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
4511         else
4512                 tw32(MAC_TX_LENGTHS, val |
4513                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4514
4515         if (!tg3_flag(tp, 5705_PLUS)) {
4516                 if (netif_carrier_ok(tp->dev)) {
4517                         tw32(HOSTCC_STAT_COAL_TICKS,
4518                              tp->coal.stats_block_coalesce_usecs);
4519                 } else {
4520                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
4521                 }
4522         }
4523
4524         if (tg3_flag(tp, ASPM_WORKAROUND)) {
4525                 val = tr32(PCIE_PWR_MGMT_THRESH);
4526                 if (!netif_carrier_ok(tp->dev))
4527                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4528                               tp->pwrmgmt_thresh;
4529                 else
4530                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4531                 tw32(PCIE_PWR_MGMT_THRESH, val);
4532         }
4533
4534         return err;
4535 }
4536
4537 static inline int tg3_irq_sync(struct tg3 *tp)
4538 {
4539         return tp->irq_sync;
4540 }
4541
4542 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4543 {
4544         int i;
4545
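        /* Bias dst by the register offset so the word read from register
         * 'off + i' lands at byte offset 'off + i' in the caller's buffer;
         * tg3_dump_state() can then print buffer offsets directly as
         * register offsets.
         */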
4546         dst = (u32 *)((u8 *)dst + off);
4547         for (i = 0; i < len; i += sizeof(u32))
4548                 *dst++ = tr32(off + i);
4549 }
4550
4551 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
4552 {
4553         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
4554         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
4555         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
4556         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
4557         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
4558         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
4559         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
4560         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
4561         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
4562         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
4563         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
4564         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
4565         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
4566         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
4567         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
4568         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
4569         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
4570         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
4571         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
4572
4573         if (tg3_flag(tp, SUPPORT_MSIX))
4574                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
4575
4576         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
4577         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
4578         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
4579         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
4580         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
4581         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
4582         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
4583         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
4584
4585         if (!tg3_flag(tp, 5705_PLUS)) {
4586                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
4587                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
4588                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
4589         }
4590
4591         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
4592         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
4593         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
4594         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
4595         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
4596
4597         if (tg3_flag(tp, NVRAM))
4598                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
4599 }
4600
4601 static void tg3_dump_state(struct tg3 *tp)
4602 {
4603         int i;
4604         u32 *regs;
4605
4606         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
4607         if (!regs) {
4608                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
4609                 return;
4610         }
4611
4612         if (tg3_flag(tp, PCI_EXPRESS)) {
4613                 /* Read up to but not including private PCI registers */
4614                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
4615                         regs[i / sizeof(u32)] = tr32(i);
4616         } else
4617                 tg3_dump_legacy_regs(tp, regs);
4618
4619         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
4620                 if (!regs[i + 0] && !regs[i + 1] &&
4621                     !regs[i + 2] && !regs[i + 3])
4622                         continue;
4623
4624                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
4625                            i * 4,
4626                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
4627         }
4628
4629         kfree(regs);
4630
4631         for (i = 0; i < tp->irq_cnt; i++) {
4632                 struct tg3_napi *tnapi = &tp->napi[i];
4633
4634                 /* SW status block */
4635                 netdev_err(tp->dev,
4636                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
4637                            i,
4638                            tnapi->hw_status->status,
4639                            tnapi->hw_status->status_tag,
4640                            tnapi->hw_status->rx_jumbo_consumer,
4641                            tnapi->hw_status->rx_consumer,
4642                            tnapi->hw_status->rx_mini_consumer,
4643                            tnapi->hw_status->idx[0].rx_producer,
4644                            tnapi->hw_status->idx[0].tx_consumer);
4645
4646                 netdev_err(tp->dev,
4647                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
4648                            i,
4649                            tnapi->last_tag, tnapi->last_irq_tag,
4650                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
4651                            tnapi->rx_rcb_ptr,
4652                            tnapi->prodring.rx_std_prod_idx,
4653                            tnapi->prodring.rx_std_cons_idx,
4654                            tnapi->prodring.rx_jmb_prod_idx,
4655                            tnapi->prodring.rx_jmb_cons_idx);
4656         }
4657 }
4658
4659 /* This is called whenever we suspect that the system chipset is re-
4660  * ordering the sequence of MMIO to the tx send mailbox. The symptom
4661  * is bogus tx completions. We try to recover by setting the
4662  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4663  * in the workqueue.
4664  */
4665 static void tg3_tx_recover(struct tg3 *tp)
4666 {
4667         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
4668                tp->write32_tx_mbox == tg3_write_indirect_mbox);
4669
4670         netdev_warn(tp->dev,
4671                     "The system may be re-ordering memory-mapped I/O "
4672                     "cycles to the network device, attempting to recover. "
4673                     "Please report the problem to the driver maintainer "
4674                     "and include system chipset information.\n");
4675
4676         spin_lock(&tp->lock);
4677         tg3_flag_set(tp, TX_RECOVERY_PENDING);
4678         spin_unlock(&tp->lock);
4679 }
4680
4681 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4682 {
4683         /* Tell compiler to fetch tx indices from memory. */
4684         barrier();
4685         return tnapi->tx_pending -
4686                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4687 }
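
/* Worked example (a sketch, not driver code): on a 512-entry ring with
 * tx_pending == 512, tx_prod == 5 and tx_cons == 510, the in-flight
 * count is (5 - 510) & 511 == 7, so tg3_tx_avail() returns 505.  The
 * mask is valid because TG3_TX_RING_SIZE is a power of two, so the
 * subtraction wraps correctly even after tx_prod has wrapped past
 * tx_cons.
 */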
4688
4689 /* Tigon3 never reports partial packet sends.  So we do not
4690  * need special logic to handle SKBs that have not had all
4691  * of their frags sent yet, like SunGEM does.
4692  */
4693 static void tg3_tx(struct tg3_napi *tnapi)
4694 {
4695         struct tg3 *tp = tnapi->tp;
4696         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4697         u32 sw_idx = tnapi->tx_cons;
4698         struct netdev_queue *txq;
4699         int index = tnapi - tp->napi;
4700
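        /* With TSS enabled, napi[0] carries no TX ring, so MSI-X vector i
         * services netdev TX queue i - 1.
         */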
4701         if (tg3_flag(tp, ENABLE_TSS))
4702                 index--;
4703
4704         txq = netdev_get_tx_queue(tp->dev, index);
4705
4706         while (sw_idx != hw_idx) {
4707                 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4708                 struct sk_buff *skb = ri->skb;
4709                 int i, tx_bug = 0;
4710
4711                 if (unlikely(skb == NULL)) {
4712                         tg3_tx_recover(tp);
4713                         return;
4714                 }
4715
4716                 pci_unmap_single(tp->pdev,
4717                                  dma_unmap_addr(ri, mapping),
4718                                  skb_headlen(skb),
4719                                  PCI_DMA_TODEVICE);
4720
4721                 ri->skb = NULL;
4722
4723                 sw_idx = NEXT_TX(sw_idx);
4724
4725                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4726                         ri = &tnapi->tx_buffers[sw_idx];
4727                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4728                                 tx_bug = 1;
4729
4730                         pci_unmap_page(tp->pdev,
4731                                        dma_unmap_addr(ri, mapping),
4732                                        skb_shinfo(skb)->frags[i].size,
4733                                        PCI_DMA_TODEVICE);
4734                         sw_idx = NEXT_TX(sw_idx);
4735                 }
4736
4737                 dev_kfree_skb(skb);
4738
4739                 if (unlikely(tx_bug)) {
4740                         tg3_tx_recover(tp);
4741                         return;
4742                 }
4743         }
4744
4745         tnapi->tx_cons = sw_idx;
4746
4747         /* Need to make the tx_cons update visible to tg3_start_xmit()
4748          * before checking for netif_queue_stopped().  Without the
4749          * memory barrier, there is a small possibility that tg3_start_xmit()
4750          * will miss it and cause the queue to be stopped forever.
4751          */
4752         smp_mb();
4753
4754         if (unlikely(netif_tx_queue_stopped(txq) &&
4755                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4756                 __netif_tx_lock(txq, smp_processor_id());
4757                 if (netif_tx_queue_stopped(txq) &&
4758                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4759                         netif_tx_wake_queue(txq);
4760                 __netif_tx_unlock(txq);
4761         }
4762 }
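
/* The stop/wake handshake above is the usual lockless pattern; a sketch
 * of the producer side it pairs with (tg3_start_xmit() is the real
 * producer, shown here in simplified form):
 *
 *	if (unlikely(tg3_tx_avail(tnapi) <= MAX_SKB_FRAGS + 1)) {
 *		netif_tx_stop_queue(txq);
 *		smp_mb();
 *		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
 *			netif_tx_wake_queue(txq);
 *	}
 *
 * The paired barriers ensure the producer either sees the updated
 * tx_cons or the completion path sees the stopped queue, so the queue
 * can never stall with free slots available.
 */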
4763
4764 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4765 {
4766         if (!ri->skb)
4767                 return;
4768
4769         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4770                          map_sz, PCI_DMA_FROMDEVICE);
4771         dev_kfree_skb_any(ri->skb);
4772         ri->skb = NULL;
4773 }
4774
4775 /* Returns size of skb allocated or < 0 on error.
4776  *
4777  * We only need to fill in the address because the other members
4778  * of the RX descriptor are invariant, see tg3_init_rings.
4779  *
4780  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4781  * posting buffers we only dirty the first cache line of the RX
4782  * descriptor (containing the address).  Whereas for the RX status
4783  * buffers the cpu only reads the last cacheline of the RX descriptor
4784  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4785  */
4786 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4787                             u32 opaque_key, u32 dest_idx_unmasked)
4788 {
4789         struct tg3_rx_buffer_desc *desc;
4790         struct ring_info *map;
4791         struct sk_buff *skb;
4792         dma_addr_t mapping;
4793         int skb_size, dest_idx;
4794
4795         switch (opaque_key) {
4796         case RXD_OPAQUE_RING_STD:
4797                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4798                 desc = &tpr->rx_std[dest_idx];
4799                 map = &tpr->rx_std_buffers[dest_idx];
4800                 skb_size = tp->rx_pkt_map_sz;
4801                 break;
4802
4803         case RXD_OPAQUE_RING_JUMBO:
4804                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4805                 desc = &tpr->rx_jmb[dest_idx].std;
4806                 map = &tpr->rx_jmb_buffers[dest_idx];
4807                 skb_size = TG3_RX_JMB_MAP_SZ;
4808                 break;
4809
4810         default:
4811                 return -EINVAL;
4812         }
4813
4814         /* Do not overwrite any of the map or rp information
4815          * until we are sure we can commit to a new buffer.
4816          *
4817          * Callers depend upon this behavior and assume that
4818          * we leave everything unchanged if we fail.
4819          */
4820         skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4821         if (skb == NULL)
4822                 return -ENOMEM;
4823
4824         skb_reserve(skb, tp->rx_offset);
4825
4826         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4827                                  PCI_DMA_FROMDEVICE);
4828         if (pci_dma_mapping_error(tp->pdev, mapping)) {
4829                 dev_kfree_skb(skb);
4830                 return -EIO;
4831         }
4832
4833         map->skb = skb;
4834         dma_unmap_addr_set(map, mapping, mapping);
4835
4836         desc->addr_hi = ((u64)mapping >> 32);
4837         desc->addr_lo = ((u64)mapping & 0xffffffff);
4838
4839         return skb_size;
4840 }
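
/* Minimal repost sketch (illustrative only; tg3_rx() below is the real
 * caller): allocate into slot 'idx' of the standard ring and publish
 * the new producer index to the chip.  On failure the ring entry is
 * left untouched, per the commitment rule above.
 *
 *	if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, idx) < 0)
 *		return;		(retry later; old state preserved)
 *	tpr->rx_std_prod_idx = (idx + 1) & tp->rx_std_ring_mask;
 *	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 */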
4841
4842 /* We only need to move over in the address because the other
4843  * members of the RX descriptor are invariant.  See notes above
4844  * tg3_alloc_rx_skb for full details.
4845  */
4846 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4847                            struct tg3_rx_prodring_set *dpr,
4848                            u32 opaque_key, int src_idx,
4849                            u32 dest_idx_unmasked)
4850 {
4851         struct tg3 *tp = tnapi->tp;
4852         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4853         struct ring_info *src_map, *dest_map;
4854         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4855         int dest_idx;
4856
4857         switch (opaque_key) {
4858         case RXD_OPAQUE_RING_STD:
4859                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4860                 dest_desc = &dpr->rx_std[dest_idx];
4861                 dest_map = &dpr->rx_std_buffers[dest_idx];
4862                 src_desc = &spr->rx_std[src_idx];
4863                 src_map = &spr->rx_std_buffers[src_idx];
4864                 break;
4865
4866         case RXD_OPAQUE_RING_JUMBO:
4867                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4868                 dest_desc = &dpr->rx_jmb[dest_idx].std;
4869                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4870                 src_desc = &spr->rx_jmb[src_idx].std;
4871                 src_map = &spr->rx_jmb_buffers[src_idx];
4872                 break;
4873
4874         default:
4875                 return;
4876         }
4877
4878         dest_map->skb = src_map->skb;
4879         dma_unmap_addr_set(dest_map, mapping,
4880                            dma_unmap_addr(src_map, mapping));
4881         dest_desc->addr_hi = src_desc->addr_hi;
4882         dest_desc->addr_lo = src_desc->addr_lo;
4883
4884         /* Ensure that the update to the skb happens after the physical
4885          * addresses have been transferred to the new BD location.
4886          */
4887         smp_wmb();
4888
4889         src_map->skb = NULL;
4890 }
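
/* The smp_wmb() above pairs with the smp_rmb() in
 * tg3_rx_prodring_xfer(): the new BD address becomes visible to other
 * CPUs before the skb pointer is seen to move, so a concurrent reader
 * never observes a slot with a stale address.
 */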
4891
4892 /* The RX ring scheme is composed of multiple rings which post fresh
4893  * buffers to the chip, and one special ring the chip uses to report
4894  * status back to the host.
4895  *
4896  * The special ring reports the status of received packets to the
4897  * host.  The chip does not write into the original descriptor the
4898  * RX buffer was obtained from.  The chip simply takes the original
4899  * descriptor as provided by the host, updates the status and length
4900  * field, then writes this into the next status ring entry.
4901  *
4902  * Each ring the host uses to post buffers to the chip is described
4903  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
4904  * it is first placed into the on-chip RAM.  When the packet's length
4905  * is known, the chip walks down the TG3_BDINFO entries to select the
4906  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
4907  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4908  *
4909  * The "separate ring for rx status" scheme may sound queer, but it makes
4910  * sense from a cache coherency perspective.  If only the host writes
4911  * to the buffer post rings, and only the chip writes to the rx status
4912  * rings, then cache lines never move beyond shared-modified state.
4913  * If both the host and chip were to write into the same ring, cache line
4914  * eviction could occur since both entities want it in an exclusive state.
4915  */
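
/* A condensed view of the data flow described above:
 *
 *   host --posts empty BDs--> std/jumbo producer rings --> chip
 *   chip --status + length--> RX return ring --> host (tg3_rx() below)
 */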
4916 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4917 {
4918         struct tg3 *tp = tnapi->tp;
4919         u32 work_mask, rx_std_posted = 0;
4920         u32 std_prod_idx, jmb_prod_idx;
4921         u32 sw_idx = tnapi->rx_rcb_ptr;
4922         u16 hw_idx;
4923         int received;
4924         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4925
4926         hw_idx = *(tnapi->rx_rcb_prod_idx);
4927         /*
4928          * We need to order the read of hw_idx and the read of
4929          * the opaque cookie.
4930          */
4931         rmb();
4932         work_mask = 0;
4933         received = 0;
4934         std_prod_idx = tpr->rx_std_prod_idx;
4935         jmb_prod_idx = tpr->rx_jmb_prod_idx;
4936         while (sw_idx != hw_idx && budget > 0) {
4937                 struct ring_info *ri;
4938                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4939                 unsigned int len;
4940                 struct sk_buff *skb;
4941                 dma_addr_t dma_addr;
4942                 u32 opaque_key, desc_idx, *post_ptr;
4943
4944                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4945                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4946                 if (opaque_key == RXD_OPAQUE_RING_STD) {
4947                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4948                         dma_addr = dma_unmap_addr(ri, mapping);
4949                         skb = ri->skb;
4950                         post_ptr = &std_prod_idx;
4951                         rx_std_posted++;
4952                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4953                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4954                         dma_addr = dma_unmap_addr(ri, mapping);
4955                         skb = ri->skb;
4956                         post_ptr = &jmb_prod_idx;
4957                 } else
4958                         goto next_pkt_nopost;
4959
4960                 work_mask |= opaque_key;
4961
4962                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4963                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4964                 drop_it:
4965                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4966                                        desc_idx, *post_ptr);
4967                 drop_it_no_recycle:
4968                         /* Other statistics are tracked by the card. */
4969                         tp->rx_dropped++;
4970                         goto next_pkt;
4971                 }
4972
4973                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4974                       ETH_FCS_LEN;
4975
4976                 if (len > TG3_RX_COPY_THRESH(tp)) {
4977                         int skb_size;
4978
4979                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4980                                                     *post_ptr);
4981                         if (skb_size < 0)
4982                                 goto drop_it;
4983
4984                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
4985                                          PCI_DMA_FROMDEVICE);
4986
4987                         /* Ensure that the update to the skb happens
4988                          * after the usage of the old DMA mapping.
4989                          */
4990                         smp_wmb();
4991
4992                         ri->skb = NULL;
4993
4994                         skb_put(skb, len);
4995                 } else {
4996                         struct sk_buff *copy_skb;
4997
4998                         tg3_recycle_rx(tnapi, tpr, opaque_key,
4999                                        desc_idx, *post_ptr);
5000
5001                         copy_skb = netdev_alloc_skb(tp->dev, len +
5002                                                     TG3_RAW_IP_ALIGN);
5003                         if (copy_skb == NULL)
5004                                 goto drop_it_no_recycle;
5005
5006                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5007                         skb_put(copy_skb, len);
5008                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
                                                         len, PCI_DMA_FROMDEVICE);
5009                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5010                         pci_dma_sync_single_for_device(tp->pdev, dma_addr,
                                                            len, PCI_DMA_FROMDEVICE);
5011
5012                         /* We'll reuse the original ring buffer. */
5013                         skb = copy_skb;
5014                 }
5015
5016                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5017                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5018                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5019                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5020                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5021                 else
5022                         skb_checksum_none_assert(skb);
5023
5024                 skb->protocol = eth_type_trans(skb, tp->dev);
5025
5026                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5027                     skb->protocol != htons(ETH_P_8021Q)) {
5028                         dev_kfree_skb(skb);
5029                         goto drop_it_no_recycle;
5030                 }
5031
5032                 if (desc->type_flags & RXD_FLAG_VLAN &&
5033                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5034                         __vlan_hwaccel_put_tag(skb,
5035                                                desc->err_vlan & RXD_VLAN_MASK);
5036
5037                 napi_gro_receive(&tnapi->napi, skb);
5038
5039                 received++;
5040                 budget--;
5041
5042 next_pkt:
5043                 (*post_ptr)++;
5044
5045                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5046                         tpr->rx_std_prod_idx = std_prod_idx &
5047                                                tp->rx_std_ring_mask;
5048                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5049                                      tpr->rx_std_prod_idx);
5050                         work_mask &= ~RXD_OPAQUE_RING_STD;
5051                         rx_std_posted = 0;
5052                 }
5053 next_pkt_nopost:
5054                 sw_idx++;
5055                 sw_idx &= tp->rx_ret_ring_mask;
5056
5057                 /* Refresh hw_idx to see if there is new work */
5058                 if (sw_idx == hw_idx) {
5059                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5060                         rmb();
5061                 }
5062         }
5063
5064         /* ACK the status ring. */
5065         tnapi->rx_rcb_ptr = sw_idx;
5066         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5067
5068         /* Refill RX ring(s). */
5069         if (!tg3_flag(tp, ENABLE_RSS)) {
5070                 if (work_mask & RXD_OPAQUE_RING_STD) {
5071                         tpr->rx_std_prod_idx = std_prod_idx &
5072                                                tp->rx_std_ring_mask;
5073                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5074                                      tpr->rx_std_prod_idx);
5075                 }
5076                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5077                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5078                                                tp->rx_jmb_ring_mask;
5079                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5080                                      tpr->rx_jmb_prod_idx);
5081                 }
5082                 mmiowb();
5083         } else if (work_mask) {
5084                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5085                  * updated before the producer indices can be updated.
5086                  */
5087                 smp_wmb();
5088
5089                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5090                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5091
5092                 if (tnapi != &tp->napi[1])
5093                         napi_schedule(&tp->napi[1].napi);
5094         }
5095
5096         return received;
5097 }
5098
5099 static void tg3_poll_link(struct tg3 *tp)
5100 {
5101         /* handle link change and other phy events */
5102         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5103                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5104
5105                 if (sblk->status & SD_STATUS_LINK_CHG) {
5106                         sblk->status = SD_STATUS_UPDATED |
5107                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5108                         spin_lock(&tp->lock);
5109                         if (tg3_flag(tp, USE_PHYLIB)) {
5110                                 tw32_f(MAC_STATUS,
5111                                      (MAC_STATUS_SYNC_CHANGED |
5112                                       MAC_STATUS_CFG_CHANGED |
5113                                       MAC_STATUS_MI_COMPLETION |
5114                                       MAC_STATUS_LNKSTATE_CHANGED));
5115                                 udelay(40);
5116                         } else
5117                                 tg3_setup_phy(tp, 0);
5118                         spin_unlock(&tp->lock);
5119                 }
5120         }
5121 }
5122
5123 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5124                                 struct tg3_rx_prodring_set *dpr,
5125                                 struct tg3_rx_prodring_set *spr)
5126 {
5127         u32 si, di, cpycnt, src_prod_idx;
5128         int i, err = 0;
5129
5130         while (1) {
5131                 src_prod_idx = spr->rx_std_prod_idx;
5132
5133                 /* Make sure updates to the rx_std_buffers[] entries and the
5134                  * standard producer index are seen in the correct order.
5135                  */
5136                 smp_rmb();
5137
5138                 if (spr->rx_std_cons_idx == src_prod_idx)
5139                         break;
5140
5141                 if (spr->rx_std_cons_idx < src_prod_idx)
5142                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5143                 else
5144                         cpycnt = tp->rx_std_ring_mask + 1 -
5145                                  spr->rx_std_cons_idx;
5146
5147                 cpycnt = min(cpycnt,
5148                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5149
5150                 si = spr->rx_std_cons_idx;
5151                 di = dpr->rx_std_prod_idx;
5152
5153                 for (i = di; i < di + cpycnt; i++) {
5154                         if (dpr->rx_std_buffers[i].skb) {
5155                                 cpycnt = i - di;
5156                                 err = -ENOSPC;
5157                                 break;
5158                         }
5159                 }
5160
5161                 if (!cpycnt)
5162                         break;
5163
5164                 /* Ensure that updates to the rx_std_buffers ring and the
5165          * shadowed hardware producer ring from tg3_recycle_rx() are
5166                  * ordered correctly WRT the skb check above.
5167                  */
5168                 smp_rmb();
5169
5170                 memcpy(&dpr->rx_std_buffers[di],
5171                        &spr->rx_std_buffers[si],
5172                        cpycnt * sizeof(struct ring_info));
5173
5174                 for (i = 0; i < cpycnt; i++, di++, si++) {
5175                         struct tg3_rx_buffer_desc *sbd, *dbd;
5176                         sbd = &spr->rx_std[si];
5177                         dbd = &dpr->rx_std[di];
5178                         dbd->addr_hi = sbd->addr_hi;
5179                         dbd->addr_lo = sbd->addr_lo;
5180                 }
5181
5182                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5183                                        tp->rx_std_ring_mask;
5184                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5185                                        tp->rx_std_ring_mask;
5186         }
5187
5188         while (1) {
5189                 src_prod_idx = spr->rx_jmb_prod_idx;
5190
5191                 /* Make sure updates to the rx_jmb_buffers[] entries and
5192                  * the jumbo producer index are seen in the correct order.
5193                  */
5194                 smp_rmb();
5195
5196                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5197                         break;
5198
5199                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5200                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5201                 else
5202                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5203                                  spr->rx_jmb_cons_idx;
5204
5205                 cpycnt = min(cpycnt,
5206                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5207
5208                 si = spr->rx_jmb_cons_idx;
5209                 di = dpr->rx_jmb_prod_idx;
5210
5211                 for (i = di; i < di + cpycnt; i++) {
5212                         if (dpr->rx_jmb_buffers[i].skb) {
5213                                 cpycnt = i - di;
5214                                 err = -ENOSPC;
5215                                 break;
5216                         }
5217                 }
5218
5219                 if (!cpycnt)
5220                         break;
5221
5222                 /* Ensure that updates to the rx_jmb_buffers ring and the
5223          * shadowed hardware producer ring from tg3_recycle_rx() are
5224                  * ordered correctly WRT the skb check above.
5225                  */
5226                 smp_rmb();
5227
5228                 memcpy(&dpr->rx_jmb_buffers[di],
5229                        &spr->rx_jmb_buffers[si],
5230                        cpycnt * sizeof(struct ring_info));
5231
5232                 for (i = 0; i < cpycnt; i++, di++, si++) {
5233                         struct tg3_rx_buffer_desc *sbd, *dbd;
5234                         sbd = &spr->rx_jmb[si].std;
5235                         dbd = &dpr->rx_jmb[di].std;
5236                         dbd->addr_hi = sbd->addr_hi;
5237                         dbd->addr_lo = sbd->addr_lo;
5238                 }
5239
5240                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5241                                        tp->rx_jmb_ring_mask;
5242                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5243                                        tp->rx_jmb_ring_mask;
5244         }
5245
5246         return err;
5247 }
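
/* Wraparound example (a sketch): with rx_std_ring_mask == 511,
 * spr->rx_std_cons_idx == 508 and src_prod_idx == 4, the consumer is
 * numerically above the producer, so the first pass copies
 * 512 - 508 == 4 entries (508..511); the outer loop then wraps the
 * consumer index to 0 and a second pass copies entries 0..3.  cpycnt
 * is also clamped so the destination producer never wraps mid-copy.
 */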
5248
5249 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5250 {
5251         struct tg3 *tp = tnapi->tp;
5252
5253         /* run TX completion thread */
5254         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5255                 tg3_tx(tnapi);
5256                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5257                         return work_done;
5258         }
5259
5260         /* run RX thread, within the bounds set by NAPI.
5261          * All RX "locking" is done by ensuring outside
5262          * code synchronizes with tg3->napi.poll()
5263          */
5264         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5265                 work_done += tg3_rx(tnapi, budget - work_done);
5266
5267         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5268                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5269                 int i, err = 0;
5270                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5271                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5272
5273                 for (i = 1; i < tp->irq_cnt; i++)
5274                         err |= tg3_rx_prodring_xfer(tp, dpr,
5275                                                     &tp->napi[i].prodring);
5276
5277                 wmb();
5278
5279                 if (std_prod_idx != dpr->rx_std_prod_idx)
5280                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5281                                      dpr->rx_std_prod_idx);
5282
5283                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5284                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5285                                      dpr->rx_jmb_prod_idx);
5286
5287                 mmiowb();
5288
5289                 if (err)
5290                         tw32_f(HOSTCC_MODE, tp->coal_now);
5291         }
5292
5293         return work_done;
5294 }
5295
5296 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5297 {
5298         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5299         struct tg3 *tp = tnapi->tp;
5300         int work_done = 0;
5301         struct tg3_hw_status *sblk = tnapi->hw_status;
5302
5303         while (1) {
5304                 work_done = tg3_poll_work(tnapi, work_done, budget);
5305
5306                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5307                         goto tx_recovery;
5308
5309                 if (unlikely(work_done >= budget))
5310                         break;
5311
5312                 /* tnapi->last_tag is used when re-enabling interrupts below
5313                  * to tell the hw how much work has been processed,
5314                  * so we must read it before checking for more work.
5315                  */
5316                 tnapi->last_tag = sblk->status_tag;
5317                 tnapi->last_irq_tag = tnapi->last_tag;
5318                 rmb();
5319
5320                 /* check for RX/TX work to do */
5321                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5322                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5323                         napi_complete(napi);
5324                         /* Reenable interrupts. */
5325                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5326                         mmiowb();
5327                         break;
5328                 }
5329         }
5330
5331         return work_done;
5332
5333 tx_recovery:
5334         /* work_done is guaranteed to be less than budget. */
5335         napi_complete(napi);
5336         schedule_work(&tp->reset_task);
5337         return work_done;
5338 }
5339
5340 static void tg3_process_error(struct tg3 *tp)
5341 {
5342         u32 val;
5343         bool real_error = false;
5344
5345         if (tg3_flag(tp, ERROR_PROCESSED))
5346                 return;
5347
5348         /* Check Flow Attention register */
5349         val = tr32(HOSTCC_FLOW_ATTN);
5350         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5351                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5352                 real_error = true;
5353         }
5354
5355         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5356                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5357                 real_error = true;
5358         }
5359
5360         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5361                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5362                 real_error = true;
5363         }
5364
5365         if (!real_error)
5366                 return;
5367
5368         tg3_dump_state(tp);
5369
5370         tg3_flag_set(tp, ERROR_PROCESSED);
5371         schedule_work(&tp->reset_task);
5372 }
5373
5374 static int tg3_poll(struct napi_struct *napi, int budget)
5375 {
5376         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5377         struct tg3 *tp = tnapi->tp;
5378         int work_done = 0;
5379         struct tg3_hw_status *sblk = tnapi->hw_status;
5380
5381         while (1) {
5382                 if (sblk->status & SD_STATUS_ERROR)
5383                         tg3_process_error(tp);
5384
5385                 tg3_poll_link(tp);
5386
5387                 work_done = tg3_poll_work(tnapi, work_done, budget);
5388
5389                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5390                         goto tx_recovery;
5391
5392                 if (unlikely(work_done >= budget))
5393                         break;
5394
5395                 if (tg3_flag(tp, TAGGED_STATUS)) {
5396                         /* tnapi->last_tag is used in tg3_int_reenable() below
5397                          * to tell the hw how much work has been processed,
5398                          * so we must read it before checking for more work.
5399                          */
5400                         tnapi->last_tag = sblk->status_tag;
5401                         tnapi->last_irq_tag = tnapi->last_tag;
5402                         rmb();
5403                 } else
5404                         sblk->status &= ~SD_STATUS_UPDATED;
5405
5406                 if (likely(!tg3_has_work(tnapi))) {
5407                         napi_complete(napi);
5408                         tg3_int_reenable(tnapi);
5409                         break;
5410                 }
5411         }
5412
5413         return work_done;
5414
5415 tx_recovery:
5416         /* work_done is guaranteed to be less than budget. */
5417         napi_complete(napi);
5418         schedule_work(&tp->reset_task);
5419         return work_done;
5420 }
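
/* Both poll routines above follow the standard NAPI contract: keep
 * returning work_done == budget while work remains, and only call
 * napi_complete() and re-enable the device interrupt when returning
 * less than budget.
 */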
5421
5422 static void tg3_napi_disable(struct tg3 *tp)
5423 {
5424         int i;
5425
5426         for (i = tp->irq_cnt - 1; i >= 0; i--)
5427                 napi_disable(&tp->napi[i].napi);
5428 }
5429
5430 static void tg3_napi_enable(struct tg3 *tp)
5431 {
5432         int i;
5433
5434         for (i = 0; i < tp->irq_cnt; i++)
5435                 napi_enable(&tp->napi[i].napi);
5436 }
5437
5438 static void tg3_napi_init(struct tg3 *tp)
5439 {
5440         int i;
5441
5442         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5443         for (i = 1; i < tp->irq_cnt; i++)
5444                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5445 }
5446
5447 static void tg3_napi_fini(struct tg3 *tp)
5448 {
5449         int i;
5450
5451         for (i = 0; i < tp->irq_cnt; i++)
5452                 netif_napi_del(&tp->napi[i].napi);
5453 }
5454
5455 static inline void tg3_netif_stop(struct tg3 *tp)
5456 {
5457         tp->dev->trans_start = jiffies; /* prevent tx timeout */
5458         tg3_napi_disable(tp);
5459         netif_tx_disable(tp->dev);
5460 }
5461
5462 static inline void tg3_netif_start(struct tg3 *tp)
5463 {
5464         /* NOTE: unconditional netif_tx_wake_all_queues is only
5465          * appropriate so long as all callers are assured to
5466          * have free tx slots (such as after tg3_init_hw)
5467          */
5468         netif_tx_wake_all_queues(tp->dev);
5469
5470         tg3_napi_enable(tp);
5471         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5472         tg3_enable_ints(tp);
5473 }
5474
5475 static void tg3_irq_quiesce(struct tg3 *tp)
5476 {
5477         int i;
5478
5479         BUG_ON(tp->irq_sync);
5480
5481         tp->irq_sync = 1;
5482         smp_mb();
5483
5484         for (i = 0; i < tp->irq_cnt; i++)
5485                 synchronize_irq(tp->napi[i].irq_vec);
5486 }
5487
5488 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5489  * If irq_sync is non-zero, then the IRQ handler must be synchronized
5490  * with as well.  Most of the time, this is not necessary except when
5491  * shutting down the device.
5492  */
5493 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5494 {
5495         spin_lock_bh(&tp->lock);
5496         if (irq_sync)
5497                 tg3_irq_quiesce(tp);
5498 }
5499
5500 static inline void tg3_full_unlock(struct tg3 *tp)
5501 {
5502         spin_unlock_bh(&tp->lock);
5503 }
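
/* Typical usage (a sketch; the shutdown paths look roughly like this,
 * with irq_sync == 1 to also wait out any running IRQ handlers):
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_full_unlock(tp);
 *
 * Pass irq_sync == 0 when only lock protection is required.
 */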
5504
5505 /* One-shot MSI handler - Chip automatically disables interrupt
5506  * after sending MSI so driver doesn't have to do it.
5507  */
5508 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5509 {
5510         struct tg3_napi *tnapi = dev_id;
5511         struct tg3 *tp = tnapi->tp;
5512
5513         prefetch(tnapi->hw_status);
5514         if (tnapi->rx_rcb)
5515                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5516
5517         if (likely(!tg3_irq_sync(tp)))
5518                 napi_schedule(&tnapi->napi);
5519
5520         return IRQ_HANDLED;
5521 }
5522
5523 /* MSI ISR - No need to check for interrupt sharing and no need to
5524  * flush status block and interrupt mailbox. PCI ordering rules
5525  * guarantee that MSI will arrive after the status block.
5526  */
5527 static irqreturn_t tg3_msi(int irq, void *dev_id)
5528 {
5529         struct tg3_napi *tnapi = dev_id;
5530         struct tg3 *tp = tnapi->tp;
5531
5532         prefetch(tnapi->hw_status);
5533         if (tnapi->rx_rcb)
5534                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5535         /*
5536          * Writing any value to intr-mbox-0 clears PCI INTA# and
5537          * chip-internal interrupt pending events.
5538          * Writing non-zero to intr-mbox-0 additionally tells the
5539          * NIC to stop sending us irqs, engaging "in-intr-handler"
5540          * event coalescing.
5541          */
5542         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5543         if (likely(!tg3_irq_sync(tp)))
5544                 napi_schedule(&tnapi->napi);
5545
5546         return IRQ_RETVAL(1);
5547 }
5548
5549 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5550 {
5551         struct tg3_napi *tnapi = dev_id;
5552         struct tg3 *tp = tnapi->tp;
5553         struct tg3_hw_status *sblk = tnapi->hw_status;
5554         unsigned int handled = 1;
5555
5556         /* In INTx mode, it is possible for the interrupt to arrive at
5557          * the CPU before the status block that was posted just prior to it.
5558          * Reading the PCI State register will confirm whether the
5559          * interrupt is ours and will flush the status block.
5560          */
5561         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5562                 if (tg3_flag(tp, CHIP_RESETTING) ||
5563                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5564                         handled = 0;
5565                         goto out;
5566                 }
5567         }
5568
5569         /*
5570          * Writing any value to intr-mbox-0 clears PCI INTA# and
5571          * chip-internal interrupt pending events.
5572          * Writing non-zero to intr-mbox-0 additionally tells the
5573          * NIC to stop sending us irqs, engaging "in-intr-handler"
5574          * event coalescing.
5575          *
5576          * Flush the mailbox to de-assert the IRQ immediately to prevent
5577          * spurious interrupts.  The flush impacts performance but
5578          * excessive spurious interrupts can be worse in some cases.
5579          */
5580         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5581         if (tg3_irq_sync(tp))
5582                 goto out;
5583         sblk->status &= ~SD_STATUS_UPDATED;
5584         if (likely(tg3_has_work(tnapi))) {
5585                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5586                 napi_schedule(&tnapi->napi);
5587         } else {
5588                 /* No work; shared interrupt perhaps?  Re-enable
5589                  * interrupts, and flush that PCI write.
5590                  */
5591                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5592                                0x00000000);
5593         }
5594 out:
5595         return IRQ_RETVAL(handled);
5596 }
5597
5598 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5599 {
5600         struct tg3_napi *tnapi = dev_id;
5601         struct tg3 *tp = tnapi->tp;
5602         struct tg3_hw_status *sblk = tnapi->hw_status;
5603         unsigned int handled = 1;
5604
5605         /* In INTx mode, the interrupt can arrive at the CPU before the
5606          * status block write posted prior to the interrupt has landed.
5607          * Reading the PCI State register will confirm whether the
5608          * interrupt is ours and will flush the status block.
5609          */
5610         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5611                 if (tg3_flag(tp, CHIP_RESETTING) ||
5612                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5613                         handled = 0;
5614                         goto out;
5615                 }
5616         }
5617
5618         /*
5619          * Writing any value to intr-mbox-0 clears PCI INTA# and
5620          * chip-internal interrupt pending events.
5621          * Writing non-zero to intr-mbox-0 additionally tells the
5622          * NIC to stop sending us irqs, engaging "in-intr-handler"
5623          * event coalescing.
5624          *
5625          * Flush the mailbox to de-assert the IRQ immediately to prevent
5626          * spurious interrupts.  The flush impacts performance but
5627          * excessive spurious interrupts can be worse in some cases.
5628          */
5629         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5630
5631         /*
5632          * In a shared interrupt configuration, sometimes other devices'
5633          * interrupts will scream.  We record the current status tag here
5634          * so that the above check can report that the screaming interrupts
5635          * are unhandled.  Eventually they will be silenced.
5636          */
5637         tnapi->last_irq_tag = sblk->status_tag;
5638
5639         if (tg3_irq_sync(tp))
5640                 goto out;
5641
5642         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5643
5644         napi_schedule(&tnapi->napi);
5645
5646 out:
5647         return IRQ_RETVAL(handled);
5648 }
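
/* Illustrative timeline for the tagged-status check above (a sketch,
 * not additional driver code).  The hardware advances
 * sblk->status_tag each time it DMAs a fresh status block:
 *
 *	irq A: status_tag == 5, last_irq_tag == 4  -> new work; schedule
 *	       NAPI and record last_irq_tag = 5
 *	irq B: status_tag == 5, last_irq_tag == 5  -> nothing new; only
 *	       claimed if the PCI State register says INTA# is ours
 *
 * This is what lets a screaming shared interrupt eventually be
 * reported as unhandled and silenced by the IRQ core.
 */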
5649
5650 /* ISR for interrupt test */
5651 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5652 {
5653         struct tg3_napi *tnapi = dev_id;
5654         struct tg3 *tp = tnapi->tp;
5655         struct tg3_hw_status *sblk = tnapi->hw_status;
5656
5657         if ((sblk->status & SD_STATUS_UPDATED) ||
5658             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5659                 tg3_disable_ints(tp);
5660                 return IRQ_RETVAL(1);
5661         }
5662         return IRQ_RETVAL(0);
5663 }
5664
5665 static int tg3_init_hw(struct tg3 *, int);
5666 static int tg3_halt(struct tg3 *, int, int);
5667
5668 /* Restart hardware after configuration changes, self-test, etc.
5669  * Invoked with tp->lock held.
5670  */
5671 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5672         __releases(tp->lock)
5673         __acquires(tp->lock)
5674 {
5675         int err;
5676
5677         err = tg3_init_hw(tp, reset_phy);
5678         if (err) {
5679                 netdev_err(tp->dev,
5680                            "Failed to re-initialize device, aborting\n");
5681                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5682                 tg3_full_unlock(tp);
5683                 del_timer_sync(&tp->timer);
5684                 tp->irq_sync = 0;
5685                 tg3_napi_enable(tp);
5686                 dev_close(tp->dev);
5687                 tg3_full_lock(tp, 0);
5688         }
5689         return err;
5690 }
5691
5692 #ifdef CONFIG_NET_POLL_CONTROLLER
5693 static void tg3_poll_controller(struct net_device *dev)
5694 {
5695         int i;
5696         struct tg3 *tp = netdev_priv(dev);
5697
5698         for (i = 0; i < tp->irq_cnt; i++)
5699                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5700 }
5701 #endif
5702
5703 static void tg3_reset_task(struct work_struct *work)
5704 {
5705         struct tg3 *tp = container_of(work, struct tg3, reset_task);
5706         int err;
5707         unsigned int restart_timer;
5708
5709         tg3_full_lock(tp, 0);
5710
5711         if (!netif_running(tp->dev)) {
5712                 tg3_full_unlock(tp);
5713                 return;
5714         }
5715
5716         tg3_full_unlock(tp);
5717
5718         tg3_phy_stop(tp);
5719
5720         tg3_netif_stop(tp);
5721
5722         tg3_full_lock(tp, 1);
5723
5724         restart_timer = tg3_flag(tp, RESTART_TIMER);
5725         tg3_flag_clear(tp, RESTART_TIMER);
5726
5727         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
5728                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5729                 tp->write32_rx_mbox = tg3_write_flush_reg32;
5730                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
5731                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
5732         }
5733
5734         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5735         err = tg3_init_hw(tp, 1);
5736         if (err)
5737                 goto out;
5738
5739         tg3_netif_start(tp);
5740
5741         if (restart_timer)
5742                 mod_timer(&tp->timer, jiffies + 1);
5743
5744 out:
5745         tg3_full_unlock(tp);
5746
5747         if (!err)
5748                 tg3_phy_start(tp);
5749 }
5750
5751 static void tg3_tx_timeout(struct net_device *dev)
5752 {
5753         struct tg3 *tp = netdev_priv(dev);
5754
5755         if (netif_msg_tx_err(tp)) {
5756                 netdev_err(dev, "transmit timed out, resetting\n");
5757                 tg3_dump_state(tp);
5758         }
5759
5760         schedule_work(&tp->reset_task);
5761 }
5762
5763 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5764 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5765 {
5766         u32 base = (u32) mapping & 0xffffffff;
5767
5768         return (base > 0xffffdcc0) && (base + len + 8 < base);
5769 }
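
/* Worked example for the test above (illustration only): a 9000-byte
 * buffer mapped at 0xffffe000 gives base + len + 8 = 0x330 after
 * 32-bit truncation, which is less than base, so the buffer straddles
 * the 4GB boundary and must be bounced.  The base > 0xffffdcc0
 * pre-check merely skips the sum for buffers that cannot reach the
 * boundary: 0x100000000 - 0xffffdcc0 = 0x2340 bytes, apparently sized
 * to cover the largest jumbo frame plus the 8-byte slack.
 */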
5770
5771 /* Test for DMA addresses > 40-bit */
5772 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5773                                           int len)
5774 {
5775 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5776         if (tg3_flag(tp, 40BIT_DMA_BUG))
5777                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5778         return 0;
5779 #else
5780         return 0;
5781 #endif
5782 }
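
/* Illustration only: DMA_BIT_MASK(40) is 0xffffffffff, so a mapping
 * at 0xfffffff000 with len 0x2000 sums to 0x10000001000 and fails the
 * test above; the packet is then bounced through
 * tigon3_dma_hwbug_workaround() below.  Per the #if above, the check
 * is only compiled in for 64-bit kernels with CONFIG_HIGHMEM, where
 * such addresses can actually occur.
 */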
5783
5784 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5785                         dma_addr_t mapping, int len, u32 flags,
5786                         u32 mss_and_is_end)
5787 {
5788         struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5789         int is_end = (mss_and_is_end & 0x1);
5790         u32 mss = (mss_and_is_end >> 1);
5791         u32 vlan_tag = 0;
5792
5793         if (is_end)
5794                 flags |= TXD_FLAG_END;
5795         if (flags & TXD_FLAG_VLAN) {
5796                 vlan_tag = flags >> 16;
5797                 flags &= 0xffff;
5798         }
5799         vlan_tag |= (mss << TXD_MSS_SHIFT);
5800
5801         txd->addr_hi = ((u64) mapping >> 32);
5802         txd->addr_lo = ((u64) mapping & 0xffffffff);
5803         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5804         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5805 }
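
/* Field packing used by tg3_set_txd() above, summarized for
 * illustration (derived from the shifts in the function body):
 *
 *	mss_and_is_end = (mss << 1) | is_end
 *	txd->len_flags = (len << TXD_LEN_SHIFT) | flags
 *	txd->vlan_tag  = (vlan_tag | (mss << TXD_MSS_SHIFT))
 *	                 << TXD_VLAN_TAG_SHIFT
 *
 * e.g. the callers below pass (i == last) | (mss << 1) so that only
 * the final fragment of a packet carries TXD_FLAG_END.
 */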
5806
5807 static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
5808                                 struct sk_buff *skb, int last)
5809 {
5810         int i;
5811         u32 entry = tnapi->tx_prod;
5812         struct ring_info *txb = &tnapi->tx_buffers[entry];
5813
5814         pci_unmap_single(tnapi->tp->pdev,
5815                          dma_unmap_addr(txb, mapping),
5816                          skb_headlen(skb),
5817                          PCI_DMA_TODEVICE);
5818         for (i = 0; i < last; i++) {
5819                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5820
5821                 entry = NEXT_TX(entry);
5822                 txb = &tnapi->tx_buffers[entry];
5823
5824                 pci_unmap_page(tnapi->tp->pdev,
5825                                dma_unmap_addr(txb, mapping),
5826                                frag->size, PCI_DMA_TODEVICE);
5827         }
5828 }
5829
5830 /* Work around 4GB and 40-bit hardware DMA bugs. */
5831 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5832                                        struct sk_buff *skb,
5833                                        u32 base_flags, u32 mss)
5834 {
5835         struct tg3 *tp = tnapi->tp;
5836         struct sk_buff *new_skb;
5837         dma_addr_t new_addr = 0;
5838         u32 entry = tnapi->tx_prod;
5839         int ret = 0;
5840
5841         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5842                 new_skb = skb_copy(skb, GFP_ATOMIC);
5843         else {
5844                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5845
5846                 new_skb = skb_copy_expand(skb,
5847                                           skb_headroom(skb) + more_headroom,
5848                                           skb_tailroom(skb), GFP_ATOMIC);
5849         }
5850
5851         if (!new_skb) {
5852                 ret = -1;
5853         } else {
5854                 /* New SKB is guaranteed to be linear. */
5855                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5856                                           PCI_DMA_TODEVICE);
5857                 /* Make sure the mapping succeeded */
5858                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5859                         ret = -1;
5860                         dev_kfree_skb(new_skb);
5861
5862                 /* Make sure new skb does not cross any 4G boundaries.
5863                  * Drop the packet if it does.
5864                  */
5865                 } else if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
5866                         pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5867                                          PCI_DMA_TODEVICE);
5868                         ret = -1;
5869                         dev_kfree_skb(new_skb);
5870                 } else {
5871                         tnapi->tx_buffers[entry].skb = new_skb;
5872                         dma_unmap_addr_set(&tnapi->tx_buffers[entry],
5873                                            mapping, new_addr);
5874
5875                         tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5876                                     base_flags, 1 | (mss << 1));
5877                 }
5878         }
5879
5880         dev_kfree_skb(skb);
5881
5882         return ret;
5883 }
5884
5885 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5886
5887 /* Use GSO to work around a rare TSO bug that may be triggered when the
5888  * TSO header is greater than 80 bytes.
5889  */
5890 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5891 {
5892         struct sk_buff *segs, *nskb;
5893         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5894
5895         /* Estimate the number of fragments in the worst case */
5896         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5897                 netif_stop_queue(tp->dev);
5898
5899                 /* netif_tx_stop_queue() must be done before checking
5900                  * the tx index in tg3_tx_avail() below, because in
5901                  * tg3_tx(), we update tx index before checking for
5902                  * netif_tx_queue_stopped().
5903                  */
5904                 smp_mb();
5905                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5906                         return NETDEV_TX_BUSY;
5907
5908                 netif_wake_queue(tp->dev);
5909         }
5910
5911         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5912         if (IS_ERR(segs))
5913                 goto tg3_tso_bug_end;
5914
5915         do {
5916                 nskb = segs;
5917                 segs = segs->next;
5918                 nskb->next = NULL;
5919                 tg3_start_xmit(nskb, tp->dev);
5920         } while (segs);
5921
5922 tg3_tso_bug_end:
5923         dev_kfree_skb(skb);
5924
5925         return NETDEV_TX_OK;
5926 }
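
/* Flow of the workaround above, for illustration: the oversized-header
 * skb is software-segmented by skb_gso_segment() (TSO stripped from
 * the feature mask), and every resulting MTU-sized segment is fed back
 * through tg3_start_xmit(), where it takes the ordinary non-TSO path.
 * The frag_cnt_est of gso_segs * 3 is a worst-case guess of roughly
 * three descriptors (header plus a couple of fragments) per segment.
 */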
5927
5928 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5929  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5930  */
5931 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
5932 {
5933         struct tg3 *tp = netdev_priv(dev);
5934         u32 len, entry, base_flags, mss;
5935         int i = -1, would_hit_hwbug;
5936         dma_addr_t mapping;
5937         struct tg3_napi *tnapi;
5938         struct netdev_queue *txq;
5939         unsigned int last;
5940
5941         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5942         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5943         if (tg3_flag(tp, ENABLE_TSS))
5944                 tnapi++;
5945
5946         /* We are running in BH disabled context with netif_tx_lock
5947          * and TX reclaim runs via tp->napi.poll inside of a software
5948          * interrupt.  Furthermore, IRQ processing runs lockless so we have
5949          * no IRQ context deadlocks to worry about either.  Rejoice!
5950          */
5951         if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5952                 if (!netif_tx_queue_stopped(txq)) {
5953                         netif_tx_stop_queue(txq);
5954
5955                         /* This is a hard error, log it. */
5956                         netdev_err(dev,
5957                                    "BUG! Tx Ring full when queue awake!\n");
5958                 }
5959                 return NETDEV_TX_BUSY;
5960         }
5961
5962         entry = tnapi->tx_prod;
5963         base_flags = 0;
5964         if (skb->ip_summed == CHECKSUM_PARTIAL)
5965                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5966
5967         mss = skb_shinfo(skb)->gso_size;
5968         if (mss) {
5969                 struct iphdr *iph;
5970                 u32 tcp_opt_len, hdr_len;
5971
5972                 if (skb_header_cloned(skb) &&
5973                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5974                         dev_kfree_skb(skb);
5975                         goto out_unlock;
5976                 }
5977
5978                 iph = ip_hdr(skb);
5979                 tcp_opt_len = tcp_optlen(skb);
5980
5981                 if (skb_is_gso_v6(skb)) {
5982                         hdr_len = skb_headlen(skb) - ETH_HLEN;
5983                 } else {
5984                         u32 ip_tcp_len;
5985
5986                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5987                         hdr_len = ip_tcp_len + tcp_opt_len;
5988
5989                         iph->check = 0;
5990                         iph->tot_len = htons(mss + hdr_len);
5991                 }
5992
5993                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5994                     tg3_flag(tp, TSO_BUG))
5995                         return tg3_tso_bug(tp, skb);
5996
5997                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5998                                TXD_FLAG_CPU_POST_DMA);
5999
6000                 if (tg3_flag(tp, HW_TSO_1) ||
6001                     tg3_flag(tp, HW_TSO_2) ||
6002                     tg3_flag(tp, HW_TSO_3)) {
6003                         tcp_hdr(skb)->check = 0;
6004                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6005                 } else
6006                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6007                                                                  iph->daddr, 0,
6008                                                                  IPPROTO_TCP,
6009                                                                  0);
6010
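
		/* HW_TSO_3 header-length encoding below, decoded for
		 * illustration: hdr_len bits 2-3 land in mss bits 14-15,
		 * bit 4 in base_flags bit 4, and bits 5-9 in base_flags
		 * bits 10-14.  E.g. hdr_len = 0x28 (20-byte IP header +
		 * 20-byte TCP header) sets mss bit 15 and base_flags
		 * bit 10.
		 */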
6011                 if (tg3_flag(tp, HW_TSO_3)) {
6012                         mss |= (hdr_len & 0xc) << 12;
6013                         if (hdr_len & 0x10)
6014                                 base_flags |= 0x00000010;
6015                         base_flags |= (hdr_len & 0x3e0) << 5;
6016                 } else if (tg3_flag(tp, HW_TSO_2))
6017                         mss |= hdr_len << 9;
6018                 else if (tg3_flag(tp, HW_TSO_1) ||
6019                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6020                         if (tcp_opt_len || iph->ihl > 5) {
6021                                 int tsflags;
6022
6023                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6024                                 mss |= (tsflags << 11);
6025                         }
6026                 } else {
6027                         if (tcp_opt_len || iph->ihl > 5) {
6028                                 int tsflags;
6029
6030                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6031                                 base_flags |= tsflags << 12;
6032                         }
6033                 }
6034         }
6035
6036         if (vlan_tx_tag_present(skb))
6037                 base_flags |= (TXD_FLAG_VLAN |
6038                                (vlan_tx_tag_get(skb) << 16));
6039
6040         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6041             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6042                 base_flags |= TXD_FLAG_JMB_PKT;
6043
6044         len = skb_headlen(skb);
6045
6046         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6047         if (pci_dma_mapping_error(tp->pdev, mapping)) {
6048                 dev_kfree_skb(skb);
6049                 goto out_unlock;
6050         }
6051
6052         tnapi->tx_buffers[entry].skb = skb;
6053         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6054
6055         would_hit_hwbug = 0;
6056
6057         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6058                 would_hit_hwbug = 1;
6059
6060         if (tg3_4g_overflow_test(mapping, len))
6061                 would_hit_hwbug = 1;
6062
6063         if (tg3_40bit_overflow_test(tp, mapping, len))
6064                 would_hit_hwbug = 1;
6065
6066         if (tg3_flag(tp, 5701_DMA_BUG))
6067                 would_hit_hwbug = 1;
6068
6069         tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6070                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6071
6072         entry = NEXT_TX(entry);
6073
6074         /* Now loop through additional data fragments, and queue them. */
6075         if (skb_shinfo(skb)->nr_frags > 0) {
6076                 last = skb_shinfo(skb)->nr_frags - 1;
6077                 for (i = 0; i <= last; i++) {
6078                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6079
6080                         len = frag->size;
6081                         mapping = pci_map_page(tp->pdev,
6082                                                frag->page,
6083                                                frag->page_offset,
6084                                                len, PCI_DMA_TODEVICE);
6085
6086                         tnapi->tx_buffers[entry].skb = NULL;
6087                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6088                                            mapping);
6089                         if (pci_dma_mapping_error(tp->pdev, mapping))
6090                                 goto dma_error;
6091
6092                         if (tg3_flag(tp, SHORT_DMA_BUG) &&
6093                             len <= 8)
6094                                 would_hit_hwbug = 1;
6095
6096                         if (tg3_4g_overflow_test(mapping, len))
6097                                 would_hit_hwbug = 1;
6098
6099                         if (tg3_40bit_overflow_test(tp, mapping, len))
6100                                 would_hit_hwbug = 1;
6101
6102                         if (tg3_flag(tp, HW_TSO_1) ||
6103                             tg3_flag(tp, HW_TSO_2) ||
6104                             tg3_flag(tp, HW_TSO_3))
6105                                 tg3_set_txd(tnapi, entry, mapping, len,
6106                                             base_flags, (i == last)|(mss << 1));
6107                         else
6108                                 tg3_set_txd(tnapi, entry, mapping, len,
6109                                             base_flags, (i == last));
6110
6111                         entry = NEXT_TX(entry);
6112                 }
6113         }
6114
6115         if (would_hit_hwbug) {
6116                 tg3_skb_error_unmap(tnapi, skb, i);
6117
6118                 /* If the workaround fails due to memory/mapping
6119                  * failure, silently drop this packet.
6120                  */
6121                 if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
6122                         goto out_unlock;
6123
6124                 entry = NEXT_TX(tnapi->tx_prod);
6125         }
6126
6127         skb_tx_timestamp(skb);
6128
6129         /* Packets are ready; update the Tx producer index locally and on the card. */
6130         tw32_tx_mbox(tnapi->prodmbox, entry);
6131
6132         tnapi->tx_prod = entry;
6133         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6134                 netif_tx_stop_queue(txq);
6135
6136                 /* netif_tx_stop_queue() must be done before checking
6137                  * the tx index in tg3_tx_avail() below, because in
6138                  * tg3_tx(), we update tx index before checking for
6139                  * netif_tx_queue_stopped().
6140                  */
6141                 smp_mb();
6142                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6143                         netif_tx_wake_queue(txq);
6144         }
6145
6146 out_unlock:
6147         mmiowb();
6148
6149         return NETDEV_TX_OK;
6150
6151 dma_error:
6152         tg3_skb_error_unmap(tnapi, skb, i);
6153         dev_kfree_skb(skb);
6154         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6155         return NETDEV_TX_OK;
6156 }
6157
6158 static void tg3_set_loopback(struct net_device *dev, u32 features)
6159 {
6160         struct tg3 *tp = netdev_priv(dev);
6161
6162         if (features & NETIF_F_LOOPBACK) {
6163                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6164                         return;
6165
6166                 /*
6167                  * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
6168                  * loopback mode if Half-Duplex mode was negotiated earlier.
6169                  */
6170                 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
6171
6172                 /* Enable internal MAC loopback mode */
6173                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6174                 spin_lock_bh(&tp->lock);
6175                 tw32(MAC_MODE, tp->mac_mode);
6176                 netif_carrier_on(tp->dev);
6177                 spin_unlock_bh(&tp->lock);
6178                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6179         } else {
6180                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6181                         return;
6182
6183                 /* Disable internal MAC loopback mode */
6184                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6185                 spin_lock_bh(&tp->lock);
6186                 tw32(MAC_MODE, tp->mac_mode);
6187                 /* Force link status check */
6188                 tg3_setup_phy(tp, 1);
6189                 spin_unlock_bh(&tp->lock);
6190                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6191         }
6192 }
6193
6194 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6195 {
6196         struct tg3 *tp = netdev_priv(dev);
6197
6198         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6199                 features &= ~NETIF_F_ALL_TSO;
6200
6201         return features;
6202 }
6203
6204 static int tg3_set_features(struct net_device *dev, u32 features)
6205 {
6206         u32 changed = dev->features ^ features;
6207
6208         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6209                 tg3_set_loopback(dev, features);
6210
6211         return 0;
6212 }
6213
6214 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6215                                int new_mtu)
6216 {
6217         dev->mtu = new_mtu;
6218
6219         if (new_mtu > ETH_DATA_LEN) {
6220                 if (tg3_flag(tp, 5780_CLASS)) {
6221                         netdev_update_features(dev);
6222                         tg3_flag_clear(tp, TSO_CAPABLE);
6223                 } else {
6224                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
6225                 }
6226         } else {
6227                 if (tg3_flag(tp, 5780_CLASS)) {
6228                         tg3_flag_set(tp, TSO_CAPABLE);
6229                         netdev_update_features(dev);
6230                 }
6231                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
6232         }
6233 }
6234
6235 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6236 {
6237         struct tg3 *tp = netdev_priv(dev);
6238         int err;
6239
6240         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6241                 return -EINVAL;
6242
6243         if (!netif_running(dev)) {
6244                 /* We'll just catch it later when the
6245                  * device is brought up.
6246                  */
6247                 tg3_set_mtu(dev, tp, new_mtu);
6248                 return 0;
6249         }
6250
6251         tg3_phy_stop(tp);
6252
6253         tg3_netif_stop(tp);
6254
6255         tg3_full_lock(tp, 1);
6256
6257         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6258
6259         tg3_set_mtu(dev, tp, new_mtu);
6260
6261         err = tg3_restart_hw(tp, 0);
6262
6263         if (!err)
6264                 tg3_netif_start(tp);
6265
6266         tg3_full_unlock(tp);
6267
6268         if (!err)
6269                 tg3_phy_start(tp);
6270
6271         return err;
6272 }
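
/* tg3_change_mtu() follows the driver's standard reconfiguration
 * shape (compare tg3_reset_task() above): phy_stop -> netif_stop ->
 * full_lock(irq_sync = 1) -> halt -> apply the change -> restart_hw ->
 * netif_start -> full_unlock -> phy_start.  Summary for orientation
 * only; deviating from this order would presumably race NAPI polling
 * against a half-reset chip.
 */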
6273
6274 static void tg3_rx_prodring_free(struct tg3 *tp,
6275                                  struct tg3_rx_prodring_set *tpr)
6276 {
6277         int i;
6278
6279         if (tpr != &tp->napi[0].prodring) {
6280                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6281                      i = (i + 1) & tp->rx_std_ring_mask)
6282                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6283                                         tp->rx_pkt_map_sz);
6284
6285                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
6286                         for (i = tpr->rx_jmb_cons_idx;
6287                              i != tpr->rx_jmb_prod_idx;
6288                              i = (i + 1) & tp->rx_jmb_ring_mask) {
6289                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6290                                                 TG3_RX_JMB_MAP_SZ);
6291                         }
6292                 }
6293
6294                 return;
6295         }
6296
6297         for (i = 0; i <= tp->rx_std_ring_mask; i++)
6298                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6299                                 tp->rx_pkt_map_sz);
6300
6301         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6302                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6303                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6304                                         TG3_RX_JMB_MAP_SZ);
6305         }
6306 }
6307
6308 /* Initialize rx rings for packet processing.
6309  *
6310  * The chip has been shut down and the driver detached from
6311  * the networking stack, so no interrupts or new tx packets will
6312  * end up in the driver.  tp->{tx,}lock are held and thus
6313  * we may not sleep.
6314  */
6315 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6316                                  struct tg3_rx_prodring_set *tpr)
6317 {
6318         u32 i, rx_pkt_dma_sz;
6319
6320         tpr->rx_std_cons_idx = 0;
6321         tpr->rx_std_prod_idx = 0;
6322         tpr->rx_jmb_cons_idx = 0;
6323         tpr->rx_jmb_prod_idx = 0;
6324
6325         if (tpr != &tp->napi[0].prodring) {
6326                 memset(&tpr->rx_std_buffers[0], 0,
6327                        TG3_RX_STD_BUFF_RING_SIZE(tp));
6328                 if (tpr->rx_jmb_buffers)
6329                         memset(&tpr->rx_jmb_buffers[0], 0,
6330                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
6331                 goto done;
6332         }
6333
6334         /* Zero out all descriptors. */
6335         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6336
6337         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6338         if (tg3_flag(tp, 5780_CLASS) &&
6339             tp->dev->mtu > ETH_DATA_LEN)
6340                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6341         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6342
6343         /* Initialize invariants of the rings; we only set this
6344          * stuff once.  This works because the card does not
6345          * write into the rx buffer posting rings.
6346          */
6347         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6348                 struct tg3_rx_buffer_desc *rxd;
6349
6350                 rxd = &tpr->rx_std[i];
6351                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6352                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6353                 rxd->opaque = (RXD_OPAQUE_RING_STD |
6354                                (i << RXD_OPAQUE_INDEX_SHIFT));
6355         }
6356
6357         /* Now allocate fresh SKBs for each rx ring. */
6358         for (i = 0; i < tp->rx_pending; i++) {
6359                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6360                         netdev_warn(tp->dev,
6361                                     "Using a smaller RX standard ring. Only "
6362                                     "%d out of %d buffers were allocated "
6363                                     "successfully\n", i, tp->rx_pending);
6364                         if (i == 0)
6365                                 goto initfail;
6366                         tp->rx_pending = i;
6367                         break;
6368                 }
6369         }
6370
6371         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
6372                 goto done;
6373
6374         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6375
6376         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
6377                 goto done;
6378
6379         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6380                 struct tg3_rx_buffer_desc *rxd;
6381
6382                 rxd = &tpr->rx_jmb[i].std;
6383                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6384                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6385                                   RXD_FLAG_JUMBO;
6386                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6387                        (i << RXD_OPAQUE_INDEX_SHIFT));
6388         }
6389
6390         for (i = 0; i < tp->rx_jumbo_pending; i++) {
6391                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6392                         netdev_warn(tp->dev,
6393                                     "Using a smaller RX jumbo ring. Only %d "
6394                                     "out of %d buffers were allocated "
6395                                     "successfully\n", i, tp->rx_jumbo_pending);
6396                         if (i == 0)
6397                                 goto initfail;
6398                         tp->rx_jumbo_pending = i;
6399                         break;
6400                 }
6401         }
6402
6403 done:
6404         return 0;
6405
6406 initfail:
6407         tg3_rx_prodring_free(tp, tpr);
6408         return -ENOMEM;
6409 }
6410
6411 static void tg3_rx_prodring_fini(struct tg3 *tp,
6412                                  struct tg3_rx_prodring_set *tpr)
6413 {
6414         kfree(tpr->rx_std_buffers);
6415         tpr->rx_std_buffers = NULL;
6416         kfree(tpr->rx_jmb_buffers);
6417         tpr->rx_jmb_buffers = NULL;
6418         if (tpr->rx_std) {
6419                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6420                                   tpr->rx_std, tpr->rx_std_mapping);
6421                 tpr->rx_std = NULL;
6422         }
6423         if (tpr->rx_jmb) {
6424                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6425                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
6426                 tpr->rx_jmb = NULL;
6427         }
6428 }
6429
6430 static int tg3_rx_prodring_init(struct tg3 *tp,
6431                                 struct tg3_rx_prodring_set *tpr)
6432 {
6433         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6434                                       GFP_KERNEL);
6435         if (!tpr->rx_std_buffers)
6436                 return -ENOMEM;
6437
6438         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6439                                          TG3_RX_STD_RING_BYTES(tp),
6440                                          &tpr->rx_std_mapping,
6441                                          GFP_KERNEL);
6442         if (!tpr->rx_std)
6443                 goto err_out;
6444
6445         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
6446                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6447                                               GFP_KERNEL);
6448                 if (!tpr->rx_jmb_buffers)
6449                         goto err_out;
6450
6451                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6452                                                  TG3_RX_JMB_RING_BYTES(tp),
6453                                                  &tpr->rx_jmb_mapping,
6454                                                  GFP_KERNEL);
6455                 if (!tpr->rx_jmb)
6456                         goto err_out;
6457         }
6458
6459         return 0;
6460
6461 err_out:
6462         tg3_rx_prodring_fini(tp, tpr);
6463         return -ENOMEM;
6464 }
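
/* Lifecycle of a producer ring set, as implemented by the functions
 * above (summary only, not additional code):
 *
 *	tg3_rx_prodring_init()  - allocate descriptor and bookkeeping
 *	                          memory
 *	tg3_rx_prodring_alloc() - write descriptor invariants and post
 *	                          fresh skbs
 *	tg3_rx_prodring_free()  - unmap and free the posted skbs
 *	tg3_rx_prodring_fini()  - release descriptor and bookkeeping
 *	                          memory
 */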
6465
6466 /* Free up pending packets in all rx/tx rings.
6467  *
6468  * The chip has been shut down and the driver detached from
6469  * the networking stack, so no interrupts or new tx packets will
6470  * end up in the driver.  tp->{tx,}lock is not held and we are not
6471  * in an interrupt context and thus may sleep.
6472  */
6473 static void tg3_free_rings(struct tg3 *tp)
6474 {
6475         int i, j;
6476
6477         for (j = 0; j < tp->irq_cnt; j++) {
6478                 struct tg3_napi *tnapi = &tp->napi[j];
6479
6480                 tg3_rx_prodring_free(tp, &tnapi->prodring);
6481
6482                 if (!tnapi->tx_buffers)
6483                         continue;
6484
6485                 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6486                         struct ring_info *txp;
6487                         struct sk_buff *skb;
6488                         unsigned int k;
6489
6490                         txp = &tnapi->tx_buffers[i];
6491                         skb = txp->skb;
6492
6493                         if (skb == NULL) {
6494                                 i++;
6495                                 continue;
6496                         }
6497
6498                         pci_unmap_single(tp->pdev,
6499                                          dma_unmap_addr(txp, mapping),
6500                                          skb_headlen(skb),
6501                                          PCI_DMA_TODEVICE);
6502                         txp->skb = NULL;
6503
6504                         i++;
6505
6506                         for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6507                                 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6508                                 pci_unmap_page(tp->pdev,
6509                                                dma_unmap_addr(txp, mapping),
6510                                                skb_shinfo(skb)->frags[k].size,
6511                                                PCI_DMA_TODEVICE);
6512                                 i++;
6513                         }
6514
6515                         dev_kfree_skb_any(skb);
6516                 }
6517         }
6518 }
6519
6520 /* Initialize tx/rx rings for packet processing.
6521  *
6522  * The chip has been shut down and the driver detached from
6523  * the networking stack, so no interrupts or new tx packets will
6524  * end up in the driver.  tp->{tx,}lock are held and thus
6525  * we may not sleep.
6526  */
6527 static int tg3_init_rings(struct tg3 *tp)
6528 {
6529         int i;
6530
6531         /* Free up all the SKBs. */
6532         tg3_free_rings(tp);
6533
6534         for (i = 0; i < tp->irq_cnt; i++) {
6535                 struct tg3_napi *tnapi = &tp->napi[i];
6536
6537                 tnapi->last_tag = 0;
6538                 tnapi->last_irq_tag = 0;
6539                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6542
6543                 tnapi->tx_prod = 0;
6544                 tnapi->tx_cons = 0;
6545                 if (tnapi->tx_ring)
6546                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6547
6548                 tnapi->rx_rcb_ptr = 0;
6549                 if (tnapi->rx_rcb)
6550                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6551
6552                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6553                         tg3_free_rings(tp);
6554                         return -ENOMEM;
6555                 }
6556         }
6557
6558         return 0;
6559 }
6560
6561 /*
6562  * Must not be invoked with interrupt sources disabled and
6563  * the hardware shut down.
6564  */
6565 static void tg3_free_consistent(struct tg3 *tp)
6566 {
6567         int i;
6568
6569         for (i = 0; i < tp->irq_cnt; i++) {
6570                 struct tg3_napi *tnapi = &tp->napi[i];
6571
6572                 if (tnapi->tx_ring) {
6573                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6574                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
6575                         tnapi->tx_ring = NULL;
6576                 }
6577
6578                 kfree(tnapi->tx_buffers);
6579                 tnapi->tx_buffers = NULL;
6580
6581                 if (tnapi->rx_rcb) {
6582                         dma_free_coherent(&tp->pdev->dev,
6583                                           TG3_RX_RCB_RING_BYTES(tp),
6584                                           tnapi->rx_rcb,
6585                                           tnapi->rx_rcb_mapping);
6586                         tnapi->rx_rcb = NULL;
6587                 }
6588
6589                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6590
6591                 if (tnapi->hw_status) {
6592                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6593                                           tnapi->hw_status,
6594                                           tnapi->status_mapping);
6595                         tnapi->hw_status = NULL;
6596                 }
6597         }
6598
6599         if (tp->hw_stats) {
6600                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6601                                   tp->hw_stats, tp->stats_mapping);
6602                 tp->hw_stats = NULL;
6603         }
6604 }
6605
6606 /*
6607  * Must not be invoked with interrupt sources disabled and
6608  * the hardware shut down.  Can sleep.
6609  */
6610 static int tg3_alloc_consistent(struct tg3 *tp)
6611 {
6612         int i;
6613
6614         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6615                                           sizeof(struct tg3_hw_stats),
6616                                           &tp->stats_mapping,
6617                                           GFP_KERNEL);
6618         if (!tp->hw_stats)
6619                 goto err_out;
6620
6621         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6622
6623         for (i = 0; i < tp->irq_cnt; i++) {
6624                 struct tg3_napi *tnapi = &tp->napi[i];
6625                 struct tg3_hw_status *sblk;
6626
6627                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6628                                                       TG3_HW_STATUS_SIZE,
6629                                                       &tnapi->status_mapping,
6630                                                       GFP_KERNEL);
6631                 if (!tnapi->hw_status)
6632                         goto err_out;
6633
6634                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6635                 sblk = tnapi->hw_status;
6636
6637                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6638                         goto err_out;
6639
6640                 /* If multivector TSS is enabled, vector 0 does not handle
6641                  * tx interrupts.  Don't allocate any resources for it.
6642                  */
6643                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
6644                     (i && tg3_flag(tp, ENABLE_TSS))) {
6645                         tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6646                                                     TG3_TX_RING_SIZE,
6647                                                     GFP_KERNEL);
6648                         if (!tnapi->tx_buffers)
6649                                 goto err_out;
6650
6651                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6652                                                             TG3_TX_RING_BYTES,
6653                                                         &tnapi->tx_desc_mapping,
6654                                                             GFP_KERNEL);
6655                         if (!tnapi->tx_ring)
6656                                 goto err_out;
6657                 }
6658
6659                 /*
6660                  * When RSS is enabled, the status block format changes
6661                  * slightly.  The "rx_jumbo_consumer", "reserved",
6662                  * and "rx_mini_consumer" members get mapped to the
6663                  * other three rx return ring producer indexes.
6664                  */
6665                 switch (i) {
6666                 default:
6667                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6668                         break;
6669                 case 2:
6670                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6671                         break;
6672                 case 3:
6673                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
6674                         break;
6675                 case 4:
6676                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6677                         break;
6678                 }
6679
6680                 /*
6681                  * If multivector RSS is enabled, vector 0 does not handle
6682                  * rx or tx interrupts.  Don't allocate any resources for it.
6683                  */
6684                 if (!i && tg3_flag(tp, ENABLE_RSS))
6685                         continue;
6686
6687                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6688                                                    TG3_RX_RCB_RING_BYTES(tp),
6689                                                    &tnapi->rx_rcb_mapping,
6690                                                    GFP_KERNEL);
6691                 if (!tnapi->rx_rcb)
6692                         goto err_out;
6693
6694                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6695         }
6696
6697         return 0;
6698
6699 err_out:
6700         tg3_free_consistent(tp);
6701         return -ENOMEM;
6702 }
6703
6704 #define MAX_WAIT_CNT 1000
6705
6706 /* To stop a block, clear the enable bit and poll until it
6707  * clears.  tp->lock is held.
6708  */
6709 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6710 {
6711         unsigned int i;
6712         u32 val;
6713
6714         if (tg3_flag(tp, 5705_PLUS)) {
6715                 switch (ofs) {
6716                 case RCVLSC_MODE:
6717                 case DMAC_MODE:
6718                 case MBFREE_MODE:
6719                 case BUFMGR_MODE:
6720                 case MEMARB_MODE:
6721                         /* We can't enable/disable these bits on the
6722                          * 5705/5750, so just report success.
6723                          */
6724                         return 0;
6725
6726                 default:
6727                         break;
6728                 }
6729         }
6730
6731         val = tr32(ofs);
6732         val &= ~enable_bit;
6733         tw32_f(ofs, val);
6734
6735         for (i = 0; i < MAX_WAIT_CNT; i++) {
6736                 udelay(100);
6737                 val = tr32(ofs);
6738                 if ((val & enable_bit) == 0)
6739                         break;
6740         }
6741
6742         if (i == MAX_WAIT_CNT && !silent) {
6743                 dev_err(&tp->pdev->dev,
6744                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6745                         ofs, enable_bit);
6746                 return -ENODEV;
6747         }
6748
6749         return 0;
6750 }
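
/* Worst-case timing of the poll above, for illustration:
 * MAX_WAIT_CNT (1000) iterations x udelay(100) is roughly 100 ms
 * before tg3_stop_block() gives up with -ENODEV.  tg3_abort_hw()
 * below reuses the same budget when waiting for TX_MODE_ENABLE to
 * clear.
 */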
6751
6752 /* tp->lock is held. */
6753 static int tg3_abort_hw(struct tg3 *tp, int silent)
6754 {
6755         int i, err;
6756
6757         tg3_disable_ints(tp);
6758
6759         tp->rx_mode &= ~RX_MODE_ENABLE;
6760         tw32_f(MAC_RX_MODE, tp->rx_mode);
6761         udelay(10);
6762
6763         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6764         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6765         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6766         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6767         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6768         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6769
6770         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6771         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6772         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6773         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6774         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6775         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6776         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6777
6778         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6779         tw32_f(MAC_MODE, tp->mac_mode);
6780         udelay(40);
6781
6782         tp->tx_mode &= ~TX_MODE_ENABLE;
6783         tw32_f(MAC_TX_MODE, tp->tx_mode);
6784
6785         for (i = 0; i < MAX_WAIT_CNT; i++) {
6786                 udelay(100);
6787                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6788                         break;
6789         }
6790         if (i >= MAX_WAIT_CNT) {
6791                 dev_err(&tp->pdev->dev,
6792                         "%s timed out, TX_MODE_ENABLE will not clear "
6793                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6794                 err |= -ENODEV;
6795         }
6796
6797         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6798         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6799         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6800
6801         tw32(FTQ_RESET, 0xffffffff);
6802         tw32(FTQ_RESET, 0x00000000);
6803
6804         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6805         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6806
6807         for (i = 0; i < tp->irq_cnt; i++) {
6808                 struct tg3_napi *tnapi = &tp->napi[i];
6809                 if (tnapi->hw_status)
6810                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6811         }
6812         if (tp->hw_stats)
6813                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6814
6815         return err;
6816 }
6817
6818 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6819 {
6820         int i;
6821         u32 apedata;
6822
6823         /* NCSI does not support APE events */
6824         if (tg3_flag(tp, APE_HAS_NCSI))
6825                 return;
6826
6827         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6828         if (apedata != APE_SEG_SIG_MAGIC)
6829                 return;
6830
6831         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6832         if (!(apedata & APE_FW_STATUS_READY))
6833                 return;
6834
6835         /* Wait for up to 1 millisecond for APE to service previous event. */
6836         for (i = 0; i < 10; i++) {
6837                 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6838                         return;
6839
6840                 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6841
6842                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6843                         tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6844                                         event | APE_EVENT_STATUS_EVENT_PENDING);
6845
6846                 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6847
6848                 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6849                         break;
6850
6851                 udelay(100);
6852         }
6853
6854         if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6855                 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6856 }
6857
6858 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6859 {
6860         u32 event;
6861         u32 apedata;
6862
6863         if (!tg3_flag(tp, ENABLE_APE))
6864                 return;
6865
6866         switch (kind) {
6867         case RESET_KIND_INIT:
6868                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6869                                 APE_HOST_SEG_SIG_MAGIC);
6870                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6871                                 APE_HOST_SEG_LEN_MAGIC);
6872                 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6873                 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6874                 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6875                         APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6876                 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6877                                 APE_HOST_BEHAV_NO_PHYLOCK);
6878                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6879                                     TG3_APE_HOST_DRVR_STATE_START);
6880
6881                 event = APE_EVENT_STATUS_STATE_START;
6882                 break;
6883         case RESET_KIND_SHUTDOWN:
6884                 /* With the interface we are currently using,
6885                  * APE does not track driver state.  Wiping
6886                  * out the HOST SEGMENT SIGNATURE forces
6887                  * the APE to assume OS absent status.
6888                  */
6889                 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6890
6891                 if (device_may_wakeup(&tp->pdev->dev) &&
6892                     tg3_flag(tp, WOL_ENABLE)) {
6893                         tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6894                                             TG3_APE_HOST_WOL_SPEED_AUTO);
6895                         apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6896                 } else
6897                         apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6898
6899                 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6900
6901                 event = APE_EVENT_STATUS_STATE_UNLOAD;
6902                 break;
6903         case RESET_KIND_SUSPEND:
6904                 event = APE_EVENT_STATUS_STATE_SUSPEND;
6905                 break;
6906         default:
6907                 return;
6908         }
6909
6910         event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6911
6912         tg3_ape_send_event(tp, event);
6913 }
6914
6915 /* tp->lock is held. */
6916 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6917 {
6918         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6919                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6920
6921         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6922                 switch (kind) {
6923                 case RESET_KIND_INIT:
6924                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6925                                       DRV_STATE_START);
6926                         break;
6927
6928                 case RESET_KIND_SHUTDOWN:
6929                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6930                                       DRV_STATE_UNLOAD);
6931                         break;
6932
6933                 case RESET_KIND_SUSPEND:
6934                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6935                                       DRV_STATE_SUSPEND);
6936                         break;
6937
6938                 default:
6939                         break;
6940                 }
6941         }
6942
6943         if (kind == RESET_KIND_INIT ||
6944             kind == RESET_KIND_SUSPEND)
6945                 tg3_ape_driver_state_change(tp, kind);
6946 }
6947
6948 /* tp->lock is held. */
6949 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6950 {
6951         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6952                 switch (kind) {
6953                 case RESET_KIND_INIT:
6954                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6955                                       DRV_STATE_START_DONE);
6956                         break;
6957
6958                 case RESET_KIND_SHUTDOWN:
6959                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6960                                       DRV_STATE_UNLOAD_DONE);
6961                         break;
6962
6963                 default:
6964                         break;
6965                 }
6966         }
6967
6968         if (kind == RESET_KIND_SHUTDOWN)
6969                 tg3_ape_driver_state_change(tp, kind);
6970 }
6971
6972 /* tp->lock is held. */
6973 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6974 {
6975         if (tg3_flag(tp, ENABLE_ASF)) {
6976                 switch (kind) {
6977                 case RESET_KIND_INIT:
6978                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6979                                       DRV_STATE_START);
6980                         break;
6981
6982                 case RESET_KIND_SHUTDOWN:
6983                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6984                                       DRV_STATE_UNLOAD);
6985                         break;
6986
6987                 case RESET_KIND_SUSPEND:
6988                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6989                                       DRV_STATE_SUSPEND);
6990                         break;
6991
6992                 default:
6993                         break;
6994                 }
6995         }
6996 }
6997
6998 static int tg3_poll_fw(struct tg3 *tp)
6999 {
7000         int i;
7001         u32 val;
7002
7003         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7004                 /* Wait up to 20ms for init done. */
7005                 for (i = 0; i < 200; i++) {
7006                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
7007                                 return 0;
7008                         udelay(100);
7009                 }
7010                 return -ENODEV;
7011         }
7012
7013         /* Wait for firmware initialization to complete. */
7014         for (i = 0; i < 100000; i++) {
7015                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
7016                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
7017                         break;
7018                 udelay(10);
7019         }
7020
7021         /* Chip might not be fitted with firmware.  Some Sun onboard
7022          * parts are configured like that.  So don't signal the timeout
7023          * of the above loop as an error, but do report the lack of
7024          * running firmware once.
7025          */
7026         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
7027                 tg3_flag_set(tp, NO_FWARE_REPORTED);
7028
7029                 netdev_info(tp->dev, "No firmware running\n");
7030         }
7031
7032         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7033                 /* The 57765 A0 needs a little more
7034                  * time to do some important work.
7035                  */
7036                 mdelay(10);
7037         }
7038
7039         return 0;
7040 }
7041
7042 /* Save PCI command register before chip reset */
7043 static void tg3_save_pci_state(struct tg3 *tp)
7044 {
7045         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7046 }
7047
7048 /* Restore PCI state after chip reset */
7049 static void tg3_restore_pci_state(struct tg3 *tp)
7050 {
7051         u32 val;
7052
7053         /* Re-enable indirect register accesses. */
7054         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7055                                tp->misc_host_ctrl);
7056
7057         /* Set MAX PCI retry to zero. */
7058         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7059         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7060             tg3_flag(tp, PCIX_MODE))
7061                 val |= PCISTATE_RETRY_SAME_DMA;
7062         /* Allow reads and writes to the APE register and memory space. */
7063         if (tg3_flag(tp, ENABLE_APE))
7064                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7065                        PCISTATE_ALLOW_APE_SHMEM_WR |
7066                        PCISTATE_ALLOW_APE_PSPACE_WR;
7067         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7068
7069         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7070
7071         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7072                 if (tg3_flag(tp, PCI_EXPRESS))
7073                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7074                 else {
7075                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7076                                               tp->pci_cacheline_sz);
7077                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7078                                               tp->pci_lat_timer);
7079                 }
7080         }
7081
7082         /* Make sure PCI-X relaxed ordering bit is clear. */
7083         if (tg3_flag(tp, PCIX_MODE)) {
7084                 u16 pcix_cmd;
7085
7086                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7087                                      &pcix_cmd);
7088                 pcix_cmd &= ~PCI_X_CMD_ERO;
7089                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7090                                       pcix_cmd);
7091         }
7092
7093         if (tg3_flag(tp, 5780_CLASS)) {
7094
7095                 /* Chip reset on 5780 will reset MSI enable bit,
7096                  * so need to restore it.
7097                  */
7098                 if (tg3_flag(tp, USING_MSI)) {
7099                         u16 ctrl;
7100
7101                         pci_read_config_word(tp->pdev,
7102                                              tp->msi_cap + PCI_MSI_FLAGS,
7103                                              &ctrl);
7104                         pci_write_config_word(tp->pdev,
7105                                               tp->msi_cap + PCI_MSI_FLAGS,
7106                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7107                         val = tr32(MSGINT_MODE);
7108                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7109                 }
7110         }
7111 }
7112
7113 static void tg3_stop_fw(struct tg3 *);
7114
7115 /* tp->lock is held. */
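/* Perform a full GRC core-clock reset.  PCI state is saved and
 * restored around the reset, the MAC mode and APE access bits are
 * re-established, and the ASF enable flags are re-probed from NIC
 * SRAM once bootcode has finished.
 */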
7116 static int tg3_chip_reset(struct tg3 *tp)
7117 {
7118         u32 val;
7119         void (*write_op)(struct tg3 *, u32, u32);
7120         int i, err;
7121
7122         tg3_nvram_lock(tp);
7123
7124         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7125
7126         /* No matching tg3_nvram_unlock() after this because
7127          * chip reset below will undo the nvram lock.
7128          */
7129         tp->nvram_lock_cnt = 0;
7130
7131         /* GRC_MISC_CFG core clock reset will clear the memory
7132          * enable bit in PCI register 4 and the MSI enable bit
7133          * on some chips, so we save relevant registers here.
7134          */
7135         tg3_save_pci_state(tp);
7136
7137         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7138             tg3_flag(tp, 5755_PLUS))
7139                 tw32(GRC_FASTBOOT_PC, 0);
7140
7141         /*
7142          * We must avoid the readl() that normally takes place.
7143          * It locks up machines, causes machine checks, and does
7144          * other fun things.  So, temporarily disable the 5701
7145          * hardware workaround while we do the reset.
7146          */
7147         write_op = tp->write32;
7148         if (write_op == tg3_write_flush_reg32)
7149                 tp->write32 = tg3_write32;
7150
7151         /* Prevent the irq handler from reading or writing PCI registers
7152          * during chip reset when the memory enable bit in the PCI command
7153          * register may be cleared.  The chip does not generate interrupts
7154          * at this time, but the irq handler may still be called due to irq
7155          * sharing or irqpoll.
7156          */
7157         tg3_flag_set(tp, CHIP_RESETTING);
7158         for (i = 0; i < tp->irq_cnt; i++) {
7159                 struct tg3_napi *tnapi = &tp->napi[i];
7160                 if (tnapi->hw_status) {
7161                         tnapi->hw_status->status = 0;
7162                         tnapi->hw_status->status_tag = 0;
7163                 }
7164                 tnapi->last_tag = 0;
7165                 tnapi->last_irq_tag = 0;
7166         }
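        /* Ensure the zeroed status blocks are visible to other CPUs
         * before we wait for any in-flight irq handlers to finish.
         */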
7167         smp_mb();
7168
7169         for (i = 0; i < tp->irq_cnt; i++)
7170                 synchronize_irq(tp->napi[i].irq_vec);
7171
7172         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7173                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7174                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7175         }
7176
7177         /* do the reset */
7178         val = GRC_MISC_CFG_CORECLK_RESET;
7179
7180         if (tg3_flag(tp, PCI_EXPRESS)) {
7181                 /* Force PCIe 1.0a mode */
7182                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7183                     !tg3_flag(tp, 57765_PLUS) &&
7184                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7185                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7186                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7187
7188                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7189                         tw32(GRC_MISC_CFG, (1 << 29));
7190                         val |= (1 << 29);
7191                 }
7192         }
7193
7194         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7195                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7196                 tw32(GRC_VCPU_EXT_CTRL,
7197                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7198         }
7199
7200         /* Manage gphy power for all CPMU-absent PCIe devices. */
7201         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7202                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7203
7204         tw32(GRC_MISC_CFG, val);
7205
7206         /* restore 5701 hardware bug workaround write method */
7207         tp->write32 = write_op;
7208
7209         /* Unfortunately, we have to delay before the PCI read back.
7210          * Some 575X chips will not even respond to a PCI cfg access
7211          * when the reset command is given to the chip.
7212          *
7213          * How do these hardware designers expect things to work
7214          * properly if the PCI write is posted for a long period
7215          * of time?  There must always be some way to read a register
7216          * back in order to push out the posted write that performs
7217          * the reset.
7218          *
7219          * For most tg3 variants the trick below has worked.
7220          * Ho hum...
7221          */
7222         udelay(120);
7223
7224         /* Flush PCI posted writes.  The normal MMIO registers
7225          * are inaccessible at this time so this is the only
7226          * way to do this reliably (actually, this is no longer
7227          * the case, see above).  I tried to use indirect
7228          * register read/write but this upset some 5701 variants.
7229          */
7230         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7231
7232         udelay(120);
7233
7234         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7235                 u16 val16;
7236
7237                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7238                         int i;
7239                         u32 cfg_val;
7240
7241                         /* Wait up to 500 ms for link training to complete. */
7242                         for (i = 0; i < 5000; i++)
7243                                 udelay(100);
7244
7245                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7246                         pci_write_config_dword(tp->pdev, 0xc4,
7247                                                cfg_val | (1 << 15));
7248                 }
7249
7250                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7251                 pci_read_config_word(tp->pdev,
7252                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7253                                      &val16);
7254                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7255                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7256                 /*
7257                  * Older PCIe devices only support the 128 byte
7258                  * MPS setting.  Enforce the restriction.
7259                  */
7260                 if (!tg3_flag(tp, CPMU_PRESENT))
7261                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7262                 pci_write_config_word(tp->pdev,
7263                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7264                                       val16);
7265
7266                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7267
7268                 /* Clear error status */
7269                 pci_write_config_word(tp->pdev,
7270                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7271                                       PCI_EXP_DEVSTA_CED |
7272                                       PCI_EXP_DEVSTA_NFED |
7273                                       PCI_EXP_DEVSTA_FED |
7274                                       PCI_EXP_DEVSTA_URD);
7275         }
7276
7277         tg3_restore_pci_state(tp);
7278
7279         tg3_flag_clear(tp, CHIP_RESETTING);
7280         tg3_flag_clear(tp, ERROR_PROCESSED);
7281
7282         val = 0;
7283         if (tg3_flag(tp, 5780_CLASS))
7284                 val = tr32(MEMARB_MODE);
7285         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7286
7287         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7288                 tg3_stop_fw(tp);
7289                 tw32(0x5000, 0x400);
7290         }
7291
7292         tw32(GRC_MODE, tp->grc_mode);
7293
7294         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7295                 val = tr32(0xc4);
7296
7297                 tw32(0xc4, val | (1 << 15));
7298         }
7299
7300         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7301             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7302                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7303                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7304                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7305                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7306         }
7307
7308         if (tg3_flag(tp, ENABLE_APE))
7309                 tp->mac_mode = MAC_MODE_APE_TX_EN |
7310                                MAC_MODE_APE_RX_EN |
7311                                MAC_MODE_TDE_ENABLE;
7312
7313         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7314                 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7315                 val = tp->mac_mode;
7316         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7317                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7318                 val = tp->mac_mode;
7319         } else
7320                 val = 0;
7321
7322         tw32_f(MAC_MODE, val);
7323         udelay(40);
7324
7325         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7326
7327         err = tg3_poll_fw(tp);
7328         if (err)
7329                 return err;
7330
7331         tg3_mdio_start(tp);
7332
7333         if (tg3_flag(tp, PCI_EXPRESS) &&
7334             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7335             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7336             !tg3_flag(tp, 57765_PLUS)) {
7337                 val = tr32(0x7c00);
7338
7339                 tw32(0x7c00, val | (1 << 25));
7340         }
7341
7342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7343                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7344                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7345         }
7346
7347         /* Reprobe ASF enable state.  */
7348         tg3_flag_clear(tp, ENABLE_ASF);
7349         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7350         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7351         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7352                 u32 nic_cfg;
7353
7354                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7355                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7356                         tg3_flag_set(tp, ENABLE_ASF);
7357                         tp->last_event_jiffies = jiffies;
7358                         if (tg3_flag(tp, 5750_PLUS))
7359                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7360                 }
7361         }
7362
7363         return 0;
7364 }
7365
7366 /* tp->lock is held. */
7367 static void tg3_stop_fw(struct tg3 *tp)
7368 {
7369         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7370                 /* Wait for RX cpu to ACK the previous event. */
7371                 tg3_wait_for_event_ack(tp);
7372
7373                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7374
7375                 tg3_generate_fw_event(tp);
7376
7377                 /* Wait for RX cpu to ACK this event. */
7378                 tg3_wait_for_event_ack(tp);
7379         }
7380 }
7381
7382 /* tp->lock is held. */
7383 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7384 {
7385         int err;
7386
7387         tg3_stop_fw(tp);
7388
7389         tg3_write_sig_pre_reset(tp, kind);
7390
7391         tg3_abort_hw(tp, silent);
7392         err = tg3_chip_reset(tp);
7393
7394         __tg3_set_mac_addr(tp, 0);
7395
7396         tg3_write_sig_legacy(tp, kind);
7397         tg3_write_sig_post_reset(tp, kind);
7398
7399         if (err)
7400                 return err;
7401
7402         return 0;
7403 }
7404
7405 #define RX_CPU_SCRATCH_BASE     0x30000
7406 #define RX_CPU_SCRATCH_SIZE     0x04000
7407 #define TX_CPU_SCRATCH_BASE     0x34000
7408 #define TX_CPU_SCRATCH_SIZE     0x04000
7409
7410 /* tp->lock is held. */
7411 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7412 {
7413         int i;
7414
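        /* 5705 and newer chips have no separate TX CPU. */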
7415         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7416
7417         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7418                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7419
7420                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7421                 return 0;
7422         }
7423         if (offset == RX_CPU_BASE) {
7424                 for (i = 0; i < 10000; i++) {
7425                         tw32(offset + CPU_STATE, 0xffffffff);
7426                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7427                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7428                                 break;
7429                 }
7430
7431                 tw32(offset + CPU_STATE, 0xffffffff);
7432                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7433                 udelay(10);
7434         } else {
7435                 for (i = 0; i < 10000; i++) {
7436                         tw32(offset + CPU_STATE, 0xffffffff);
7437                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7438                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7439                                 break;
7440                 }
7441         }
7442
7443         if (i >= 10000) {
7444                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7445                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7446                 return -ENODEV;
7447         }
7448
7449         /* Clear firmware's nvram arbitration. */
7450         if (tg3_flag(tp, NVRAM))
7451                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7452         return 0;
7453 }
7454
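/* Describes one firmware image: its load address (also used as the
 * initial CPU PC), its length in bytes, and a pointer to the
 * big-endian image data.
 */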
7455 struct fw_info {
7456         unsigned int fw_base;
7457         unsigned int fw_len;
7458         const __be32 *fw_data;
7459 };
7460
7461 /* tp->lock is held. */
7462 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7463                                  int cpu_scratch_size, struct fw_info *info)
7464 {
7465         int err, lock_err, i;
7466         void (*write_op)(struct tg3 *, u32, u32);
7467
7468         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
7469                 netdev_err(tp->dev,
7470                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
7471                            __func__);
7472                 return -EINVAL;
7473         }
7474
7475         if (tg3_flag(tp, 5705_PLUS))
7476                 write_op = tg3_write_mem;
7477         else
7478                 write_op = tg3_write_indirect_reg32;
7479
7480         /* It is possible that bootcode is still loading at this point.
7481          * Get the nvram lock first before halting the cpu.
7482          * Get the nvram lock before halting the cpu.
7483         lock_err = tg3_nvram_lock(tp);
7484         err = tg3_halt_cpu(tp, cpu_base);
7485         if (!lock_err)
7486                 tg3_nvram_unlock(tp);
7487         if (err)
7488                 goto out;
7489
7490         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7491                 write_op(tp, cpu_scratch_base + i, 0);
7492         tw32(cpu_base + CPU_STATE, 0xffffffff);
7493         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
7494         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7495                 write_op(tp, (cpu_scratch_base +
7496                               (info->fw_base & 0xffff) +
7497                               (i * sizeof(u32))),
7498                               be32_to_cpu(info->fw_data[i]));
7499
7500         err = 0;
7501
7502 out:
7503         return err;
7504 }
7505
7506 /* tp->lock is held. */
7507 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7508 {
7509         struct fw_info info;
7510         const __be32 *fw_data;
7511         int err, i;
7512
7513         fw_data = (void *)tp->fw->data;
7514
7515         /* Firmware blob starts with version numbers, followed by
7516          * start address and length.  We are setting complete length.
7517          * length = end_address_of_bss - start_address_of_text.
7518          * Remainder is the blob to be loaded contiguously
7519          * from start address. */
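        /* A sketch of that layout (assuming the standard 3-word header):
         *
         *   fw_data[0]    firmware version
         *   fw_data[1]    load address (becomes info.fw_base)
         *   fw_data[2]    stated image length
         *   fw_data[3..]  image body, tp->fw->size - 12 bytes of it
         */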
7520
7521         info.fw_base = be32_to_cpu(fw_data[1]);
7522         info.fw_len = tp->fw->size - 12;
7523         info.fw_data = &fw_data[3];
7524
7525         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7526                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7527                                     &info);
7528         if (err)
7529                 return err;
7530
7531         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7532                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7533                                     &info);
7534         if (err)
7535                 return err;
7536
7537         /* Now startup only the RX cpu. */
7538         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7539         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7540
7541         for (i = 0; i < 5; i++) {
7542                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7543                         break;
7544                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7545                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7546                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7547                 udelay(1000);
7548         }
7549         if (i >= 5) {
7550                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
7551                            "should be %08x\n", __func__,
7552                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7553                 return -ENODEV;
7554         }
7555         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7556         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7557
7558         return 0;
7559 }
7560
7561 /* tp->lock is held. */
7562 static int tg3_load_tso_firmware(struct tg3 *tp)
7563 {
7564         struct fw_info info;
7565         const __be32 *fw_data;
7566         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7567         int err, i;
7568
7569         if (tg3_flag(tp, HW_TSO_1) ||
7570             tg3_flag(tp, HW_TSO_2) ||
7571             tg3_flag(tp, HW_TSO_3))
7572                 return 0;
7573
7574         fw_data = (void *)tp->fw->data;
7575
7576         /* Firmware blob starts with version numbers, followed by
7577          * start address and length.  We are setting complete length.
7578          * length = end_address_of_bss - start_address_of_text.
7579          * Remainder is the blob to be loaded contiguously
7580          * from start address. */
7581
7582         info.fw_base = be32_to_cpu(fw_data[1]);
7583         cpu_scratch_size = tp->fw_len;
7584         info.fw_len = tp->fw->size - 12;
7585         info.fw_data = &fw_data[3];
7586
7587         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7588                 cpu_base = RX_CPU_BASE;
7589                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7590         } else {
7591                 cpu_base = TX_CPU_BASE;
7592                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7593                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7594         }
7595
7596         err = tg3_load_firmware_cpu(tp, cpu_base,
7597                                     cpu_scratch_base, cpu_scratch_size,
7598                                     &info);
7599         if (err)
7600                 return err;
7601
7602         /* Now startup the cpu. */
7603         tw32(cpu_base + CPU_STATE, 0xffffffff);
7604         tw32_f(cpu_base + CPU_PC, info.fw_base);
7605
7606         for (i = 0; i < 5; i++) {
7607                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7608                         break;
7609                 tw32(cpu_base + CPU_STATE, 0xffffffff);
7610                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7611                 tw32_f(cpu_base + CPU_PC, info.fw_base);
7612                 udelay(1000);
7613         }
7614         if (i >= 5) {
7615                 netdev_err(tp->dev,
7616                            "%s failed to set CPU PC: is %08x, should be %08x\n",
7617                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7618                 return -ENODEV;
7619         }
7620         tw32(cpu_base + CPU_STATE, 0xffffffff);
7621         tw32_f(cpu_base + CPU_MODE,  0x00000000);
7622         return 0;
7623 }
7624
7625
7626 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7627 {
7628         struct tg3 *tp = netdev_priv(dev);
7629         struct sockaddr *addr = p;
7630         int err = 0, skip_mac_1 = 0;
7631
7632         if (!is_valid_ether_addr(addr->sa_data))
7633                 return -EINVAL;
7634
7635         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7636
7637         if (!netif_running(dev))
7638                 return 0;
7639
7640         if (tg3_flag(tp, ENABLE_ASF)) {
7641                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7642
7643                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7644                 addr0_low = tr32(MAC_ADDR_0_LOW);
7645                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7646                 addr1_low = tr32(MAC_ADDR_1_LOW);
7647
7648                 /* Skip MAC addr 1 if ASF is using it. */
7649                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7650                     !(addr1_high == 0 && addr1_low == 0))
7651                         skip_mac_1 = 1;
7652         }
7653         spin_lock_bh(&tp->lock);
7654         __tg3_set_mac_addr(tp, skip_mac_1);
7655         spin_unlock_bh(&tp->lock);
7656
7657         return err;
7658 }
7659
7660 /* tp->lock is held. */
7661 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7662                            dma_addr_t mapping, u32 maxlen_flags,
7663                            u32 nic_addr)
7664 {
7665         tg3_write_mem(tp,
7666                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7667                       ((u64) mapping >> 32));
7668         tg3_write_mem(tp,
7669                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7670                       ((u64) mapping & 0xffffffff));
7671         tg3_write_mem(tp,
7672                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7673                        maxlen_flags);
7674
7675         if (!tg3_flag(tp, 5705_PLUS))
7676                 tg3_write_mem(tp,
7677                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7678                               nic_addr);
7679 }
7680
7681 static void __tg3_set_rx_mode(struct net_device *);
7682 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7683 {
7684         int i;
7685
7686         if (!tg3_flag(tp, ENABLE_TSS)) {
7687                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7688                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7689                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7690         } else {
7691                 tw32(HOSTCC_TXCOL_TICKS, 0);
7692                 tw32(HOSTCC_TXMAX_FRAMES, 0);
7693                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7694         }
7695
7696         if (!tg3_flag(tp, ENABLE_RSS)) {
7697                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7698                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7699                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7700         } else {
7701                 tw32(HOSTCC_RXCOL_TICKS, 0);
7702                 tw32(HOSTCC_RXMAX_FRAMES, 0);
7703                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7704         }
7705
7706         if (!tg3_flag(tp, 5705_PLUS)) {
7707                 u32 val = ec->stats_block_coalesce_usecs;
7708
7709                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7710                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7711
7712                 if (!netif_carrier_ok(tp->dev))
7713                         val = 0;
7714
7715                 tw32(HOSTCC_STAT_COAL_TICKS, val);
7716         }
7717
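        /* Each extra MSI-X vector has its own coalescing registers,
         * starting at the _VEC1 offsets and spaced 0x18 bytes apart.
         */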
7718         for (i = 0; i < tp->irq_cnt - 1; i++) {
7719                 u32 reg;
7720
7721                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7722                 tw32(reg, ec->rx_coalesce_usecs);
7723                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7724                 tw32(reg, ec->rx_max_coalesced_frames);
7725                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7726                 tw32(reg, ec->rx_max_coalesced_frames_irq);
7727
7728                 if (tg3_flag(tp, ENABLE_TSS)) {
7729                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7730                         tw32(reg, ec->tx_coalesce_usecs);
7731                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7732                         tw32(reg, ec->tx_max_coalesced_frames);
7733                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7734                         tw32(reg, ec->tx_max_coalesced_frames_irq);
7735                 }
7736         }
7737
7738         for (; i < tp->irq_max - 1; i++) {
7739                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7740                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7741                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7742
7743                 if (tg3_flag(tp, ENABLE_TSS)) {
7744                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7745                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7746                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7747                 }
7748         }
7749 }
7750
7751 /* tp->lock is held. */
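/* Return all send/receive-return ring control blocks, mailboxes, and
 * status blocks to their post-reset state, leaving only the first
 * ring of each type enabled.
 */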
7752 static void tg3_rings_reset(struct tg3 *tp)
7753 {
7754         int i;
7755         u32 stblk, txrcb, rxrcb, limit;
7756         struct tg3_napi *tnapi = &tp->napi[0];
7757
7758         /* Disable all transmit rings but the first. */
7759         if (!tg3_flag(tp, 5705_PLUS))
7760                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7761         else if (tg3_flag(tp, 5717_PLUS))
7762                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7763         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7764                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7765         else
7766                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7767
7768         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7769              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7770                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7771                               BDINFO_FLAGS_DISABLED);
7772
7773
7774         /* Disable all receive return rings but the first. */
7775         if (tg3_flag(tp, 5717_PLUS))
7776                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7777         else if (!tg3_flag(tp, 5705_PLUS))
7778                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7779         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7780                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7781                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7782         else
7783                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7784
7785         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7786              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7787                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7788                               BDINFO_FLAGS_DISABLED);
7789
7790         /* Disable interrupts */
7791         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7792         tp->napi[0].chk_msi_cnt = 0;
7793         tp->napi[0].last_rx_cons = 0;
7794         tp->napi[0].last_tx_cons = 0;
7795
7796         /* Zero mailbox registers. */
7797         if (tg3_flag(tp, SUPPORT_MSIX)) {
7798                 for (i = 1; i < tp->irq_max; i++) {
7799                         tp->napi[i].tx_prod = 0;
7800                         tp->napi[i].tx_cons = 0;
7801                         if (tg3_flag(tp, ENABLE_TSS))
7802                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
7803                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
7804                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7805                         tp->napi[i].chk_msi_cnt = 0;
7806                         tp->napi[i].last_rx_cons = 0;
7807                         tp->napi[i].last_tx_cons = 0;
7808                 }
7809                 if (!tg3_flag(tp, ENABLE_TSS))
7810                         tw32_mailbox(tp->napi[0].prodmbox, 0);
7811         } else {
7812                 tp->napi[0].tx_prod = 0;
7813                 tp->napi[0].tx_cons = 0;
7814                 tw32_mailbox(tp->napi[0].prodmbox, 0);
7815                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7816         }
7817
7818         /* Make sure the NIC-based send BD rings are disabled. */
7819         if (!tg3_flag(tp, 5705_PLUS)) {
7820                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7821                 for (i = 0; i < 16; i++)
7822                         tw32_tx_mbox(mbox + i * 8, 0);
7823         }
7824
7825         txrcb = NIC_SRAM_SEND_RCB;
7826         rxrcb = NIC_SRAM_RCV_RET_RCB;
7827
7828         /* Clear status block in ram. */
7829         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7830
7831         /* Set status block DMA address */
7832         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7833              ((u64) tnapi->status_mapping >> 32));
7834         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7835              ((u64) tnapi->status_mapping & 0xffffffff));
7836
7837         if (tnapi->tx_ring) {
7838                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7839                                (TG3_TX_RING_SIZE <<
7840                                 BDINFO_FLAGS_MAXLEN_SHIFT),
7841                                NIC_SRAM_TX_BUFFER_DESC);
7842                 txrcb += TG3_BDINFO_SIZE;
7843         }
7844
7845         if (tnapi->rx_rcb) {
7846                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7847                                (tp->rx_ret_ring_mask + 1) <<
7848                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7849                 rxrcb += TG3_BDINFO_SIZE;
7850         }
7851
7852         stblk = HOSTCC_STATBLCK_RING1;
7853
7854         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7855                 u64 mapping = (u64)tnapi->status_mapping;
7856                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7857                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7858
7859                 /* Clear status block in ram. */
7860                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7861
7862                 if (tnapi->tx_ring) {
7863                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7864                                        (TG3_TX_RING_SIZE <<
7865                                         BDINFO_FLAGS_MAXLEN_SHIFT),
7866                                        NIC_SRAM_TX_BUFFER_DESC);
7867                         txrcb += TG3_BDINFO_SIZE;
7868                 }
7869
7870                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7871                                ((tp->rx_ret_ring_mask + 1) <<
7872                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7873
7874                 stblk += 8;
7875                 rxrcb += TG3_BDINFO_SIZE;
7876         }
7877 }
7878
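/* Program the RX buffer-descriptor replenish thresholds.  The NIC
 * refills its on-chip BD cache from host memory once the number of
 * cached descriptors drops below these watermarks.
 */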
7879 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
7880 {
7881         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
7882
7883         if (!tg3_flag(tp, 5750_PLUS) ||
7884             tg3_flag(tp, 5780_CLASS) ||
7885             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
7886             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7887                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
7888         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7889                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
7890                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
7891         else
7892                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
7893
7894         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
7895         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
7896
7897         val = min(nic_rep_thresh, host_rep_thresh);
7898         tw32(RCVBDI_STD_THRESH, val);
7899
7900         if (tg3_flag(tp, 57765_PLUS))
7901                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
7902
7903         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7904                 return;
7905
7906         if (!tg3_flag(tp, 5705_PLUS))
7907                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
7908         else
7909                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
7910
7911         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
7912
7913         val = min(bdcache_maxcnt / 2, host_rep_thresh);
7914         tw32(RCVBDI_JUMBO_THRESH, val);
7915
7916         if (tg3_flag(tp, 57765_PLUS))
7917                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
7918 }
7919
7920 /* tp->lock is held. */
7921 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7922 {
7923         u32 val, rdmac_mode;
7924         int i, err, limit;
7925         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7926
7927         tg3_disable_ints(tp);
7928
7929         tg3_stop_fw(tp);
7930
7931         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7932
7933         if (tg3_flag(tp, INIT_COMPLETE))
7934                 tg3_abort_hw(tp, 1);
7935
7936         /* Enable MAC control of LPI */
7937         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7938                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7939                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7940                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7941
7942                 tw32_f(TG3_CPMU_EEE_CTRL,
7943                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7944
7945                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7946                       TG3_CPMU_EEEMD_LPI_IN_TX |
7947                       TG3_CPMU_EEEMD_LPI_IN_RX |
7948                       TG3_CPMU_EEEMD_EEE_ENABLE;
7949
7950                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7951                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7952
7953                 if (tg3_flag(tp, ENABLE_APE))
7954                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7955
7956                 tw32_f(TG3_CPMU_EEE_MODE, val);
7957
7958                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7959                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7960                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7961
7962                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7963                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7964                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7965         }
7966
7967         if (reset_phy)
7968                 tg3_phy_reset(tp);
7969
7970         err = tg3_chip_reset(tp);
7971         if (err)
7972                 return err;
7973
7974         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7975
7976         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7977                 val = tr32(TG3_CPMU_CTRL);
7978                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7979                 tw32(TG3_CPMU_CTRL, val);
7980
7981                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7982                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7983                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7984                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7985
7986                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7987                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7988                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7989                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7990
7991                 val = tr32(TG3_CPMU_HST_ACC);
7992                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7993                 val |= CPMU_HST_ACC_MACCLK_6_25;
7994                 tw32(TG3_CPMU_HST_ACC, val);
7995         }
7996
7997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7998                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7999                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8000                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8001                 tw32(PCIE_PWR_MGMT_THRESH, val);
8002
8003                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8004                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8005
8006                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8007
8008                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8009                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8010         }
8011
8012         if (tg3_flag(tp, L1PLLPD_EN)) {
8013                 u32 grc_mode = tr32(GRC_MODE);
8014
8015                 /* Access the lower 1K of PL PCIE block registers. */
8016                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8017                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8018
8019                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8020                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8021                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8022
8023                 tw32(GRC_MODE, grc_mode);
8024         }
8025
8026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8027                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8028                         u32 grc_mode = tr32(GRC_MODE);
8029
8030                         /* Access the lower 1K of PL PCIE block registers. */
8031                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8032                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8033
8034                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8035                                    TG3_PCIE_PL_LO_PHYCTL5);
8036                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8037                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8038
8039                         tw32(GRC_MODE, grc_mode);
8040                 }
8041
8042                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8043                         u32 grc_mode = tr32(GRC_MODE);
8044
8045                         /* Access the lower 1K of DL PCIE block registers. */
8046                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8047                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8048
8049                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8050                                    TG3_PCIE_DL_LO_FTSMAX);
8051                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8052                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8053                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8054
8055                         tw32(GRC_MODE, grc_mode);
8056                 }
8057
8058                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8059                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8060                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8061                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8062         }
8063
8064         /* This works around an issue with Athlon chipsets on
8065          * B3 tigon3 silicon.  This bit has no effect on any
8066          * other revision.  But do not set this on PCI Express
8067          * chips and don't even touch the clocks if the CPMU is present.
8068          */
8069         if (!tg3_flag(tp, CPMU_PRESENT)) {
8070                 if (!tg3_flag(tp, PCI_EXPRESS))
8071                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8072                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8073         }
8074
8075         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8076             tg3_flag(tp, PCIX_MODE)) {
8077                 val = tr32(TG3PCI_PCISTATE);
8078                 val |= PCISTATE_RETRY_SAME_DMA;
8079                 tw32(TG3PCI_PCISTATE, val);
8080         }
8081
8082         if (tg3_flag(tp, ENABLE_APE)) {
8083                 /* Allow reads and writes to the
8084                  * APE register and memory space.
8085                  */
8086                 val = tr32(TG3PCI_PCISTATE);
8087                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8088                        PCISTATE_ALLOW_APE_SHMEM_WR |
8089                        PCISTATE_ALLOW_APE_PSPACE_WR;
8090                 tw32(TG3PCI_PCISTATE, val);
8091         }
8092
8093         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8094                 /* Enable some hw fixes.  */
8095                 val = tr32(TG3PCI_MSI_DATA);
8096                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8097                 tw32(TG3PCI_MSI_DATA, val);
8098         }
8099
8100         /* Descriptor ring init may make accesses to the
8101          * NIC SRAM area to setup the TX descriptors, so we
8102          * can only do this after the hardware has been
8103          * successfully reset.
8104          */
8105         err = tg3_init_rings(tp);
8106         if (err)
8107                 return err;
8108
8109         if (tg3_flag(tp, 57765_PLUS)) {
8110                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8111                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8112                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8113                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8114                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8115                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8116                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8117                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8118         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8119                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8120                 /* This value is determined during the probe time DMA
8121                  * engine test, tg3_test_dma.
8122                  */
8123                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8124         }
8125
8126         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8127                           GRC_MODE_4X_NIC_SEND_RINGS |
8128                           GRC_MODE_NO_TX_PHDR_CSUM |
8129                           GRC_MODE_NO_RX_PHDR_CSUM);
8130         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8131
8132         /* Pseudo-header checksum is done by hardware logic and not
8133          * the offload processors, so make the chip do the pseudo-
8134          * header checksums on receive.  For transmit it is more
8135          * convenient to do the pseudo-header checksum in software
8136          * as Linux does that on transmit for us in all cases.
8137          */
8138         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8139
8140         tw32(GRC_MODE,
8141              tp->grc_mode |
8142              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8143
8144         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8145         val = tr32(GRC_MISC_CFG);
8146         val &= ~0xff;
8147         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
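        /* Presumably 66 MHz / (65 + 1) == 1 MHz, i.e. a 1 us timer tick. */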
8148         tw32(GRC_MISC_CFG, val);
8149
8150         /* Initialize MBUF/DESC pool. */
8151         if (tg3_flag(tp, 5750_PLUS)) {
8152                 /* Do nothing.  */
8153         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8154                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8155                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8156                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8157                 else
8158                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8159                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8160                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8161         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8162                 int fw_len;
8163
8164                 fw_len = tp->fw_len;
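                /* Round the firmware footprint up to a 128-byte boundary */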
8165                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8166                 tw32(BUFMGR_MB_POOL_ADDR,
8167                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8168                 tw32(BUFMGR_MB_POOL_SIZE,
8169                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8170         }
8171
8172         if (tp->dev->mtu <= ETH_DATA_LEN) {
8173                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8174                      tp->bufmgr_config.mbuf_read_dma_low_water);
8175                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8176                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8177                 tw32(BUFMGR_MB_HIGH_WATER,
8178                      tp->bufmgr_config.mbuf_high_water);
8179         } else {
8180                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8181                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8182                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8183                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8184                 tw32(BUFMGR_MB_HIGH_WATER,
8185                      tp->bufmgr_config.mbuf_high_water_jumbo);
8186         }
8187         tw32(BUFMGR_DMA_LOW_WATER,
8188              tp->bufmgr_config.dma_low_water);
8189         tw32(BUFMGR_DMA_HIGH_WATER,
8190              tp->bufmgr_config.dma_high_water);
8191
8192         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8194                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8195         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8196             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8197             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8198                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8199         tw32(BUFMGR_MODE, val);
8200         for (i = 0; i < 2000; i++) {
8201                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8202                         break;
8203                 udelay(10);
8204         }
8205         if (i >= 2000) {
8206                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8207                 return -ENODEV;
8208         }
8209
8210         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8211                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8212
8213         tg3_setup_rxbd_thresholds(tp);
8214
8215         /* Initialize TG3_BDINFO's at:
8216          *  RCVDBDI_STD_BD:     standard eth size rx ring
8217          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8218          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8219          *
8220          * like so:
8221          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8222          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8223          *                              ring attribute flags
8224          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8225          *
8226          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8227          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8228          *
8229          * The size of each ring is fixed in the firmware, but the location is
8230          * configurable.
8231          */
8232         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8233              ((u64) tpr->rx_std_mapping >> 32));
8234         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8235              ((u64) tpr->rx_std_mapping & 0xffffffff));
8236         if (!tg3_flag(tp, 5717_PLUS))
8237                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8238                      NIC_SRAM_RX_BUFFER_DESC);
8239
8240         /* Disable the mini ring */
8241         if (!tg3_flag(tp, 5705_PLUS))
8242                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8243                      BDINFO_FLAGS_DISABLED);
8244
8245         /* Program the jumbo buffer descriptor ring control
8246          * blocks on those devices that have them.
8247          */
8248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8249             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8250
8251                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8252                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8253                              ((u64) tpr->rx_jmb_mapping >> 32));
8254                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8255                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8256                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8257                               BDINFO_FLAGS_MAXLEN_SHIFT;
8258                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8259                              val | BDINFO_FLAGS_USE_EXT_RECV);
8260                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8261                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8262                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8263                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8264                 } else {
8265                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8266                              BDINFO_FLAGS_DISABLED);
8267                 }
8268
8269                 if (tg3_flag(tp, 57765_PLUS)) {
8270                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8271                                 val = TG3_RX_STD_MAX_SIZE_5700;
8272                         else
8273                                 val = TG3_RX_STD_MAX_SIZE_5717;
8274                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8275                         val |= (TG3_RX_STD_DMA_SZ << 2);
8276                 } else
8277                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8278         } else
8279                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8280
8281         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8282
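        /* Publish the initial std/jumbo producer indices so the chip
         * knows how many RX buffers the host has already posted.
         */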
8283         tpr->rx_std_prod_idx = tp->rx_pending;
8284         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8285
8286         tpr->rx_jmb_prod_idx =
8287                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8288         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8289
8290         tg3_rings_reset(tp);
8291
8292         /* Initialize MAC address and backoff seed. */
8293         __tg3_set_mac_addr(tp, 0);
8294
8295         /* MTU + ethernet header + FCS + optional VLAN tag */
8296         tw32(MAC_RX_MTU_SIZE,
8297              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8298
8299         /* The slot time is changed by tg3_setup_phy if we
8300          * run at gigabit with half duplex.
8301          */
8302         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8303               (6 << TX_LENGTHS_IPG_SHIFT) |
8304               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8305
8306         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8307                 val |= tr32(MAC_TX_LENGTHS) &
8308                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8309                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8310
8311         tw32(MAC_TX_LENGTHS, val);
8312
8313         /* Receive rules. */
8314         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8315         tw32(RCVLPC_CONFIG, 0x0181);
8316
8317         /* Calculate the RDMAC_MODE setting early; we need it to determine
8318          * the RCVLPC_STATE_ENABLE mask.
8319          */
8320         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8321                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8322                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8323                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8324                       RDMAC_MODE_LNGREAD_ENAB);
8325
8326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8327                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8328
8329         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8331             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8332                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8333                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8334                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8335
8336         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8337             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8338                 if (tg3_flag(tp, TSO_CAPABLE) &&
8339                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8340                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8341                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8342                            !tg3_flag(tp, IS_5788)) {
8343                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8344                 }
8345         }
8346
8347         if (tg3_flag(tp, PCI_EXPRESS))
8348                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8349
8350         if (tg3_flag(tp, HW_TSO_1) ||
8351             tg3_flag(tp, HW_TSO_2) ||
8352             tg3_flag(tp, HW_TSO_3))
8353                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8354
8355         if (tg3_flag(tp, 57765_PLUS) ||
8356             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8357             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8358                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8359
8360         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8361                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8362
8363         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8364             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8365             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8366             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8367             tg3_flag(tp, 57765_PLUS)) {
8368                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8369                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8370                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8371                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8372                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8373                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8374                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8375                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8376                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8377                 }
8378                 tw32(TG3_RDMA_RSRVCTRL_REG,
8379                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8380         }
8381
8382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8383             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8384                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8385                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8386                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8387                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8388         }
8389
8390         /* Receive/send statistics. */
8391         if (tg3_flag(tp, 5750_PLUS)) {
8392                 val = tr32(RCVLPC_STATS_ENABLE);
8393                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8394                 tw32(RCVLPC_STATS_ENABLE, val);
8395         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8396                    tg3_flag(tp, TSO_CAPABLE)) {
8397                 val = tr32(RCVLPC_STATS_ENABLE);
8398                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8399                 tw32(RCVLPC_STATS_ENABLE, val);
8400         } else {
8401                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8402         }
8403         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8404         tw32(SNDDATAI_STATSENAB, 0xffffff);
8405         tw32(SNDDATAI_STATSCTRL,
8406              (SNDDATAI_SCTRL_ENABLE |
8407               SNDDATAI_SCTRL_FASTUPD));
8408
8409         /* Setup host coalescing engine. */
8410         tw32(HOSTCC_MODE, 0);
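             /* Wait up to 20 ms (2000 x 10 us) for the coalescing
              * engine to report itself disabled before programming
              * the new parameters below.
              */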
8411         for (i = 0; i < 2000; i++) {
8412                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8413                         break;
8414                 udelay(10);
8415         }
8416
8417         __tg3_set_coalesce(tp, &tp->coal);
8418
8419         if (!tg3_flag(tp, 5705_PLUS)) {
8420                 /* Status/statistics block address.  See tg3_timer,
8421                  * the tg3_periodic_fetch_stats call there, and
8422                  * tg3_get_stats64 to see how this works for 5705/5750 chips.
8423                  */
8424                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8425                      ((u64) tp->stats_mapping >> 32));
8426                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8427                      ((u64) tp->stats_mapping & 0xffffffff));
8428                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8429
8430                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8431
8432                 /* Clear statistics and status block memory areas */
8433                 for (i = NIC_SRAM_STATS_BLK;
8434                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8435                      i += sizeof(u32)) {
8436                         tg3_write_mem(tp, i, 0);
8437                         udelay(40);
8438                 }
8439         }
8440
8441         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8442
8443         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8444         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8445         if (!tg3_flag(tp, 5705_PLUS))
8446                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8447
8448         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8449                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8450                 /* reset to prevent losing 1st rx packet intermittently */
8451                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8452                 udelay(10);
8453         }
8454
8455         if (tg3_flag(tp, ENABLE_APE))
8456                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8457         else
8458                 tp->mac_mode = 0;
8459         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8460                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8461         if (!tg3_flag(tp, 5705_PLUS) &&
8462             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8463             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8464                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8465         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8466         udelay(40);
8467
8468         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8469          * If TG3_FLAG_IS_NIC is zero, we should read the
8470          * register to preserve the GPIO settings for LOMs. The GPIOs,
8471          * whether used as inputs or outputs, are set by boot code after
8472          * reset.
8473          */
8474         if (!tg3_flag(tp, IS_NIC)) {
8475                 u32 gpio_mask;
8476
8477                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8478                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8479                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8480
8481                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8482                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8483                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8484
8485                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8486                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8487
8488                 tp->grc_local_ctrl &= ~gpio_mask;
8489                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8490
8491                 /* GPIO1 must be driven high for eeprom write protect */
8492                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8493                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8494                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8495         }
8496         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8497         udelay(100);
8498
8499         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8500                 val = tr32(MSGINT_MODE);
8501                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8502                 tw32(MSGINT_MODE, val);
8503         }
8504
8505         if (!tg3_flag(tp, 5705_PLUS)) {
8506                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8507                 udelay(40);
8508         }
8509
8510         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8511                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8512                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8513                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8514                WDMAC_MODE_LNGREAD_ENAB);
8515
8516         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8517             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8518                 if (tg3_flag(tp, TSO_CAPABLE) &&
8519                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8520                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8521                         /* nothing */
8522                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8523                            !tg3_flag(tp, IS_5788)) {
8524                         val |= WDMAC_MODE_RX_ACCEL;
8525                 }
8526         }
8527
8528         /* Enable host coalescing bug fix */
8529         if (tg3_flag(tp, 5755_PLUS))
8530                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8531
8532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8533                 val |= WDMAC_MODE_BURST_ALL_DATA;
8534
8535         tw32_f(WDMAC_MODE, val);
8536         udelay(40);
8537
8538         if (tg3_flag(tp, PCIX_MODE)) {
8539                 u16 pcix_cmd;
8540
8541                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8542                                      &pcix_cmd);
8543                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8544                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8545                         pcix_cmd |= PCI_X_CMD_READ_2K;
8546                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8547                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8548                         pcix_cmd |= PCI_X_CMD_READ_2K;
8549                 }
8550                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8551                                       pcix_cmd);
8552         }
8553
8554         tw32_f(RDMAC_MODE, rdmac_mode);
8555         udelay(40);
8556
8557         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8558         if (!tg3_flag(tp, 5705_PLUS))
8559                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8560
8561         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8562                 tw32(SNDDATAC_MODE,
8563                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8564         else
8565                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8566
8567         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8568         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8569         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8570         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8571                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8572         tw32(RCVDBDI_MODE, val);
8573         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8574         if (tg3_flag(tp, HW_TSO_1) ||
8575             tg3_flag(tp, HW_TSO_2) ||
8576             tg3_flag(tp, HW_TSO_3))
8577                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8578         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8579         if (tg3_flag(tp, ENABLE_TSS))
8580                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8581         tw32(SNDBDI_MODE, val);
8582         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8583
8584         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8585                 err = tg3_load_5701_a0_firmware_fix(tp);
8586                 if (err)
8587                         return err;
8588         }
8589
8590         if (tg3_flag(tp, TSO_CAPABLE)) {
8591                 err = tg3_load_tso_firmware(tp);
8592                 if (err)
8593                         return err;
8594         }
8595
8596         tp->tx_mode = TX_MODE_ENABLE;
8597
8598         if (tg3_flag(tp, 5755_PLUS) ||
8599             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8600                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8601
8602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8603                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8604                 tp->tx_mode &= ~val;
8605                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8606         }
8607
8608         tw32_f(MAC_TX_MODE, tp->tx_mode);
8609         udelay(100);
8610
8611         if (tg3_flag(tp, ENABLE_RSS)) {
8612                 u32 reg = MAC_RSS_INDIR_TBL_0;
8613                 u8 *ent = (u8 *)&val;
8614
8615                 /* Setup the indirection table */
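                     /* Four 8-bit ring indices are packed into each
                      * 32-bit register; entries cycle over the
                      * irq_cnt - 1 rx rings, since the first MSI-X
                      * vector handles only link interrupts.
                      */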
8616                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8617                         int idx = i % sizeof(val);
8618
8619                         ent[idx] = i % (tp->irq_cnt - 1);
8620                         if (idx == sizeof(val) - 1) {
8621                                 tw32(reg, val);
8622                                 reg += 4;
8623                         }
8624                 }
8625
8626                 /* Setup the "secret" hash key. */
8627                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8628                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8629                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8630                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8631                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8632                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8633                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8634                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8635                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8636                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8637         }
8638
8639         tp->rx_mode = RX_MODE_ENABLE;
8640         if (tg3_flag(tp, 5755_PLUS))
8641                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8642
8643         if (tg3_flag(tp, ENABLE_RSS))
8644                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8645                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8646                                RX_MODE_RSS_IPV6_HASH_EN |
8647                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8648                                RX_MODE_RSS_IPV4_HASH_EN |
8649                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8650
8651         tw32_f(MAC_RX_MODE, tp->rx_mode);
8652         udelay(10);
8653
8654         tw32(MAC_LED_CTRL, tp->led_ctrl);
8655
8656         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8657         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8658                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8659                 udelay(10);
8660         }
8661         tw32_f(MAC_RX_MODE, tp->rx_mode);
8662         udelay(10);
8663
8664         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8665                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8666                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8667                         /* Set drive transmission level to 1.2V
8668                          * only if the signal pre-emphasis bit is not set. */
8669                         val = tr32(MAC_SERDES_CFG);
8670                         val &= 0xfffff000;
8671                         val |= 0x880;
8672                         tw32(MAC_SERDES_CFG, val);
8673                 }
8674                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8675                         tw32(MAC_SERDES_CFG, 0x616000);
8676         }
8677
8678         /* Prevent chip from dropping frames when flow control
8679          * is enabled.
8680          */
8681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8682                 val = 1;
8683         else
8684                 val = 2;
8685         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8686
8687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8688             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8689                 /* Use hardware link auto-negotiation */
8690                 tg3_flag_set(tp, HW_AUTONEG);
8691         }
8692
8693         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8694             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8695                 u32 tmp;
8696
8697                 tmp = tr32(SERDES_RX_CTRL);
8698                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8699                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8700                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8701                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8702         }
8703
8704         if (!tg3_flag(tp, USE_PHYLIB)) {
8705                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8706                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8707                         tp->link_config.speed = tp->link_config.orig_speed;
8708                         tp->link_config.duplex = tp->link_config.orig_duplex;
8709                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8710                 }
8711
8712                 err = tg3_setup_phy(tp, 0);
8713                 if (err)
8714                         return err;
8715
8716                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8717                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8718                         u32 tmp;
8719
8720                         /* Clear CRC stats. */
8721                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8722                                 tg3_writephy(tp, MII_TG3_TEST1,
8723                                              tmp | MII_TG3_TEST1_CRC_EN);
8724                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8725                         }
8726                 }
8727         }
8728
8729         __tg3_set_rx_mode(tp->dev);
8730
8731         /* Initialize receive rules. */
8732         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8733         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8734         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8735         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8736
8737         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8738                 limit = 8;
8739         else
8740                 limit = 16;
8741         if (tg3_flag(tp, ENABLE_ASF))
8742                 limit -= 4;
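             /* Deliberate fall-through: each case clears one rule,
              * zeroing rules limit - 1 down through 4.  Rules 0 and 1
              * were programmed above; rules 2 and 3 are left as-is.
              */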
8743         switch (limit) {
8744         case 16:
8745                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8746         case 15:
8747                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8748         case 14:
8749                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8750         case 13:
8751                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8752         case 12:
8753                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8754         case 11:
8755                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8756         case 10:
8757                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8758         case 9:
8759                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8760         case 8:
8761                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8762         case 7:
8763                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8764         case 6:
8765                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8766         case 5:
8767                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8768         case 4:
8769                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8770         case 3:
8771                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8772         case 2:
8773         case 1:
8774
8775         default:
8776                 break;
8777         }
8778
8779         if (tg3_flag(tp, ENABLE_APE))
8780                 /* Write our heartbeat update interval to APE. */
8781                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8782                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8783
8784         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8785
8786         return 0;
8787 }
8788
8789 /* Called at device open time to get the chip ready for
8790  * packet processing.  Invoked with tp->lock held.
8791  */
8792 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8793 {
8794         tg3_switch_clocks(tp);
8795
8796         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8797
8798         return tg3_reset_hw(tp, reset_phy);
8799 }
8800
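     /* Accumulate a 32-bit hardware counter into a 64-bit (high/low)
      * software counter, carrying into .high whenever .low wraps.
      */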
8801 #define TG3_STAT_ADD32(PSTAT, REG) \
8802 do {    u32 __val = tr32(REG); \
8803         (PSTAT)->low += __val; \
8804         if ((PSTAT)->low < __val) \
8805                 (PSTAT)->high += 1; \
8806 } while (0)
8807
8808 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8809 {
8810         struct tg3_hw_stats *sp = tp->hw_stats;
8811
8812         if (!netif_carrier_ok(tp->dev))
8813                 return;
8814
8815         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8816         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8817         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8818         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8819         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8820         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8821         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8822         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8823         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8824         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8825         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8826         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8827         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8828
8829         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8830         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8831         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8832         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8833         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8834         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8835         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8836         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8837         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8838         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8839         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8840         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8841         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8842         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8843
8844         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
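             /* 5717 and the A0 steppings of 5719/5720 derive rx
              * discards from the mbuf low-watermark attention bit
              * rather than the RCVLPC discard counter.
              */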
8845         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8846             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
8847             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
8848                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8849         } else {
8850                 u32 val = tr32(HOSTCC_FLOW_ATTN);
8851                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
8852                 if (val) {
8853                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
8854                         sp->rx_discards.low += val;
8855                         if (sp->rx_discards.low < val)
8856                                 sp->rx_discards.high += 1;
8857                 }
8858                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
8859         }
8860         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8861 }
8862
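     /* Work around lost MSIs: if a vector still has work pending but
      * its rx/tx consumer indices have not advanced for two timer
      * ticks, rewrite the interrupt mailbox to re-fire the handler.
      */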
8863 static void tg3_chk_missed_msi(struct tg3 *tp)
8864 {
8865         u32 i;
8866
8867         for (i = 0; i < tp->irq_cnt; i++) {
8868                 struct tg3_napi *tnapi = &tp->napi[i];
8869
8870                 if (tg3_has_work(tnapi)) {
8871                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
8872                             tnapi->last_tx_cons == tnapi->tx_cons) {
8873                                 if (tnapi->chk_msi_cnt < 1) {
8874                                         tnapi->chk_msi_cnt++;
8875                                         return;
8876                                 }
8877                                 tw32_mailbox(tnapi->int_mbox,
8878                                              tnapi->last_tag << 24);
8879                         }
8880                 }
8881                 tnapi->chk_msi_cnt = 0;
8882                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
8883                 tnapi->last_tx_cons = tnapi->tx_cons;
8884         }
8885 }
8886
8887 static void tg3_timer(unsigned long __opaque)
8888 {
8889         struct tg3 *tp = (struct tg3 *) __opaque;
8890
8891         if (tp->irq_sync)
8892                 goto restart_timer;
8893
8894         spin_lock(&tp->lock);
8895
8896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8898                 tg3_chk_missed_msi(tp);
8899
8900         if (!tg3_flag(tp, TAGGED_STATUS)) {
8901                 /* All of this garbage is because, when using non-tagged
8902                  * IRQ status, the mailbox/status_block protocol the chip
8903                  * uses with the cpu is race prone.
8904                  */
8905                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8906                         tw32(GRC_LOCAL_CTRL,
8907                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8908                 } else {
8909                         tw32(HOSTCC_MODE, tp->coalesce_mode |
8910                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8911                 }
8912
8913                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8914                         tg3_flag_set(tp, RESTART_TIMER);
8915                         spin_unlock(&tp->lock);
8916                         schedule_work(&tp->reset_task);
8917                         return;
8918                 }
8919         }
8920
8921         /* This part only runs once per second. */
8922         if (!--tp->timer_counter) {
8923                 if (tg3_flag(tp, 5705_PLUS))
8924                         tg3_periodic_fetch_stats(tp);
8925
8926                 if (tp->setlpicnt && !--tp->setlpicnt)
8927                         tg3_phy_eee_enable(tp);
8928
8929                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
8930                         u32 mac_stat;
8931                         int phy_event;
8932
8933                         mac_stat = tr32(MAC_STATUS);
8934
8935                         phy_event = 0;
8936                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8937                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8938                                         phy_event = 1;
8939                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8940                                 phy_event = 1;
8941
8942                         if (phy_event)
8943                                 tg3_setup_phy(tp, 0);
8944                 } else if (tg3_flag(tp, POLL_SERDES)) {
8945                         u32 mac_stat = tr32(MAC_STATUS);
8946                         int need_setup = 0;
8947
8948                         if (netif_carrier_ok(tp->dev) &&
8949                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8950                                 need_setup = 1;
8951                         }
8952                         if (!netif_carrier_ok(tp->dev) &&
8953                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
8954                                          MAC_STATUS_SIGNAL_DET))) {
8955                                 need_setup = 1;
8956                         }
8957                         if (need_setup) {
8958                                 if (!tp->serdes_counter) {
8959                                         tw32_f(MAC_MODE,
8960                                              (tp->mac_mode &
8961                                               ~MAC_MODE_PORT_MODE_MASK));
8962                                         udelay(40);
8963                                         tw32_f(MAC_MODE, tp->mac_mode);
8964                                         udelay(40);
8965                                 }
8966                                 tg3_setup_phy(tp, 0);
8967                         }
8968                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8969                            tg3_flag(tp, 5780_CLASS)) {
8970                         tg3_serdes_parallel_detect(tp);
8971                 }
8972
8973                 tp->timer_counter = tp->timer_multiplier;
8974         }
8975
8976         /* Heartbeat is only sent once every 2 seconds.
8977          *
8978          * The heartbeat is to tell the ASF firmware that the host
8979          * driver is still alive.  In the event that the OS crashes,
8980          * ASF needs to reset the hardware to free up the FIFO space
8981          * that may be filled with rx packets destined for the host.
8982          * If the FIFO is full, ASF will no longer function properly.
8983          *
8984          * Unintended resets have been reported on real time kernels
8985          * where the timer doesn't run on time.  Netpoll will also have
8986          * the same problem.
8987          *
8988          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8989          * to check the ring condition when the heartbeat is expiring
8990          * before doing the reset.  This will prevent most unintended
8991          * resets.
8992          */
8993         if (!--tp->asf_counter) {
8994                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
8995                         tg3_wait_for_event_ack(tp);
8996
8997                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8998                                       FWCMD_NICDRV_ALIVE3);
8999                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9000                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9001                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9002
9003                         tg3_generate_fw_event(tp);
9004                 }
9005                 tp->asf_counter = tp->asf_multiplier;
9006         }
9007
9008         spin_unlock(&tp->lock);
9009
9010 restart_timer:
9011         tp->timer.expires = jiffies + tp->timer_offset;
9012         add_timer(&tp->timer);
9013 }
9014
9015 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9016 {
9017         irq_handler_t fn;
9018         unsigned long flags;
9019         char *name;
9020         struct tg3_napi *tnapi = &tp->napi[irq_num];
9021
9022         if (tp->irq_cnt == 1)
9023                 name = tp->dev->name;
9024         else {
9025                 name = &tnapi->irq_lbl[0];
9026                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9027                 name[IFNAMSIZ-1] = 0;
9028         }
9029
9030         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9031                 fn = tg3_msi;
9032                 if (tg3_flag(tp, 1SHOT_MSI))
9033                         fn = tg3_msi_1shot;
9034                 flags = 0;
9035         } else {
9036                 fn = tg3_interrupt;
9037                 if (tg3_flag(tp, TAGGED_STATUS))
9038                         fn = tg3_interrupt_tagged;
9039                 flags = IRQF_SHARED;
9040         }
9041
9042         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9043 }
9044
9045 static int tg3_test_interrupt(struct tg3 *tp)
9046 {
9047         struct tg3_napi *tnapi = &tp->napi[0];
9048         struct net_device *dev = tp->dev;
9049         int err, i, intr_ok = 0;
9050         u32 val;
9051
9052         if (!netif_running(dev))
9053                 return -ENODEV;
9054
9055         tg3_disable_ints(tp);
9056
9057         free_irq(tnapi->irq_vec, tnapi);
9058
9059         /*
9060          * Turn off MSI one shot mode.  Otherwise this test has no
9061          * observable way to know whether the interrupt was delivered.
9062          */
9063         if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9064                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9065                 tw32(MSGINT_MODE, val);
9066         }
9067
9068         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9069                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9070         if (err)
9071                 return err;
9072
9073         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9074         tg3_enable_ints(tp);
9075
9076         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9077                tnapi->coal_now);
9078
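             /* Poll for up to ~50 ms; a non-zero interrupt mailbox or
              * a masked PCI interrupt indicates the test ISR ran.
              */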
9079         for (i = 0; i < 5; i++) {
9080                 u32 int_mbox, misc_host_ctrl;
9081
9082                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9083                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9084
9085                 if ((int_mbox != 0) ||
9086                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9087                         intr_ok = 1;
9088                         break;
9089                 }
9090
9091                 msleep(10);
9092         }
9093
9094         tg3_disable_ints(tp);
9095
9096         free_irq(tnapi->irq_vec, tnapi);
9097
9098         err = tg3_request_irq(tp, 0);
9099
9100         if (err)
9101                 return err;
9102
9103         if (intr_ok) {
9104                 /* Reenable MSI one shot mode. */
9105                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9106                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9107                         tw32(MSGINT_MODE, val);
9108                 }
9109                 return 0;
9110         }
9111
9112         return -EIO;
9113 }
9114
9115 /* Returns 0 if the MSI test succeeds, or if the MSI test fails
9116  * but INTx mode is successfully restored.
9117  */
9118 static int tg3_test_msi(struct tg3 *tp)
9119 {
9120         int err;
9121         u16 pci_cmd;
9122
9123         if (!tg3_flag(tp, USING_MSI))
9124                 return 0;
9125
9126         /* Turn off SERR reporting in case MSI terminates with Master
9127          * Abort.
9128          */
9129         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9130         pci_write_config_word(tp->pdev, PCI_COMMAND,
9131                               pci_cmd & ~PCI_COMMAND_SERR);
9132
9133         err = tg3_test_interrupt(tp);
9134
9135         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9136
9137         if (!err)
9138                 return 0;
9139
9140         /* other failures */
9141         if (err != -EIO)
9142                 return err;
9143
9144         /* MSI test failed, go back to INTx mode */
9145         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9146                     "to INTx mode. Please report this failure to the PCI "
9147                     "maintainer and include system chipset information\n");
9148
9149         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9150
9151         pci_disable_msi(tp->pdev);
9152
9153         tg3_flag_clear(tp, USING_MSI);
9154         tp->napi[0].irq_vec = tp->pdev->irq;
9155
9156         err = tg3_request_irq(tp, 0);
9157         if (err)
9158                 return err;
9159
9160         /* Need to reset the chip because the MSI cycle may have terminated
9161          * with Master Abort.
9162          */
9163         tg3_full_lock(tp, 1);
9164
9165         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9166         err = tg3_init_hw(tp, 1);
9167
9168         tg3_full_unlock(tp);
9169
9170         if (err)
9171                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9172
9173         return err;
9174 }
9175
9176 static int tg3_request_firmware(struct tg3 *tp)
9177 {
9178         const __be32 *fw_data;
9179
9180         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9181                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9182                            tp->fw_needed);
9183                 return -ENOENT;
9184         }
9185
9186         fw_data = (void *)tp->fw->data;
9187
9188         /* Firmware blob starts with version numbers, followed by
9189          * the start address and _full_ length including BSS sections
9190          * (which must be longer than the actual data, of course).
9191          */
9192
9193         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9194         if (tp->fw_len < (tp->fw->size - 12)) {
9195                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9196                            tp->fw_len, tp->fw_needed);
9197                 release_firmware(tp->fw);
9198                 tp->fw = NULL;
9199                 return -EINVAL;
9200         }
9201
9202         /* We no longer need firmware; we have it. */
9203         tp->fw_needed = NULL;
9204         return 0;
9205 }
9206
9207 static bool tg3_enable_msix(struct tg3 *tp)
9208 {
9209         int i, rc, cpus = num_online_cpus();
9210         struct msix_entry msix_ent[tp->irq_max];
9211
9212         if (cpus == 1)
9213                 /* Just fall back to the simpler MSI mode. */
9214                 return false;
9215
9216         /*
9217          * We want as many rx rings enabled as there are cpus.
9218          * The first MSIX vector only deals with link interrupts, etc,
9219          * so we add one to the number of vectors we are requesting.
9220          */
9221         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9222
9223         for (i = 0; i < tp->irq_max; i++) {
9224                 msix_ent[i].entry  = i;
9225                 msix_ent[i].vector = 0;
9226         }
9227
9228         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
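             /* pci_enable_msix() returns the number of vectors
              * actually available when it cannot satisfy the full
              * request, so retry with that smaller count.
              */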
9229         if (rc < 0) {
9230                 return false;
9231         } else if (rc != 0) {
9232                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9233                         return false;
9234                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9235                               tp->irq_cnt, rc);
9236                 tp->irq_cnt = rc;
9237         }
9238
9239         for (i = 0; i < tp->irq_max; i++)
9240                 tp->napi[i].irq_vec = msix_ent[i].vector;
9241
9242         netif_set_real_num_tx_queues(tp->dev, 1);
9243         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9244         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9245                 pci_disable_msix(tp->pdev);
9246                 return false;
9247         }
9248
9249         if (tp->irq_cnt > 1) {
9250                 tg3_flag_set(tp, ENABLE_RSS);
9251
9252                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9253                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9254                         tg3_flag_set(tp, ENABLE_TSS);
9255                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9256                 }
9257         }
9258
9259         return true;
9260 }
9261
9262 static void tg3_ints_init(struct tg3 *tp)
9263 {
9264         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9265             !tg3_flag(tp, TAGGED_STATUS)) {
9266                 /* All MSI supporting chips should support tagged
9267                  * status.  Assert that this is the case.
9268                  */
9269                 netdev_warn(tp->dev,
9270                             "MSI without TAGGED_STATUS? Not using MSI\n");
9271                 goto defcfg;
9272         }
9273
9274         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9275                 tg3_flag_set(tp, USING_MSIX);
9276         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9277                 tg3_flag_set(tp, USING_MSI);
9278
9279         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9280                 u32 msi_mode = tr32(MSGINT_MODE);
9281                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9282                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9283                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9284         }
9285 defcfg:
9286         if (!tg3_flag(tp, USING_MSIX)) {
9287                 tp->irq_cnt = 1;
9288                 tp->napi[0].irq_vec = tp->pdev->irq;
9289                 netif_set_real_num_tx_queues(tp->dev, 1);
9290                 netif_set_real_num_rx_queues(tp->dev, 1);
9291         }
9292 }
9293
9294 static void tg3_ints_fini(struct tg3 *tp)
9295 {
9296         if (tg3_flag(tp, USING_MSIX))
9297                 pci_disable_msix(tp->pdev);
9298         else if (tg3_flag(tp, USING_MSI))
9299                 pci_disable_msi(tp->pdev);
9300         tg3_flag_clear(tp, USING_MSI);
9301         tg3_flag_clear(tp, USING_MSIX);
9302         tg3_flag_clear(tp, ENABLE_RSS);
9303         tg3_flag_clear(tp, ENABLE_TSS);
9304 }
9305
9306 static int tg3_open(struct net_device *dev)
9307 {
9308         struct tg3 *tp = netdev_priv(dev);
9309         int i, err;
9310
9311         if (tp->fw_needed) {
9312                 err = tg3_request_firmware(tp);
9313                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9314                         if (err)
9315                                 return err;
9316                 } else if (err) {
9317                         netdev_warn(tp->dev, "TSO capability disabled\n");
9318                         tg3_flag_clear(tp, TSO_CAPABLE);
9319                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9320                         netdev_notice(tp->dev, "TSO capability restored\n");
9321                         tg3_flag_set(tp, TSO_CAPABLE);
9322                 }
9323         }
9324
9325         netif_carrier_off(tp->dev);
9326
9327         err = tg3_power_up(tp);
9328         if (err)
9329                 return err;
9330
9331         tg3_full_lock(tp, 0);
9332
9333         tg3_disable_ints(tp);
9334         tg3_flag_clear(tp, INIT_COMPLETE);
9335
9336         tg3_full_unlock(tp);
9337
9338         /*
9339          * Setup interrupts first so we know how
9340          * many NAPI resources to allocate
9341          */
9342         tg3_ints_init(tp);
9343
9344         /* The placement of this call is tied
9345          * to the setup and use of Host TX descriptors.
9346          */
9347         err = tg3_alloc_consistent(tp);
9348         if (err)
9349                 goto err_out1;
9350
9351         tg3_napi_init(tp);
9352
9353         tg3_napi_enable(tp);
9354
9355         for (i = 0; i < tp->irq_cnt; i++) {
9356                 struct tg3_napi *tnapi = &tp->napi[i];
9357                 err = tg3_request_irq(tp, i);
9358                 if (err) {
9359                         for (i--; i >= 0; i--)
9360                                 free_irq(tnapi->irq_vec, tnapi);
9361                         break;
9362                 }
9363         }
9364
9365         if (err)
9366                 goto err_out2;
9367
9368         tg3_full_lock(tp, 0);
9369
9370         err = tg3_init_hw(tp, 1);
9371         if (err) {
9372                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9373                 tg3_free_rings(tp);
9374         } else {
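                     /* Tagged-status chips get by with a 1 second poll;
                      * 5717/57765 and non-tagged chips use a 10 Hz timer
                      * for the missed-MSI check and the racy status-block
                      * protocol, respectively (see tg3_timer).
                      */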
9375                 if (tg3_flag(tp, TAGGED_STATUS) &&
9376                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9377                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9378                         tp->timer_offset = HZ;
9379                 else
9380                         tp->timer_offset = HZ / 10;
9381
9382                 BUG_ON(tp->timer_offset > HZ);
9383                 tp->timer_counter = tp->timer_multiplier =
9384                         (HZ / tp->timer_offset);
9385                 tp->asf_counter = tp->asf_multiplier =
9386                         ((HZ / tp->timer_offset) * 2);
9387
9388                 init_timer(&tp->timer);
9389                 tp->timer.expires = jiffies + tp->timer_offset;
9390                 tp->timer.data = (unsigned long) tp;
9391                 tp->timer.function = tg3_timer;
9392         }
9393
9394         tg3_full_unlock(tp);
9395
9396         if (err)
9397                 goto err_out3;
9398
9399         if (tg3_flag(tp, USING_MSI)) {
9400                 err = tg3_test_msi(tp);
9401
9402                 if (err) {
9403                         tg3_full_lock(tp, 0);
9404                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9405                         tg3_free_rings(tp);
9406                         tg3_full_unlock(tp);
9407
9408                         goto err_out2;
9409                 }
9410
9411                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9412                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9413
9414                         tw32(PCIE_TRANSACTION_CFG,
9415                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9416                 }
9417         }
9418
9419         tg3_phy_start(tp);
9420
9421         tg3_full_lock(tp, 0);
9422
9423         add_timer(&tp->timer);
9424         tg3_flag_set(tp, INIT_COMPLETE);
9425         tg3_enable_ints(tp);
9426
9427         tg3_full_unlock(tp);
9428
9429         netif_tx_start_all_queues(dev);
9430
9431         /*
9432          * Reset the loopback feature if it was turned on while the device
9433          * was down; make sure that it's restored properly now.
9434          */
9435         if (dev->features & NETIF_F_LOOPBACK)
9436                 tg3_set_loopback(dev, dev->features);
9437
9438         return 0;
9439
9440 err_out3:
9441         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9442                 struct tg3_napi *tnapi = &tp->napi[i];
9443                 free_irq(tnapi->irq_vec, tnapi);
9444         }
9445
9446 err_out2:
9447         tg3_napi_disable(tp);
9448         tg3_napi_fini(tp);
9449         tg3_free_consistent(tp);
9450
9451 err_out1:
9452         tg3_ints_fini(tp);
9453         return err;
9454 }
9455
9456 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9457                                                  struct rtnl_link_stats64 *);
9458 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9459
9460 static int tg3_close(struct net_device *dev)
9461 {
9462         int i;
9463         struct tg3 *tp = netdev_priv(dev);
9464
9465         tg3_napi_disable(tp);
9466         cancel_work_sync(&tp->reset_task);
9467
9468         netif_tx_stop_all_queues(dev);
9469
9470         del_timer_sync(&tp->timer);
9471
9472         tg3_phy_stop(tp);
9473
9474         tg3_full_lock(tp, 1);
9475
9476         tg3_disable_ints(tp);
9477
9478         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9479         tg3_free_rings(tp);
9480         tg3_flag_clear(tp, INIT_COMPLETE);
9481
9482         tg3_full_unlock(tp);
9483
9484         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9485                 struct tg3_napi *tnapi = &tp->napi[i];
9486                 free_irq(tnapi->irq_vec, tnapi);
9487         }
9488
9489         tg3_ints_fini(tp);
9490
9491         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9492
9493         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9494                sizeof(tp->estats_prev));
9495
9496         tg3_napi_fini(tp);
9497
9498         tg3_free_consistent(tp);
9499
9500         tg3_power_down(tp);
9501
9502         netif_carrier_off(tp->dev);
9503
9504         return 0;
9505 }
9506
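     /* Combine the two 32-bit halves of a hardware counter into a u64. */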
9507 static inline u64 get_stat64(tg3_stat64_t *val)
9508 {
9509        return ((u64)val->high << 32) | ((u64)val->low);
9510 }
9511
9512 static u64 calc_crc_errors(struct tg3 *tp)
9513 {
9514         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9515
9516         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9517             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9518              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9519                 u32 val;
9520
9521                 spin_lock_bh(&tp->lock);
9522                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9523                         tg3_writephy(tp, MII_TG3_TEST1,
9524                                      val | MII_TG3_TEST1_CRC_EN);
9525                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9526                 } else
9527                         val = 0;
9528                 spin_unlock_bh(&tp->lock);
9529
9530                 tp->phy_crc_errors += val;
9531
9532                 return tp->phy_crc_errors;
9533         }
9534
9535         return get_stat64(&hw_stats->rx_fcs_errors);
9536 }
9537
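     /* Fold the live hardware counter for @member into the running
      * ethtool total, on top of the snapshot saved at the last close.
      */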
9538 #define ESTAT_ADD(member) \
9539         estats->member =        old_estats->member + \
9540                                 get_stat64(&hw_stats->member)
9541
9542 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9543 {
9544         struct tg3_ethtool_stats *estats = &tp->estats;
9545         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9546         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9547
9548         if (!hw_stats)
9549                 return old_estats;
9550
9551         ESTAT_ADD(rx_octets);
9552         ESTAT_ADD(rx_fragments);
9553         ESTAT_ADD(rx_ucast_packets);
9554         ESTAT_ADD(rx_mcast_packets);
9555         ESTAT_ADD(rx_bcast_packets);
9556         ESTAT_ADD(rx_fcs_errors);
9557         ESTAT_ADD(rx_align_errors);
9558         ESTAT_ADD(rx_xon_pause_rcvd);
9559         ESTAT_ADD(rx_xoff_pause_rcvd);
9560         ESTAT_ADD(rx_mac_ctrl_rcvd);
9561         ESTAT_ADD(rx_xoff_entered);
9562         ESTAT_ADD(rx_frame_too_long_errors);
9563         ESTAT_ADD(rx_jabbers);
9564         ESTAT_ADD(rx_undersize_packets);
9565         ESTAT_ADD(rx_in_length_errors);
9566         ESTAT_ADD(rx_out_length_errors);
9567         ESTAT_ADD(rx_64_or_less_octet_packets);
9568         ESTAT_ADD(rx_65_to_127_octet_packets);
9569         ESTAT_ADD(rx_128_to_255_octet_packets);
9570         ESTAT_ADD(rx_256_to_511_octet_packets);
9571         ESTAT_ADD(rx_512_to_1023_octet_packets);
9572         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9573         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9574         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9575         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9576         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9577
9578         ESTAT_ADD(tx_octets);
9579         ESTAT_ADD(tx_collisions);
9580         ESTAT_ADD(tx_xon_sent);
9581         ESTAT_ADD(tx_xoff_sent);
9582         ESTAT_ADD(tx_flow_control);
9583         ESTAT_ADD(tx_mac_errors);
9584         ESTAT_ADD(tx_single_collisions);
9585         ESTAT_ADD(tx_mult_collisions);
9586         ESTAT_ADD(tx_deferred);
9587         ESTAT_ADD(tx_excessive_collisions);
9588         ESTAT_ADD(tx_late_collisions);
9589         ESTAT_ADD(tx_collide_2times);
9590         ESTAT_ADD(tx_collide_3times);
9591         ESTAT_ADD(tx_collide_4times);
9592         ESTAT_ADD(tx_collide_5times);
9593         ESTAT_ADD(tx_collide_6times);
9594         ESTAT_ADD(tx_collide_7times);
9595         ESTAT_ADD(tx_collide_8times);
9596         ESTAT_ADD(tx_collide_9times);
9597         ESTAT_ADD(tx_collide_10times);
9598         ESTAT_ADD(tx_collide_11times);
9599         ESTAT_ADD(tx_collide_12times);
9600         ESTAT_ADD(tx_collide_13times);
9601         ESTAT_ADD(tx_collide_14times);
9602         ESTAT_ADD(tx_collide_15times);
9603         ESTAT_ADD(tx_ucast_packets);
9604         ESTAT_ADD(tx_mcast_packets);
9605         ESTAT_ADD(tx_bcast_packets);
9606         ESTAT_ADD(tx_carrier_sense_errors);
9607         ESTAT_ADD(tx_discards);
9608         ESTAT_ADD(tx_errors);
9609
9610         ESTAT_ADD(dma_writeq_full);
9611         ESTAT_ADD(dma_write_prioq_full);
9612         ESTAT_ADD(rxbds_empty);
9613         ESTAT_ADD(rx_discards);
9614         ESTAT_ADD(rx_errors);
9615         ESTAT_ADD(rx_threshold_hit);
9616
9617         ESTAT_ADD(dma_readq_full);
9618         ESTAT_ADD(dma_read_prioq_full);
9619         ESTAT_ADD(tx_comp_queue_full);
9620
9621         ESTAT_ADD(ring_set_send_prod_index);
9622         ESTAT_ADD(ring_status_update);
9623         ESTAT_ADD(nic_irqs);
9624         ESTAT_ADD(nic_avoided_irqs);
9625         ESTAT_ADD(nic_tx_threshold_hit);
9626
9627         ESTAT_ADD(mbuf_lwm_thresh_hit);
9628
9629         return estats;
9630 }
9631
9632 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9633                                                  struct rtnl_link_stats64 *stats)
9634 {
9635         struct tg3 *tp = netdev_priv(dev);
9636         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9637         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9638
9639         if (!hw_stats)
9640                 return old_stats;
9641
9642         stats->rx_packets = old_stats->rx_packets +
9643                 get_stat64(&hw_stats->rx_ucast_packets) +
9644                 get_stat64(&hw_stats->rx_mcast_packets) +
9645                 get_stat64(&hw_stats->rx_bcast_packets);
9646
9647         stats->tx_packets = old_stats->tx_packets +
9648                 get_stat64(&hw_stats->tx_ucast_packets) +
9649                 get_stat64(&hw_stats->tx_mcast_packets) +
9650                 get_stat64(&hw_stats->tx_bcast_packets);
9651
9652         stats->rx_bytes = old_stats->rx_bytes +
9653                 get_stat64(&hw_stats->rx_octets);
9654         stats->tx_bytes = old_stats->tx_bytes +
9655                 get_stat64(&hw_stats->tx_octets);
9656
9657         stats->rx_errors = old_stats->rx_errors +
9658                 get_stat64(&hw_stats->rx_errors);
9659         stats->tx_errors = old_stats->tx_errors +
9660                 get_stat64(&hw_stats->tx_errors) +
9661                 get_stat64(&hw_stats->tx_mac_errors) +
9662                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9663                 get_stat64(&hw_stats->tx_discards);
9664
9665         stats->multicast = old_stats->multicast +
9666                 get_stat64(&hw_stats->rx_mcast_packets);
9667         stats->collisions = old_stats->collisions +
9668                 get_stat64(&hw_stats->tx_collisions);
9669
9670         stats->rx_length_errors = old_stats->rx_length_errors +
9671                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9672                 get_stat64(&hw_stats->rx_undersize_packets);
9673
9674         stats->rx_over_errors = old_stats->rx_over_errors +
9675                 get_stat64(&hw_stats->rxbds_empty);
9676         stats->rx_frame_errors = old_stats->rx_frame_errors +
9677                 get_stat64(&hw_stats->rx_align_errors);
9678         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9679                 get_stat64(&hw_stats->tx_discards);
9680         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9681                 get_stat64(&hw_stats->tx_carrier_sense_errors);
9682
9683         stats->rx_crc_errors = old_stats->rx_crc_errors +
9684                 calc_crc_errors(tp);
9685
9686         stats->rx_missed_errors = old_stats->rx_missed_errors +
9687                 get_stat64(&hw_stats->rx_discards);
9688
9689         stats->rx_dropped = tp->rx_dropped;
9690
9691         return stats;
9692 }
9693
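     /* Bit-by-bit Ethernet CRC-32 (reflected polynomial 0xedb88320),
      * used below to hash multicast addresses into the MAC filter.
      */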
9694 static inline u32 calc_crc(unsigned char *buf, int len)
9695 {
9696         u32 reg;
9697         u32 tmp;
9698         int j, k;
9699
9700         reg = 0xffffffff;
9701
9702         for (j = 0; j < len; j++) {
9703                 reg ^= buf[j];
9704
9705                 for (k = 0; k < 8; k++) {
9706                         tmp = reg & 0x01;
9707
9708                         reg >>= 1;
9709
9710                         if (tmp)
9711                                 reg ^= 0xedb88320;
9712                 }
9713         }
9714
9715         return ~reg;
9716 }
9717
9718 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9719 {
9720         /* accept or reject all multicast frames */
9721         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9722         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9723         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9724         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9725 }
9726
9727 static void __tg3_set_rx_mode(struct net_device *dev)
9728 {
9729         struct tg3 *tp = netdev_priv(dev);
9730         u32 rx_mode;
9731
9732         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9733                                   RX_MODE_KEEP_VLAN_TAG);
9734
9735 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9736         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9737          * flag clear.
9738          */
9739         if (!tg3_flag(tp, ENABLE_ASF))
9740                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9741 #endif
9742
9743         if (dev->flags & IFF_PROMISC) {
9744                 /* Promiscuous mode. */
9745                 rx_mode |= RX_MODE_PROMISC;
9746         } else if (dev->flags & IFF_ALLMULTI) {
9747                 /* Accept all multicast. */
9748                 tg3_set_multi(tp, 1);
9749         } else if (netdev_mc_empty(dev)) {
9750                 /* Reject all multicast. */
9751                 tg3_set_multi(tp, 0);
9752         } else {
9753                 /* Accept one or more multicast(s). */
9754                 struct netdev_hw_addr *ha;
9755                 u32 mc_filter[4] = { 0, };
9756                 u32 regidx;
9757                 u32 bit;
9758                 u32 crc;
9759
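                /* The low 7 bits of the inverted CRC select one of 128
                 * hash-filter bits: bits 6:5 choose which of the four
                 * 32-bit MAC_HASH registers, bits 4:0 the bit within it.
                 */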
9760                 netdev_for_each_mc_addr(ha, dev) {
9761                         crc = calc_crc(ha->addr, ETH_ALEN);
9762                         bit = ~crc & 0x7f;
9763                         regidx = (bit & 0x60) >> 5;
9764                         bit &= 0x1f;
9765                         mc_filter[regidx] |= (1 << bit);
9766                 }
9767
9768                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9769                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9770                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9771                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9772         }
9773
9774         if (rx_mode != tp->rx_mode) {
9775                 tp->rx_mode = rx_mode;
9776                 tw32_f(MAC_RX_MODE, rx_mode);
9777                 udelay(10);
9778         }
9779 }
9780
9781 static void tg3_set_rx_mode(struct net_device *dev)
9782 {
9783         struct tg3 *tp = netdev_priv(dev);
9784
9785         if (!netif_running(dev))
9786                 return;
9787
9788         tg3_full_lock(tp, 0);
9789         __tg3_set_rx_mode(dev);
9790         tg3_full_unlock(tp);
9791 }
9792
9793 static int tg3_get_regs_len(struct net_device *dev)
9794 {
9795         return TG3_REG_BLK_SIZE;
9796 }
9797
9798 static void tg3_get_regs(struct net_device *dev,
9799                 struct ethtool_regs *regs, void *_p)
9800 {
9801         struct tg3 *tp = netdev_priv(dev);
9802
9803         regs->version = 0;
9804
9805         memset(_p, 0, TG3_REG_BLK_SIZE);
9806
9807         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9808                 return;
9809
9810         tg3_full_lock(tp, 0);
9811
9812         tg3_dump_legacy_regs(tp, (u32 *)_p);
9813
9814         tg3_full_unlock(tp);
9815 }
9816
9817 static int tg3_get_eeprom_len(struct net_device *dev)
9818 {
9819         struct tg3 *tp = netdev_priv(dev);
9820
9821         return tp->nvram_size;
9822 }
9823
9824 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9825 {
9826         struct tg3 *tp = netdev_priv(dev);
9827         int ret;
9828         u8  *pd;
9829         u32 i, offset, len, b_offset, b_count;
9830         __be32 val;
9831
9832         if (tg3_flag(tp, NO_NVRAM))
9833                 return -EINVAL;
9834
9835         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9836                 return -EAGAIN;
9837
9838         offset = eeprom->offset;
9839         len = eeprom->len;
9840         eeprom->len = 0;
9841
9842         eeprom->magic = TG3_EEPROM_MAGIC;
9843
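        /* NVRAM is read in aligned 32-bit words, so an arbitrary byte
         * range is serviced in up to three steps: a partial leading
         * word, whole aligned words, then a partial trailing word.
         */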
9844         if (offset & 3) {
9845                 /* adjustments to start on required 4 byte boundary */
9846                 b_offset = offset & 3;
9847                 b_count = 4 - b_offset;
9848                 if (b_count > len) {
9849                         /* i.e. offset=1 len=2 */
9850                         b_count = len;
9851                 }
9852                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9853                 if (ret)
9854                         return ret;
9855                 memcpy(data, ((char *)&val) + b_offset, b_count);
9856                 len -= b_count;
9857                 offset += b_count;
9858                 eeprom->len += b_count;
9859         }
9860
9861         /* read bytes up to the last 4 byte boundary */
9862         pd = &data[eeprom->len];
9863         for (i = 0; i < (len - (len & 3)); i += 4) {
9864                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9865                 if (ret) {
9866                         eeprom->len += i;
9867                         return ret;
9868                 }
9869                 memcpy(pd + i, &val, 4);
9870         }
9871         eeprom->len += i;
9872
9873         if (len & 3) {
9874                 /* read last bytes not ending on 4 byte boundary */
9875                 pd = &data[eeprom->len];
9876                 b_count = len & 3;
9877                 b_offset = offset + len - b_count;
9878                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9879                 if (ret)
9880                         return ret;
9881                 memcpy(pd, &val, b_count);
9882                 eeprom->len += b_count;
9883         }
9884         return 0;
9885 }
9886
9887 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9888
9889 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9890 {
9891         struct tg3 *tp = netdev_priv(dev);
9892         int ret;
9893         u32 offset, len, b_offset, odd_len;
9894         u8 *buf;
9895         __be32 start, end;
9896
9897         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9898                 return -EAGAIN;
9899
9900         if (tg3_flag(tp, NO_NVRAM) ||
9901             eeprom->magic != TG3_EEPROM_MAGIC)
9902                 return -EINVAL;
9903
9904         offset = eeprom->offset;
9905         len = eeprom->len;
9906
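        /* NVRAM writes must cover whole aligned 32-bit words.  Widen the
         * request to word boundaries, reading back the boundary words so
         * bytes outside the caller's range are rewritten unchanged.
         */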
9907         if ((b_offset = (offset & 3))) {
9908                 /* adjustments to start on required 4 byte boundary */
9909                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9910                 if (ret)
9911                         return ret;
9912                 len += b_offset;
9913                 offset &= ~3;
9914                 if (len < 4)
9915                         len = 4;
9916         }
9917
9918         odd_len = 0;
9919         if (len & 3) {
9920                 /* adjustments to end on required 4 byte boundary */
9921                 odd_len = 1;
9922                 len = (len + 3) & ~3;
9923                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9924                 if (ret)
9925                         return ret;
9926         }
9927
9928         buf = data;
9929         if (b_offset || odd_len) {
9930                 buf = kmalloc(len, GFP_KERNEL);
9931                 if (!buf)
9932                         return -ENOMEM;
9933                 if (b_offset)
9934                         memcpy(buf, &start, 4);
9935                 if (odd_len)
9936                         memcpy(buf+len-4, &end, 4);
9937                 memcpy(buf + b_offset, data, eeprom->len);
9938         }
9939
9940         ret = tg3_nvram_write_block(tp, offset, len, buf);
9941
9942         if (buf != data)
9943                 kfree(buf);
9944
9945         return ret;
9946 }
9947
9948 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9949 {
9950         struct tg3 *tp = netdev_priv(dev);
9951
9952         if (tg3_flag(tp, USE_PHYLIB)) {
9953                 struct phy_device *phydev;
9954                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9955                         return -EAGAIN;
9956                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9957                 return phy_ethtool_gset(phydev, cmd);
9958         }
9959
9960         cmd->supported = (SUPPORTED_Autoneg);
9961
9962         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9963                 cmd->supported |= (SUPPORTED_1000baseT_Half |
9964                                    SUPPORTED_1000baseT_Full);
9965
9966         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9967                 cmd->supported |= (SUPPORTED_100baseT_Half |
9968                                   SUPPORTED_100baseT_Full |
9969                                   SUPPORTED_10baseT_Half |
9970                                   SUPPORTED_10baseT_Full |
9971                                   SUPPORTED_TP);
9972                 cmd->port = PORT_TP;
9973         } else {
9974                 cmd->supported |= SUPPORTED_FIBRE;
9975                 cmd->port = PORT_FIBRE;
9976         }
9977
9978         cmd->advertising = tp->link_config.advertising;
9979         if (tg3_flag(tp, PAUSE_AUTONEG)) {
9980                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
9981                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9982                                 cmd->advertising |= ADVERTISED_Pause;
9983                         } else {
9984                                 cmd->advertising |= ADVERTISED_Pause |
9985                                                     ADVERTISED_Asym_Pause;
9986                         }
9987                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
9988                         cmd->advertising |= ADVERTISED_Asym_Pause;
9989                 }
9990         }
9991         if (netif_running(dev)) {
9992                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
9993                 cmd->duplex = tp->link_config.active_duplex;
9994         } else {
9995                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
9996                 cmd->duplex = DUPLEX_INVALID;
9997         }
9998         cmd->phy_address = tp->phy_addr;
9999         cmd->transceiver = XCVR_INTERNAL;
10000         cmd->autoneg = tp->link_config.autoneg;
10001         cmd->maxtxpkt = 0;
10002         cmd->maxrxpkt = 0;
10003         return 0;
10004 }
10005
10006 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10007 {
10008         struct tg3 *tp = netdev_priv(dev);
10009         u32 speed = ethtool_cmd_speed(cmd);
10010
10011         if (tg3_flag(tp, USE_PHYLIB)) {
10012                 struct phy_device *phydev;
10013                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10014                         return -EAGAIN;
10015                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10016                 return phy_ethtool_sset(phydev, cmd);
10017         }
10018
10019         if (cmd->autoneg != AUTONEG_ENABLE &&
10020             cmd->autoneg != AUTONEG_DISABLE)
10021                 return -EINVAL;
10022
10023         if (cmd->autoneg == AUTONEG_DISABLE &&
10024             cmd->duplex != DUPLEX_FULL &&
10025             cmd->duplex != DUPLEX_HALF)
10026                 return -EINVAL;
10027
10028         if (cmd->autoneg == AUTONEG_ENABLE) {
10029                 u32 mask = ADVERTISED_Autoneg |
10030                            ADVERTISED_Pause |
10031                            ADVERTISED_Asym_Pause;
10032
10033                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10034                         mask |= ADVERTISED_1000baseT_Half |
10035                                 ADVERTISED_1000baseT_Full;
10036
10037                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10038                         mask |= ADVERTISED_100baseT_Half |
10039                                 ADVERTISED_100baseT_Full |
10040                                 ADVERTISED_10baseT_Half |
10041                                 ADVERTISED_10baseT_Full |
10042                                 ADVERTISED_TP;
10043                 else
10044                         mask |= ADVERTISED_FIBRE;
10045
10046                 if (cmd->advertising & ~mask)
10047                         return -EINVAL;
10048
10049                 mask &= (ADVERTISED_1000baseT_Half |
10050                          ADVERTISED_1000baseT_Full |
10051                          ADVERTISED_100baseT_Half |
10052                          ADVERTISED_100baseT_Full |
10053                          ADVERTISED_10baseT_Half |
10054                          ADVERTISED_10baseT_Full);
10055
10056                 cmd->advertising &= mask;
10057         } else {
10058                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10059                         if (speed != SPEED_1000)
10060                                 return -EINVAL;
10061
10062                         if (cmd->duplex != DUPLEX_FULL)
10063                                 return -EINVAL;
10064                 } else {
10065                         if (speed != SPEED_100 &&
10066                             speed != SPEED_10)
10067                                 return -EINVAL;
10068                 }
10069         }
10070
10071         tg3_full_lock(tp, 0);
10072
10073         tp->link_config.autoneg = cmd->autoneg;
10074         if (cmd->autoneg == AUTONEG_ENABLE) {
10075                 tp->link_config.advertising = (cmd->advertising |
10076                                               ADVERTISED_Autoneg);
10077                 tp->link_config.speed = SPEED_INVALID;
10078                 tp->link_config.duplex = DUPLEX_INVALID;
10079         } else {
10080                 tp->link_config.advertising = 0;
10081                 tp->link_config.speed = speed;
10082                 tp->link_config.duplex = cmd->duplex;
10083         }
10084
10085         tp->link_config.orig_speed = tp->link_config.speed;
10086         tp->link_config.orig_duplex = tp->link_config.duplex;
10087         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10088
10089         if (netif_running(dev))
10090                 tg3_setup_phy(tp, 1);
10091
10092         tg3_full_unlock(tp);
10093
10094         return 0;
10095 }
10096
10097 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10098 {
10099         struct tg3 *tp = netdev_priv(dev);
10100
10101         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10102         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10103         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10104         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10105 }
10106
10107 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10108 {
10109         struct tg3 *tp = netdev_priv(dev);
10110
10111         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10112                 wol->supported = WAKE_MAGIC;
10113         else
10114                 wol->supported = 0;
10115         wol->wolopts = 0;
10116         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10117                 wol->wolopts = WAKE_MAGIC;
10118         memset(&wol->sopass, 0, sizeof(wol->sopass));
10119 }
10120
10121 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10122 {
10123         struct tg3 *tp = netdev_priv(dev);
10124         struct device *dp = &tp->pdev->dev;
10125
10126         if (wol->wolopts & ~WAKE_MAGIC)
10127                 return -EINVAL;
10128         if ((wol->wolopts & WAKE_MAGIC) &&
10129             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10130                 return -EINVAL;
10131
10132         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10133
10134         spin_lock_bh(&tp->lock);
10135         if (device_may_wakeup(dp))
10136                 tg3_flag_set(tp, WOL_ENABLE);
10137         else
10138                 tg3_flag_clear(tp, WOL_ENABLE);
10139         spin_unlock_bh(&tp->lock);
10140
10141         return 0;
10142 }
10143
10144 static u32 tg3_get_msglevel(struct net_device *dev)
10145 {
10146         struct tg3 *tp = netdev_priv(dev);
10147         return tp->msg_enable;
10148 }
10149
10150 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10151 {
10152         struct tg3 *tp = netdev_priv(dev);
10153         tp->msg_enable = value;
10154 }
10155
10156 static int tg3_nway_reset(struct net_device *dev)
10157 {
10158         struct tg3 *tp = netdev_priv(dev);
10159         int r;
10160
10161         if (!netif_running(dev))
10162                 return -EAGAIN;
10163
10164         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10165                 return -EINVAL;
10166
10167         if (tg3_flag(tp, USE_PHYLIB)) {
10168                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10169                         return -EAGAIN;
10170                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10171         } else {
10172                 u32 bmcr;
10173
10174                 spin_lock_bh(&tp->lock);
10175                 r = -EINVAL;
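                /* BMCR is read twice here; the first result is thrown
                 * away and only the second, checked read is used,
                 * seemingly a dummy read to let the PHY value settle.
                 */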
10176                 tg3_readphy(tp, MII_BMCR, &bmcr);
10177                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10178                     ((bmcr & BMCR_ANENABLE) ||
10179                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10180                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10181                                                    BMCR_ANENABLE);
10182                         r = 0;
10183                 }
10184                 spin_unlock_bh(&tp->lock);
10185         }
10186
10187         return r;
10188 }
10189
10190 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10191 {
10192         struct tg3 *tp = netdev_priv(dev);
10193
10194         ering->rx_max_pending = tp->rx_std_ring_mask;
10195         ering->rx_mini_max_pending = 0;
10196         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10197                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10198         else
10199                 ering->rx_jumbo_max_pending = 0;
10200
10201         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10202
10203         ering->rx_pending = tp->rx_pending;
10204         ering->rx_mini_pending = 0;
10205         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10206                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10207         else
10208                 ering->rx_jumbo_pending = 0;
10209
10210         ering->tx_pending = tp->napi[0].tx_pending;
10211 }
10212
10213 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10214 {
10215         struct tg3 *tp = netdev_priv(dev);
10216         int i, irq_sync = 0, err = 0;
10217
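        /* Reject sizes beyond the hardware ring masks, and require room
         * in the tx ring for at least one maximally fragmented skb
         * (three of them on chips needing the TSO bug workaround).
         */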
10218         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10219             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10220             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10221             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10222             (tg3_flag(tp, TSO_BUG) &&
10223              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10224                 return -EINVAL;
10225
10226         if (netif_running(dev)) {
10227                 tg3_phy_stop(tp);
10228                 tg3_netif_stop(tp);
10229                 irq_sync = 1;
10230         }
10231
10232         tg3_full_lock(tp, irq_sync);
10233
10234         tp->rx_pending = ering->rx_pending;
10235
10236         if (tg3_flag(tp, MAX_RXPEND_64) &&
10237             tp->rx_pending > 63)
10238                 tp->rx_pending = 63;
10239         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10240
10241         for (i = 0; i < tp->irq_max; i++)
10242                 tp->napi[i].tx_pending = ering->tx_pending;
10243
10244         if (netif_running(dev)) {
10245                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10246                 err = tg3_restart_hw(tp, 1);
10247                 if (!err)
10248                         tg3_netif_start(tp);
10249         }
10250
10251         tg3_full_unlock(tp);
10252
10253         if (irq_sync && !err)
10254                 tg3_phy_start(tp);
10255
10256         return err;
10257 }
10258
10259 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10260 {
10261         struct tg3 *tp = netdev_priv(dev);
10262
10263         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10264
10265         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10266                 epause->rx_pause = 1;
10267         else
10268                 epause->rx_pause = 0;
10269
10270         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10271                 epause->tx_pause = 1;
10272         else
10273                 epause->tx_pause = 0;
10274 }
10275
10276 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10277 {
10278         struct tg3 *tp = netdev_priv(dev);
10279         int err = 0;
10280
10281         if (tg3_flag(tp, USE_PHYLIB)) {
10282                 u32 newadv;
10283                 struct phy_device *phydev;
10284
10285                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10286
10287                 if (!(phydev->supported & SUPPORTED_Pause) ||
10288                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10289                      (epause->rx_pause != epause->tx_pause)))
10290                         return -EINVAL;
10291
10292                 tp->link_config.flowctrl = 0;
10293                 if (epause->rx_pause) {
10294                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10295
10296                         if (epause->tx_pause) {
10297                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10298                                 newadv = ADVERTISED_Pause;
10299                         } else
10300                                 newadv = ADVERTISED_Pause |
10301                                          ADVERTISED_Asym_Pause;
10302                 } else if (epause->tx_pause) {
10303                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10304                         newadv = ADVERTISED_Asym_Pause;
10305                 } else
10306                         newadv = 0;
10307
10308                 if (epause->autoneg)
10309                         tg3_flag_set(tp, PAUSE_AUTONEG);
10310                 else
10311                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10312
10313                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10314                         u32 oldadv = phydev->advertising &
10315                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10316                         if (oldadv != newadv) {
10317                                 phydev->advertising &=
10318                                         ~(ADVERTISED_Pause |
10319                                           ADVERTISED_Asym_Pause);
10320                                 phydev->advertising |= newadv;
10321                                 if (phydev->autoneg) {
10322                                         /*
10323                                          * Always renegotiate the link to
10324                                          * inform our link partner of our
10325                                          * flow control settings, even if the
10326                                          * flow control is forced.  Let
10327                                          * tg3_adjust_link() do the final
10328                                          * flow control setup.
10329                                          */
10330                                         return phy_start_aneg(phydev);
10331                                 }
10332                         }
10333
10334                         if (!epause->autoneg)
10335                                 tg3_setup_flow_control(tp, 0, 0);
10336                 } else {
10337                         tp->link_config.orig_advertising &=
10338                                         ~(ADVERTISED_Pause |
10339                                           ADVERTISED_Asym_Pause);
10340                         tp->link_config.orig_advertising |= newadv;
10341                 }
10342         } else {
10343                 int irq_sync = 0;
10344
10345                 if (netif_running(dev)) {
10346                         tg3_netif_stop(tp);
10347                         irq_sync = 1;
10348                 }
10349
10350                 tg3_full_lock(tp, irq_sync);
10351
10352                 if (epause->autoneg)
10353                         tg3_flag_set(tp, PAUSE_AUTONEG);
10354                 else
10355                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10356                 if (epause->rx_pause)
10357                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10358                 else
10359                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10360                 if (epause->tx_pause)
10361                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10362                 else
10363                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10364
10365                 if (netif_running(dev)) {
10366                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10367                         err = tg3_restart_hw(tp, 1);
10368                         if (!err)
10369                                 tg3_netif_start(tp);
10370                 }
10371
10372                 tg3_full_unlock(tp);
10373         }
10374
10375         return err;
10376 }
10377
10378 static int tg3_get_sset_count(struct net_device *dev, int sset)
10379 {
10380         switch (sset) {
10381         case ETH_SS_TEST:
10382                 return TG3_NUM_TEST;
10383         case ETH_SS_STATS:
10384                 return TG3_NUM_STATS;
10385         default:
10386                 return -EOPNOTSUPP;
10387         }
10388 }
10389
10390 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10391 {
10392         switch (stringset) {
10393         case ETH_SS_STATS:
10394                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10395                 break;
10396         case ETH_SS_TEST:
10397                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10398                 break;
10399         default:
10400                 WARN_ON(1);     /* we need a WARN() */
10401                 break;
10402         }
10403 }
10404
10405 static int tg3_set_phys_id(struct net_device *dev,
10406                             enum ethtool_phys_id_state state)
10407 {
10408         struct tg3 *tp = netdev_priv(dev);
10409
10410         if (!netif_running(tp->dev))
10411                 return -EAGAIN;
10412
10413         switch (state) {
10414         case ETHTOOL_ID_ACTIVE:
10415                 return 1;       /* cycle on/off once per second */
10416
10417         case ETHTOOL_ID_ON:
10418                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10419                      LED_CTRL_1000MBPS_ON |
10420                      LED_CTRL_100MBPS_ON |
10421                      LED_CTRL_10MBPS_ON |
10422                      LED_CTRL_TRAFFIC_OVERRIDE |
10423                      LED_CTRL_TRAFFIC_BLINK |
10424                      LED_CTRL_TRAFFIC_LED);
10425                 break;
10426
10427         case ETHTOOL_ID_OFF:
10428                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10429                      LED_CTRL_TRAFFIC_OVERRIDE);
10430                 break;
10431
10432         case ETHTOOL_ID_INACTIVE:
10433                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10434                 break;
10435         }
10436
10437         return 0;
10438 }
10439
10440 static void tg3_get_ethtool_stats(struct net_device *dev,
10441                                    struct ethtool_stats *estats, u64 *tmp_stats)
10442 {
10443         struct tg3 *tp = netdev_priv(dev);
10444         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10445 }
10446
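/* Locate and read the VPD block.  An extended VPD area described by the
 * NVRAM directory is preferred; otherwise the fixed legacy offset is
 * used, and NVRAM images without the standard EEPROM magic are read
 * through the PCI VPD capability instead.
 */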
10447 static __be32 *tg3_vpd_readblock(struct tg3 *tp)
10448 {
10449         int i;
10450         __be32 *buf;
10451         u32 offset = 0, len = 0;
10452         u32 magic, val;
10453
10454         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10455                 return NULL;
10456
10457         if (magic == TG3_EEPROM_MAGIC) {
10458                 for (offset = TG3_NVM_DIR_START;
10459                      offset < TG3_NVM_DIR_END;
10460                      offset += TG3_NVM_DIRENT_SIZE) {
10461                         if (tg3_nvram_read(tp, offset, &val))
10462                                 return NULL;
10463
10464                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10465                             TG3_NVM_DIRTYPE_EXTVPD)
10466                                 break;
10467                 }
10468
10469                 if (offset != TG3_NVM_DIR_END) {
10470                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10471                         if (tg3_nvram_read(tp, offset + 4, &offset))
10472                                 return NULL;
10473
10474                         offset = tg3_nvram_logical_addr(tp, offset);
10475                 }
10476         }
10477
10478         if (!offset || !len) {
10479                 offset = TG3_NVM_VPD_OFF;
10480                 len = TG3_NVM_VPD_LEN;
10481         }
10482
10483         buf = kmalloc(len, GFP_KERNEL);
10484         if (buf == NULL)
10485                 return NULL;
10486
10487         if (magic == TG3_EEPROM_MAGIC) {
10488                 for (i = 0; i < len; i += 4) {
10489                         /* The data is in little-endian format in NVRAM.
10490                          * Use the big-endian read routines to preserve
10491                          * the byte order as it exists in NVRAM.
10492                          */
10493                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10494                                 goto error;
10495                 }
10496         } else {
10497                 u8 *ptr;
10498                 ssize_t cnt;
10499                 unsigned int pos = 0;
10500
10501                 ptr = (u8 *)&buf[0];
10502                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10503                         cnt = pci_read_vpd(tp->pdev, pos,
10504                                            len - pos, ptr);
10505                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10506                                 cnt = 0;
10507                         else if (cnt < 0)
10508                                 goto error;
10509                 }
10510                 if (pos != len)
10511                         goto error;
10512         }
10513
10514         return buf;
10515
10516 error:
10517         kfree(buf);
10518         return NULL;
10519 }
10520
10521 #define NVRAM_TEST_SIZE 0x100
10522 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10523 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10524 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10525 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10526 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10527 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x4c
10528 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10529 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10530
10531 static int tg3_test_nvram(struct tg3 *tp)
10532 {
10533         u32 csum, magic;
10534         __be32 *buf;
10535         int i, j, k, err = 0, size;
10536
10537         if (tg3_flag(tp, NO_NVRAM))
10538                 return 0;
10539
10540         if (tg3_nvram_read(tp, 0, &magic) != 0)
10541                 return -EIO;
10542
10543         if (magic == TG3_EEPROM_MAGIC)
10544                 size = NVRAM_TEST_SIZE;
10545         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10546                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10547                     TG3_EEPROM_SB_FORMAT_1) {
10548                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10549                         case TG3_EEPROM_SB_REVISION_0:
10550                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10551                                 break;
10552                         case TG3_EEPROM_SB_REVISION_2:
10553                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10554                                 break;
10555                         case TG3_EEPROM_SB_REVISION_3:
10556                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10557                                 break;
10558                         case TG3_EEPROM_SB_REVISION_4:
10559                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10560                                 break;
10561                         case TG3_EEPROM_SB_REVISION_5:
10562                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10563                                 break;
10564                         case TG3_EEPROM_SB_REVISION_6:
10565                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10566                                 break;
10567                         default:
10568                                 return -EIO;
10569                         }
10570                 } else
10571                         return 0;
10572         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10573                 size = NVRAM_SELFBOOT_HW_SIZE;
10574         else
10575                 return -EIO;
10576
10577         buf = kmalloc(size, GFP_KERNEL);
10578         if (buf == NULL)
10579                 return -ENOMEM;
10580
10581         err = -EIO;
10582         for (i = 0, j = 0; i < size; i += 4, j++) {
10583                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10584                 if (err)
10585                         break;
10586         }
10587         if (i < size)
10588                 goto out;
10589
10590         /* Selfboot format */
10591         magic = be32_to_cpu(buf[0]);
10592         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10593             TG3_EEPROM_MAGIC_FW) {
10594                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10595
10596                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10597                     TG3_EEPROM_SB_REVISION_2) {
10598                         /* For rev 2, the csum doesn't include the MBA. */
10599                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10600                                 csum8 += buf8[i];
10601                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10602                                 csum8 += buf8[i];
10603                 } else {
10604                         for (i = 0; i < size; i++)
10605                                 csum8 += buf8[i];
10606                 }
10607
10608                 if (csum8 == 0) {
10609                         err = 0;
10610                         goto out;
10611                 }
10612
10613                 err = -EIO;
10614                 goto out;
10615         }
10616
10617         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10618             TG3_EEPROM_MAGIC_HW) {
10619                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10620                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10621                 u8 *buf8 = (u8 *) buf;
10622
10623                 /* Separate the parity bits and the data bytes.  */
10624                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10625                         if ((i == 0) || (i == 8)) {
10626                                 int l;
10627                                 u8 msk;
10628
10629                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10630                                         parity[k++] = buf8[i] & msk;
10631                                 i++;
10632                         } else if (i == 16) {
10633                                 int l;
10634                                 u8 msk;
10635
10636                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10637                                         parity[k++] = buf8[i] & msk;
10638                                 i++;
10639
10640                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10641                                         parity[k++] = buf8[i] & msk;
10642                                 i++;
10643                         }
10644                         data[j++] = buf8[i];
10645                 }
10646
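                /* Each data byte plus its parity bit must have an odd
                 * total population count (odd parity): fail on an
                 * odd-weight byte with parity set, or an even-weight
                 * byte with parity clear.
                 */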
10647                 err = -EIO;
10648                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10649                         u8 hw8 = hweight8(data[i]);
10650
10651                         if ((hw8 & 0x1) && parity[i])
10652                                 goto out;
10653                         else if (!(hw8 & 0x1) && !parity[i])
10654                                 goto out;
10655                 }
10656                 err = 0;
10657                 goto out;
10658         }
10659
10660         err = -EIO;
10661
10662         /* Bootstrap checksum at offset 0x10 */
10663         csum = calc_crc((unsigned char *) buf, 0x10);
10664         if (csum != le32_to_cpu(buf[0x10/4]))
10665                 goto out;
10666
10667         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10668         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10669         if (csum != le32_to_cpu(buf[0xfc/4]))
10670                 goto out;
10671
10672         kfree(buf);
10673
10674         buf = tg3_vpd_readblock(tp);
10675         if (!buf)
10676                 return -ENOMEM;
10677
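        /* Note: the search below is bounded by the default VPD length,
         * which assumes the block returned above is at least that big.
         */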
10678         i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
10679                              PCI_VPD_LRDT_RO_DATA);
10680         if (i > 0) {
10681                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
10682                 if (j < 0)
10683                         goto out;
10684
10685                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
10686                         goto out;
10687
10688                 i += PCI_VPD_LRDT_TAG_SIZE;
10689                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
10690                                               PCI_VPD_RO_KEYWORD_CHKSUM);
10691                 if (j > 0) {
10692                         u8 csum8 = 0;
10693
10694                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
10695
10696                         for (i = 0; i <= j; i++)
10697                                 csum8 += ((u8 *)buf)[i];
10698
10699                         if (csum8)
10700                                 goto out;
10701                 }
10702         }
10703
10704         err = 0;
10705
10706 out:
10707         kfree(buf);
10708         return err;
10709 }
10710
10711 #define TG3_SERDES_TIMEOUT_SEC  2
10712 #define TG3_COPPER_TIMEOUT_SEC  6
10713
10714 static int tg3_test_link(struct tg3 *tp)
10715 {
10716         int i, max;
10717
10718         if (!netif_running(tp->dev))
10719                 return -ENODEV;
10720
10721         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10722                 max = TG3_SERDES_TIMEOUT_SEC;
10723         else
10724                 max = TG3_COPPER_TIMEOUT_SEC;
10725
10726         for (i = 0; i < max; i++) {
10727                 if (netif_carrier_ok(tp->dev))
10728                         return 0;
10729
10730                 if (msleep_interruptible(1000))
10731                         break;
10732         }
10733
10734         return -EIO;
10735 }
10736
10737 /* Only test the commonly used registers */
10738 static int tg3_test_registers(struct tg3 *tp)
10739 {
10740         int i, is_5705, is_5750;
10741         u32 offset, read_mask, write_mask, val, save_val, read_val;
10742         static struct {
10743                 u16 offset;
10744                 u16 flags;
10745 #define TG3_FL_5705     0x1
10746 #define TG3_FL_NOT_5705 0x2
10747 #define TG3_FL_NOT_5788 0x4
10748 #define TG3_FL_NOT_5750 0x8
10749                 u32 read_mask;
10750                 u32 write_mask;
10751         } reg_tbl[] = {
10752                 /* MAC Control Registers */
10753                 { MAC_MODE, TG3_FL_NOT_5705,
10754                         0x00000000, 0x00ef6f8c },
10755                 { MAC_MODE, TG3_FL_5705,
10756                         0x00000000, 0x01ef6b8c },
10757                 { MAC_STATUS, TG3_FL_NOT_5705,
10758                         0x03800107, 0x00000000 },
10759                 { MAC_STATUS, TG3_FL_5705,
10760                         0x03800100, 0x00000000 },
10761                 { MAC_ADDR_0_HIGH, 0x0000,
10762                         0x00000000, 0x0000ffff },
10763                 { MAC_ADDR_0_LOW, 0x0000,
10764                         0x00000000, 0xffffffff },
10765                 { MAC_RX_MTU_SIZE, 0x0000,
10766                         0x00000000, 0x0000ffff },
10767                 { MAC_TX_MODE, 0x0000,
10768                         0x00000000, 0x00000070 },
10769                 { MAC_TX_LENGTHS, 0x0000,
10770                         0x00000000, 0x00003fff },
10771                 { MAC_RX_MODE, TG3_FL_NOT_5705,
10772                         0x00000000, 0x000007fc },
10773                 { MAC_RX_MODE, TG3_FL_5705,
10774                         0x00000000, 0x000007dc },
10775                 { MAC_HASH_REG_0, 0x0000,
10776                         0x00000000, 0xffffffff },
10777                 { MAC_HASH_REG_1, 0x0000,
10778                         0x00000000, 0xffffffff },
10779                 { MAC_HASH_REG_2, 0x0000,
10780                         0x00000000, 0xffffffff },
10781                 { MAC_HASH_REG_3, 0x0000,
10782                         0x00000000, 0xffffffff },
10783
10784                 /* Receive Data and Receive BD Initiator Control Registers. */
10785                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10786                         0x00000000, 0xffffffff },
10787                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10788                         0x00000000, 0xffffffff },
10789                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10790                         0x00000000, 0x00000003 },
10791                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10792                         0x00000000, 0xffffffff },
10793                 { RCVDBDI_STD_BD+0, 0x0000,
10794                         0x00000000, 0xffffffff },
10795                 { RCVDBDI_STD_BD+4, 0x0000,
10796                         0x00000000, 0xffffffff },
10797                 { RCVDBDI_STD_BD+8, 0x0000,
10798                         0x00000000, 0xffff0002 },
10799                 { RCVDBDI_STD_BD+0xc, 0x0000,
10800                         0x00000000, 0xffffffff },
10801
10802                 /* Receive BD Initiator Control Registers. */
10803                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10804                         0x00000000, 0xffffffff },
10805                 { RCVBDI_STD_THRESH, TG3_FL_5705,
10806                         0x00000000, 0x000003ff },
10807                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10808                         0x00000000, 0xffffffff },
10809
10810                 /* Host Coalescing Control Registers. */
10811                 { HOSTCC_MODE, TG3_FL_NOT_5705,
10812                         0x00000000, 0x00000004 },
10813                 { HOSTCC_MODE, TG3_FL_5705,
10814                         0x00000000, 0x000000f6 },
10815                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10816                         0x00000000, 0xffffffff },
10817                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10818                         0x00000000, 0x000003ff },
10819                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10820                         0x00000000, 0xffffffff },
10821                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10822                         0x00000000, 0x000003ff },
10823                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10824                         0x00000000, 0xffffffff },
10825                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10826                         0x00000000, 0x000000ff },
10827                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10828                         0x00000000, 0xffffffff },
10829                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10830                         0x00000000, 0x000000ff },
10831                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10832                         0x00000000, 0xffffffff },
10833                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10834                         0x00000000, 0xffffffff },
10835                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10836                         0x00000000, 0xffffffff },
10837                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10838                         0x00000000, 0x000000ff },
10839                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10840                         0x00000000, 0xffffffff },
10841                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10842                         0x00000000, 0x000000ff },
10843                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10844                         0x00000000, 0xffffffff },
10845                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10846                         0x00000000, 0xffffffff },
10847                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10848                         0x00000000, 0xffffffff },
10849                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10850                         0x00000000, 0xffffffff },
10851                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10852                         0x00000000, 0xffffffff },
10853                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10854                         0xffffffff, 0x00000000 },
10855                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10856                         0xffffffff, 0x00000000 },
10857
10858                 /* Buffer Manager Control Registers. */
10859                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10860                         0x00000000, 0x007fff80 },
10861                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10862                         0x00000000, 0x007fffff },
10863                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10864                         0x00000000, 0x0000003f },
10865                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10866                         0x00000000, 0x000001ff },
10867                 { BUFMGR_MB_HIGH_WATER, 0x0000,
10868                         0x00000000, 0x000001ff },
10869                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10870                         0xffffffff, 0x00000000 },
10871                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10872                         0xffffffff, 0x00000000 },
10873
10874                 /* Mailbox Registers */
10875                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10876                         0x00000000, 0x000001ff },
10877                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10878                         0x00000000, 0x000001ff },
10879                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10880                         0x00000000, 0x000007ff },
10881                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10882                         0x00000000, 0x000001ff },
10883
10884                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10885         };
10886
10887         is_5705 = is_5750 = 0;
10888         if (tg3_flag(tp, 5705_PLUS)) {
10889                 is_5705 = 1;
10890                 if (tg3_flag(tp, 5750_PLUS))
10891                         is_5750 = 1;
10892         }
10893
10894         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10895                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10896                         continue;
10897
10898                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10899                         continue;
10900
10901                 if (tg3_flag(tp, IS_5788) &&
10902                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
10903                         continue;
10904
10905                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10906                         continue;
10907
10908                 offset = (u32) reg_tbl[i].offset;
10909                 read_mask = reg_tbl[i].read_mask;
10910                 write_mask = reg_tbl[i].write_mask;
10911
10912                 /* Save the original register content */
10913                 save_val = tr32(offset);
10914
10915                 /* Determine the read-only value. */
10916                 read_val = save_val & read_mask;
10917
10918                 /* Write zero to the register, then make sure the read-only bits
10919                  * are not changed and the read/write bits are all zeros.
10920                  */
10921                 tw32(offset, 0);
10922
10923                 val = tr32(offset);
10924
10925                 /* Test the read-only and read/write bits. */
10926                 if (((val & read_mask) != read_val) || (val & write_mask))
10927                         goto out;
10928
10929                 /* Write ones to all the bits defined by RdMask and WrMask, then
10930                  * make sure the read-only bits are not changed and the
10931                  * read/write bits are all ones.
10932                  */
10933                 tw32(offset, read_mask | write_mask);
10934
10935                 val = tr32(offset);
10936
10937                 /* Test the read-only bits. */
10938                 if ((val & read_mask) != read_val)
10939                         goto out;
10940
10941                 /* Test the read/write bits. */
10942                 if ((val & write_mask) != write_mask)
10943                         goto out;
10944
10945                 tw32(offset, save_val);
10946         }
10947
10948         return 0;
10949
10950 out:
10951         if (netif_msg_hw(tp))
10952                 netdev_err(tp->dev,
10953                            "Register test failed at offset %x\n", offset);
10954         tw32(offset, save_val);
10955         return -EIO;
10956 }
10957
10958 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10959 {
10960         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10961         int i;
10962         u32 j;
10963
10964         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10965                 for (j = 0; j < len; j += 4) {
10966                         u32 val;
10967
10968                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10969                         tg3_read_mem(tp, offset + j, &val);
10970                         if (val != test_pattern[i])
10971                                 return -EIO;
10972                 }
10973         }
10974         return 0;
10975 }
10976
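/* Walk a per-ASIC table of internal SRAM regions, given as { offset,
 * len } pairs terminated by an offset of 0xffffffff, pattern-testing
 * each region via tg3_do_mem_test().
 */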
10977 static int tg3_test_memory(struct tg3 *tp)
10978 {
10979         static struct mem_entry {
10980                 u32 offset;
10981                 u32 len;
10982         } mem_tbl_570x[] = {
10983                 { 0x00000000, 0x00b50},
10984                 { 0x00002000, 0x1c000},
10985                 { 0xffffffff, 0x00000}
10986         }, mem_tbl_5705[] = {
10987                 { 0x00000100, 0x0000c},
10988                 { 0x00000200, 0x00008},
10989                 { 0x00004000, 0x00800},
10990                 { 0x00006000, 0x01000},
10991                 { 0x00008000, 0x02000},
10992                 { 0x00010000, 0x0e000},
10993                 { 0xffffffff, 0x00000}
10994         }, mem_tbl_5755[] = {
10995                 { 0x00000200, 0x00008},
10996                 { 0x00004000, 0x00800},
10997                 { 0x00006000, 0x00800},
10998                 { 0x00008000, 0x02000},
10999                 { 0x00010000, 0x0c000},
11000                 { 0xffffffff, 0x00000}
11001         }, mem_tbl_5906[] = {
11002                 { 0x00000200, 0x00008},
11003                 { 0x00004000, 0x00400},
11004                 { 0x00006000, 0x00400},
11005                 { 0x00008000, 0x01000},
11006                 { 0x00010000, 0x01000},
11007                 { 0xffffffff, 0x00000}
11008         }, mem_tbl_5717[] = {
11009                 { 0x00000200, 0x00008},
11010                 { 0x00010000, 0x0a000},
11011                 { 0x00020000, 0x13c00},
11012                 { 0xffffffff, 0x00000}
11013         }, mem_tbl_57765[] = {
11014                 { 0x00000200, 0x00008},
11015                 { 0x00004000, 0x00800},
11016                 { 0x00006000, 0x09800},
11017                 { 0x00010000, 0x0a000},
11018                 { 0xffffffff, 0x00000}
11019         };
11020         struct mem_entry *mem_tbl;
11021         int err = 0;
11022         int i;
11023
11024         if (tg3_flag(tp, 5717_PLUS))
11025                 mem_tbl = mem_tbl_5717;
11026         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11027                 mem_tbl = mem_tbl_57765;
11028         else if (tg3_flag(tp, 5755_PLUS))
11029                 mem_tbl = mem_tbl_5755;
11030         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11031                 mem_tbl = mem_tbl_5906;
11032         else if (tg3_flag(tp, 5705_PLUS))
11033                 mem_tbl = mem_tbl_5705;
11034         else
11035                 mem_tbl = mem_tbl_570x;
11036
11037         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11038                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11039                 if (err)
11040                         break;
11041         }
11042
11043         return err;
11044 }
11045
11046 #define TG3_MAC_LOOPBACK        0
11047 #define TG3_PHY_LOOPBACK        1
11048 #define TG3_TSO_LOOPBACK        2
11049
11050 #define TG3_TSO_MSS             500
11051
11052 #define TG3_TSO_IP_HDR_LEN      20
11053 #define TG3_TSO_TCP_HDR_LEN     20
11054 #define TG3_TSO_TCP_OPT_LEN     12
11055
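/* Template frame for the TSO loopback test: Ethertype 0x0800 (IPv4), a
 * 20-byte IP header, and a 20-byte TCP header carrying 12 bytes of
 * timestamp options (data offset 8 words).  Length and checksum fields
 * are left zero here and filled in when the test frame is built.
 */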
11056 static const u8 tg3_tso_header[] = {
11057 0x08, 0x00,
11058 0x45, 0x00, 0x00, 0x00,
11059 0x00, 0x00, 0x40, 0x00,
11060 0x40, 0x06, 0x00, 0x00,
11061 0x0a, 0x00, 0x00, 0x01,
11062 0x0a, 0x00, 0x00, 0x02,
11063 0x0d, 0x00, 0xe0, 0x00,
11064 0x00, 0x00, 0x01, 0x00,
11065 0x00, 0x00, 0x02, 0x00,
11066 0x80, 0x10, 0x10, 0x00,
11067 0x14, 0x09, 0x00, 0x00,
11068 0x01, 0x01, 0x08, 0x0a,
11069 0x11, 0x11, 0x11, 0x11,
11070 0x11, 0x11, 0x11, 0x11,
11071 };
11072
11073 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
11074 {
11075         u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
11076         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11077         struct sk_buff *skb, *rx_skb;
11078         u8 *tx_data;
11079         dma_addr_t map;
11080         int num_pkts, tx_len, rx_len, i, err;
11081         struct tg3_rx_buffer_desc *desc;
11082         struct tg3_napi *tnapi, *rnapi;
11083         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11084
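        /* Loopback traffic uses ring 0 by default.  With RSS enabled,
         * receive completions arrive on ring 1; with TSS enabled, ring 1
         * carries the transmit side instead.
         */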
11085         tnapi = &tp->napi[0];
11086         rnapi = &tp->napi[0];
11087         if (tp->irq_cnt > 1) {
11088                 if (tg3_flag(tp, ENABLE_RSS))
11089                         rnapi = &tp->napi[1];
11090                 if (tg3_flag(tp, ENABLE_TSS))
11091                         tnapi = &tp->napi[1];
11092         }
11093         coal_now = tnapi->coal_now | rnapi->coal_now;
11094
11095         if (loopback_mode == TG3_MAC_LOOPBACK) {
11096                 /* HW errata - mac loopback fails in some cases on 5780.
11097                  * Normal traffic and PHY loopback are not affected by
11098                  * errata.  Also, the MAC loopback test is deprecated for
11099                  * all newer ASIC revisions.
11100                  */
11101                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
11102                     tg3_flag(tp, CPMU_PRESENT))
11103                         return 0;
11104
11105                 mac_mode = tp->mac_mode &
11106                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11107                 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
11108                 if (!tg3_flag(tp, 5705_PLUS))
11109                         mac_mode |= MAC_MODE_LINK_POLARITY;
11110                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
11111                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11112                 else
11113                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11114                 tw32(MAC_MODE, mac_mode);
11115         } else {
11116                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11117                         tg3_phy_fet_toggle_apd(tp, false);
11118                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
11119                 } else
11120                         val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
11121
11122                 tg3_phy_toggle_automdix(tp, 0);
11123
11124                 tg3_writephy(tp, MII_BMCR, val);
11125                 udelay(40);
11126
11127                 mac_mode = tp->mac_mode &
11128                            ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
11129                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
11130                         tg3_writephy(tp, MII_TG3_FET_PTEST,
11131                                      MII_TG3_FET_PTEST_FRC_TX_LINK |
11132                                      MII_TG3_FET_PTEST_FRC_TX_LOCK);
11133                         /* The write needs to be flushed for the AC131 */
11134                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11135                                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
11136                         mac_mode |= MAC_MODE_PORT_MODE_MII;
11137                 } else
11138                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
11139
11140                 /* Reset the rx path to avoid intermittently losing the first rx packet */
11141                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
11142                         tw32_f(MAC_RX_MODE, RX_MODE_RESET);
11143                         udelay(10);
11144                         tw32_f(MAC_RX_MODE, tp->rx_mode);
11145                 }
11146                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
11147                         u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
11148                         if (masked_phy_id == TG3_PHY_ID_BCM5401)
11149                                 mac_mode &= ~MAC_MODE_LINK_POLARITY;
11150                         else if (masked_phy_id == TG3_PHY_ID_BCM5411)
11151                                 mac_mode |= MAC_MODE_LINK_POLARITY;
11152                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
11153                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
11154                 }
11155                 tw32(MAC_MODE, mac_mode);
11156
11157                 /* Wait for link */
11158                 for (i = 0; i < 100; i++) {
11159                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11160                                 break;
11161                         mdelay(1);
11162                 }
11163         }
11164
11165         err = -EIO;
11166
11167         tx_len = pktsz;
11168         skb = netdev_alloc_skb(tp->dev, tx_len);
11169         if (!skb)
11170                 return -ENOMEM;
11171
11172         tx_data = skb_put(skb, tx_len);
11173         memcpy(tx_data, tp->dev->dev_addr, 6);
11174         memset(tx_data + 6, 0x0, 8);
11175
11176         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11177
11178         if (loopback_mode == TG3_TSO_LOOPBACK) {
11179                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11180
11181                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11182                               TG3_TSO_TCP_OPT_LEN;
11183
11184                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11185                        sizeof(tg3_tso_header));
11186                 mss = TG3_TSO_MSS;
11187
11188                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11189                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11190
11191                 /* Set the total length field in the IP header */
11192                 iph->tot_len = htons((u16)(mss + hdr_len));
11193
11194                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11195                               TXD_FLAG_CPU_POST_DMA);
11196
11197                 if (tg3_flag(tp, HW_TSO_1) ||
11198                     tg3_flag(tp, HW_TSO_2) ||
11199                     tg3_flag(tp, HW_TSO_3)) {
11200                         struct tcphdr *th;
11201                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11202                         th = (struct tcphdr *)&tx_data[val];
11203                         th->check = 0;
11204                 } else
11205                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11206
11207                 if (tg3_flag(tp, HW_TSO_3)) {
11208                         mss |= (hdr_len & 0xc) << 12;
11209                         if (hdr_len & 0x10)
11210                                 base_flags |= 0x00000010;
11211                         base_flags |= (hdr_len & 0x3e0) << 5;
11212                 } else if (tg3_flag(tp, HW_TSO_2))
11213                         mss |= hdr_len << 9;
11214                 else if (tg3_flag(tp, HW_TSO_1) ||
11215                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11216                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11217                 } else {
11218                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11219                 }
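
                /*
                 * Worked example of the encoding above, using the hdr_len
                 * of 52 (0x34) built by this test: on HW_TSO_3 parts,
                 * (0x34 & 0xc) << 12 == 0x4000 lands in mss,
                 * hdr_len bit 4 sets base_flags bit 4, and
                 * (0x34 & 0x3e0) << 5 == 0x400 lands in base_flags, i.e.
                 * the header length is scattered across both fields.
                 */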
11220
11221                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11222         } else {
11223                 num_pkts = 1;
11224                 data_off = ETH_HLEN;
11225         }
11226
11227         for (i = data_off; i < tx_len; i++)
11228                 tx_data[i] = (u8) (i & 0xff);
11229
11230         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11231         if (pci_dma_mapping_error(tp->pdev, map)) {
11232                 dev_kfree_skb(skb);
11233                 return -EIO;
11234         }
11235
11236         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11237                rnapi->coal_now);
11238
11239         udelay(10);
11240
11241         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11242
11243         tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
11244                     base_flags, (mss << 1) | 1);
11245
11246         tnapi->tx_prod++;
11247
11248         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11249         tr32_mailbox(tnapi->prodmbox);
11250
11251         udelay(10);
11252
11253         /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11254         for (i = 0; i < 35; i++) {
11255                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11256                        coal_now);
11257
11258                 udelay(10);
11259
11260                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11261                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11262                 if ((tx_idx == tnapi->tx_prod) &&
11263                     (rx_idx == (rx_start_idx + num_pkts)))
11264                         break;
11265         }
11266
11267         pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
11268         dev_kfree_skb(skb);
11269
11270         if (tx_idx != tnapi->tx_prod)
11271                 goto out;
11272
11273         if (rx_idx != rx_start_idx + num_pkts)
11274                 goto out;
11275
11276         val = data_off;
11277         while (rx_idx != rx_start_idx) {
11278                 desc = &rnapi->rx_rcb[rx_start_idx++];
11279                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11280                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11281
11282                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11283                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11284                         goto out;
11285
11286                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11287                          - ETH_FCS_LEN;
11288
11289                 if (loopback_mode != TG3_TSO_LOOPBACK) {
11290                         if (rx_len != tx_len)
11291                                 goto out;
11292
11293                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11294                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11295                                         goto out;
11296                         } else {
11297                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11298                                         goto out;
11299                         }
11300                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11301                            ((desc->ip_tcp_csum & RXD_TCPCSUM_MASK) >>
11302                              RXD_TCPCSUM_SHIFT) != 0xffff) {
11303                         goto out;
11304                 }
11305
11306                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11307                         rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11308                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11309                                              mapping);
11310                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11311                         rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
11312                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11313                                              mapping);
11314                 } else
11315                         goto out;
11316
11317                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11318                                             PCI_DMA_FROMDEVICE);
11319
11320                 for (i = data_off; i < rx_len; i++, val++) {
11321                         if (*(rx_skb->data + i) != (u8) (val & 0xff))
11322                                 goto out;
11323                 }
11324         }
11325
11326         err = 0;
11327
11328         /* tg3_free_rings will unmap and free the rx_skb */
11329 out:
11330         return err;
11331 }
11332
11333 #define TG3_STD_LOOPBACK_FAILED         1
11334 #define TG3_JMB_LOOPBACK_FAILED         2
11335 #define TG3_TSO_LOOPBACK_FAILED         4
11336
11337 #define TG3_MAC_LOOPBACK_SHIFT          0
11338 #define TG3_PHY_LOOPBACK_SHIFT          4
11339 #define TG3_LOOPBACK_FAILED             0x00000077
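
/*
 * Worked example of this encoding: a jumbo-frame failure during PHY
 * loopback is reported as
 * TG3_JMB_LOOPBACK_FAILED << TG3_PHY_LOOPBACK_SHIFT == 2 << 4 == 0x20,
 * and TG3_LOOPBACK_FAILED (0x77) is simply all three test bits set in
 * both the MAC (0x07) and PHY (0x70) nibbles.
 */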
11340
11341 static int tg3_test_loopback(struct tg3 *tp)
11342 {
11343         int err = 0;
11344         u32 eee_cap, cpmuctrl = 0;
11345
11346         if (!netif_running(tp->dev))
11347                 return TG3_LOOPBACK_FAILED;
11348
11349         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11350         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11351
11352         err = tg3_reset_hw(tp, 1);
11353         if (err) {
11354                 err = TG3_LOOPBACK_FAILED;
11355                 goto done;
11356         }
11357
11358         if (tg3_flag(tp, ENABLE_RSS)) {
11359                 int i;
11360
11361                 /* Reroute all rx packets to the 1st queue */
11362                 for (i = MAC_RSS_INDIR_TBL_0;
11363                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11364                         tw32(i, 0x0);
11365         }
11366
11367         /* Turn off gphy autopowerdown. */
11368         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11369                 tg3_phy_toggle_apd(tp, false);
11370
11371         if (tg3_flag(tp, CPMU_PRESENT)) {
11372                 int i;
11373                 u32 status;
11374
11375                 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11376
11377                 /* Wait for up to 40 microseconds to acquire lock. */
11378                 for (i = 0; i < 4; i++) {
11379                         status = tr32(TG3_CPMU_MUTEX_GNT);
11380                         if (status == CPMU_MUTEX_GNT_DRIVER)
11381                                 break;
11382                         udelay(10);
11383                 }
11384
11385                 if (status != CPMU_MUTEX_GNT_DRIVER) {
11386                         err = TG3_LOOPBACK_FAILED;
11387                         goto done;
11388                 }
11389
11390                 /* Turn off link-based power management. */
11391                 cpmuctrl = tr32(TG3_CPMU_CTRL);
11392                 tw32(TG3_CPMU_CTRL,
11393                      cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11394                                   CPMU_CTRL_LINK_AWARE_MODE));
11395         }
11396
11397         if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
11398                 err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11399
11400         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11401             tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
11402                 err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
11403
11404         if (tg3_flag(tp, CPMU_PRESENT)) {
11405                 tw32(TG3_CPMU_CTRL, cpmuctrl);
11406
11407                 /* Release the mutex */
11408                 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11409         }
11410
11411         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11412             !tg3_flag(tp, USE_PHYLIB)) {
11413                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
11414                         err |= TG3_STD_LOOPBACK_FAILED <<
11415                                TG3_PHY_LOOPBACK_SHIFT;
11416                 if (tg3_flag(tp, TSO_CAPABLE) &&
11417                     tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
11418                         err |= TG3_TSO_LOOPBACK_FAILED <<
11419                                TG3_PHY_LOOPBACK_SHIFT;
11420                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11421                     tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
11422                         err |= TG3_JMB_LOOPBACK_FAILED <<
11423                                TG3_PHY_LOOPBACK_SHIFT;
11424         }
11425
11426         /* Re-enable gphy autopowerdown. */
11427         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11428                 tg3_phy_toggle_apd(tp, true);
11429
11430 done:
11431         tp->phy_flags |= eee_cap;
11432
11433         return err;
11434 }
11435
11436 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11437                           u64 *data)
11438 {
11439         struct tg3 *tp = netdev_priv(dev);
11440
11441         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11442             tg3_power_up(tp)) {
11443                 etest->flags |= ETH_TEST_FL_FAILED;
11444                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11445                 return;
11446         }
11447
11448         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11449
11450         if (tg3_test_nvram(tp) != 0) {
11451                 etest->flags |= ETH_TEST_FL_FAILED;
11452                 data[0] = 1;
11453         }
11454         if (tg3_test_link(tp) != 0) {
11455                 etest->flags |= ETH_TEST_FL_FAILED;
11456                 data[1] = 1;
11457         }
11458         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11459                 int err, err2 = 0, irq_sync = 0;
11460
11461                 if (netif_running(dev)) {
11462                         tg3_phy_stop(tp);
11463                         tg3_netif_stop(tp);
11464                         irq_sync = 1;
11465                 }
11466
11467                 tg3_full_lock(tp, irq_sync);
11468
11469                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11470                 err = tg3_nvram_lock(tp);
11471                 tg3_halt_cpu(tp, RX_CPU_BASE);
11472                 if (!tg3_flag(tp, 5705_PLUS))
11473                         tg3_halt_cpu(tp, TX_CPU_BASE);
11474                 if (!err)
11475                         tg3_nvram_unlock(tp);
11476
11477                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11478                         tg3_phy_reset(tp);
11479
11480                 if (tg3_test_registers(tp) != 0) {
11481                         etest->flags |= ETH_TEST_FL_FAILED;
11482                         data[2] = 1;
11483                 }
11484                 if (tg3_test_memory(tp) != 0) {
11485                         etest->flags |= ETH_TEST_FL_FAILED;
11486                         data[3] = 1;
11487                 }
11488                 data[4] = tg3_test_loopback(tp);
11489                 if (data[4] != 0)
11490                         etest->flags |= ETH_TEST_FL_FAILED;
11491                 tg3_full_unlock(tp);
11492
11493                 if (tg3_test_interrupt(tp) != 0) {
11494                         etest->flags |= ETH_TEST_FL_FAILED;
11495                         data[5] = 1;
11496                 }
11497
11498                 tg3_full_lock(tp, 0);
11499
11500                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11501                 if (netif_running(dev)) {
11502                         tg3_flag_set(tp, INIT_COMPLETE);
11503                         err2 = tg3_restart_hw(tp, 1);
11504                         if (!err2)
11505                                 tg3_netif_start(tp);
11506                 }
11507
11508                 tg3_full_unlock(tp);
11509
11510                 if (irq_sync && !err2)
11511                         tg3_phy_start(tp);
11512         }
11513         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11514                 tg3_power_down(tp);
11515
11516 }
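
/*
 * Usage sketch (assuming standard ethtool semantics): "ethtool -t ethX
 * online" reaches tg3_self_test() above with ETH_TEST_FL_OFFLINE clear,
 * running only the nvram (data[0]) and link (data[1]) tests; "ethtool -t
 * ethX offline" sets the flag and additionally runs the register
 * (data[2]), memory (data[3]), loopback (data[4]) and interrupt
 * (data[5]) tests.
 */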
11517
11518 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11519 {
11520         struct mii_ioctl_data *data = if_mii(ifr);
11521         struct tg3 *tp = netdev_priv(dev);
11522         int err;
11523
11524         if (tg3_flag(tp, USE_PHYLIB)) {
11525                 struct phy_device *phydev;
11526                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11527                         return -EAGAIN;
11528                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11529                 return phy_mii_ioctl(phydev, ifr, cmd);
11530         }
11531
11532         switch (cmd) {
11533         case SIOCGMIIPHY:
11534                 data->phy_id = tp->phy_addr;
11535
11536                 /* fallthru */
11537         case SIOCGMIIREG: {
11538                 u32 mii_regval;
11539
11540                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11541                         break;                  /* We have no PHY */
11542
11543                 if (!netif_running(dev))
11544                         return -EAGAIN;
11545
11546                 spin_lock_bh(&tp->lock);
11547                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11548                 spin_unlock_bh(&tp->lock);
11549
11550                 data->val_out = mii_regval;
11551
11552                 return err;
11553         }
11554
11555         case SIOCSMIIREG:
11556                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11557                         break;                  /* We have no PHY */
11558
11559                 if (!netif_running(dev))
11560                         return -EAGAIN;
11561
11562                 spin_lock_bh(&tp->lock);
11563                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11564                 spin_unlock_bh(&tp->lock);
11565
11566                 return err;
11567
11568         default:
11569                 /* do nothing */
11570                 break;
11571         }
11572         return -EOPNOTSUPP;
11573 }
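
/*
 * Illustrative userspace sketch (an assumption about conventional
 * SIOCGMIIREG usage, not part of the driver): the MII ioctls above are
 * typically driven like
 *
 *	struct ifreq ifr;
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	fills mii->phy_id
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);	BMSR value returned in mii->val_out
 */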
11574
11575 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11576 {
11577         struct tg3 *tp = netdev_priv(dev);
11578
11579         memcpy(ec, &tp->coal, sizeof(*ec));
11580         return 0;
11581 }
11582
11583 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11584 {
11585         struct tg3 *tp = netdev_priv(dev);
11586         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11587         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11588
11589         if (!tg3_flag(tp, 5705_PLUS)) {
11590                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11591                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11592                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11593                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11594         }
11595
11596         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11597             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11598             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11599             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11600             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11601             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11602             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11603             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11604             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11605             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11606                 return -EINVAL;
11607
11608         /* No rx interrupts will be generated if both are zero */
11609         if ((ec->rx_coalesce_usecs == 0) &&
11610             (ec->rx_max_coalesced_frames == 0))
11611                 return -EINVAL;
11612
11613         /* No tx interrupts will be generated if both are zero */
11614         if ((ec->tx_coalesce_usecs == 0) &&
11615             (ec->tx_max_coalesced_frames == 0))
11616                 return -EINVAL;
11617
11618         /* Only copy relevant parameters, ignore all others. */
11619         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11620         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11621         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11622         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11623         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11624         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11625         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11626         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11627         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11628
11629         if (netif_running(dev)) {
11630                 tg3_full_lock(tp, 0);
11631                 __tg3_set_coalesce(tp, &tp->coal);
11632                 tg3_full_unlock(tp);
11633         }
11634         return 0;
11635 }
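
/*
 * Usage sketch (assuming standard ethtool semantics): "ethtool -C ethX
 * rx-usecs 20 rx-frames 5" arrives above as ec->rx_coalesce_usecs == 20
 * and ec->rx_max_coalesced_frames == 5.  Zeroing both parameters of a
 * direction is rejected because the chip would then never raise an
 * interrupt for that direction.
 */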
11636
11637 static const struct ethtool_ops tg3_ethtool_ops = {
11638         .get_settings           = tg3_get_settings,
11639         .set_settings           = tg3_set_settings,
11640         .get_drvinfo            = tg3_get_drvinfo,
11641         .get_regs_len           = tg3_get_regs_len,
11642         .get_regs               = tg3_get_regs,
11643         .get_wol                = tg3_get_wol,
11644         .set_wol                = tg3_set_wol,
11645         .get_msglevel           = tg3_get_msglevel,
11646         .set_msglevel           = tg3_set_msglevel,
11647         .nway_reset             = tg3_nway_reset,
11648         .get_link               = ethtool_op_get_link,
11649         .get_eeprom_len         = tg3_get_eeprom_len,
11650         .get_eeprom             = tg3_get_eeprom,
11651         .set_eeprom             = tg3_set_eeprom,
11652         .get_ringparam          = tg3_get_ringparam,
11653         .set_ringparam          = tg3_set_ringparam,
11654         .get_pauseparam         = tg3_get_pauseparam,
11655         .set_pauseparam         = tg3_set_pauseparam,
11656         .self_test              = tg3_self_test,
11657         .get_strings            = tg3_get_strings,
11658         .set_phys_id            = tg3_set_phys_id,
11659         .get_ethtool_stats      = tg3_get_ethtool_stats,
11660         .get_coalesce           = tg3_get_coalesce,
11661         .set_coalesce           = tg3_set_coalesce,
11662         .get_sset_count         = tg3_get_sset_count,
11663 };
11664
11665 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11666 {
11667         u32 cursize, val, magic;
11668
11669         tp->nvram_size = EEPROM_CHIP_SIZE;
11670
11671         if (tg3_nvram_read(tp, 0, &magic) != 0)
11672                 return;
11673
11674         if ((magic != TG3_EEPROM_MAGIC) &&
11675             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11676             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11677                 return;
11678
11679         /*
11680          * Size the chip by reading offsets at increasing powers of two.
11681          * When we encounter our validation signature, we know the addressing
11682          * has wrapped around, and thus have our chip size.
11683          */
11684         cursize = 0x10;
11685
11686         while (cursize < tp->nvram_size) {
11687                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11688                         return;
11689
11690                 if (val == magic)
11691                         break;
11692
11693                 cursize <<= 1;
11694         }
11695
11696         tp->nvram_size = cursize;
11697 }
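
/*
 * Worked example: on a 512-byte EEPROM the reads at 0x10, 0x20, ...,
 * 0x100 return ordinary data, but the read at 0x200 wraps back to
 * offset 0 and returns the magic signature, so the loop above exits
 * with cursize (and hence tp->nvram_size) equal to 0x200.
 */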
11698
11699 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11700 {
11701         u32 val;
11702
11703         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11704                 return;
11705
11706         /* Selfboot format */
11707         if (val != TG3_EEPROM_MAGIC) {
11708                 tg3_get_eeprom_size(tp);
11709                 return;
11710         }
11711
11712         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11713                 if (val != 0) {
11714                         /* This is confusing.  We want to operate on the
11715                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11716                          * call will read from NVRAM and byteswap the data
11717                          * according to the byteswapping settings for all
11718                          * other register accesses.  This ensures the data we
11719                          * want will always reside in the lower 16-bits.
11720                          * However, the data in NVRAM is in LE format, which
11721                          * means the data from the NVRAM read will always be
11722                          * opposite the endianness of the CPU.  The 16-bit
11723                          * byteswap then brings the data to CPU endianness.
11724                          */
11725                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11726                         return;
11727                 }
11728         }
11729         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11730 }
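
/*
 * Worked example of the 16-bit byteswap in tg3_get_nvram_size() above:
 * if the low 16 bits of val read back as 0x0002 on the running CPU,
 * swab16() yields 0x0200, so tp->nvram_size = 0x200 * 1024 = 512 KB.
 */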
11731
11732 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11733 {
11734         u32 nvcfg1;
11735
11736         nvcfg1 = tr32(NVRAM_CFG1);
11737         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11738                 tg3_flag_set(tp, FLASH);
11739         } else {
11740                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11741                 tw32(NVRAM_CFG1, nvcfg1);
11742         }
11743
11744         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
11745             tg3_flag(tp, 5780_CLASS)) {
11746                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11747                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11748                         tp->nvram_jedecnum = JEDEC_ATMEL;
11749                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11750                         tg3_flag_set(tp, NVRAM_BUFFERED);
11751                         break;
11752                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11753                         tp->nvram_jedecnum = JEDEC_ATMEL;
11754                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11755                         break;
11756                 case FLASH_VENDOR_ATMEL_EEPROM:
11757                         tp->nvram_jedecnum = JEDEC_ATMEL;
11758                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11759                         tg3_flag_set(tp, NVRAM_BUFFERED);
11760                         break;
11761                 case FLASH_VENDOR_ST:
11762                         tp->nvram_jedecnum = JEDEC_ST;
11763                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11764                         tg3_flag_set(tp, NVRAM_BUFFERED);
11765                         break;
11766                 case FLASH_VENDOR_SAIFUN:
11767                         tp->nvram_jedecnum = JEDEC_SAIFUN;
11768                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11769                         break;
11770                 case FLASH_VENDOR_SST_SMALL:
11771                 case FLASH_VENDOR_SST_LARGE:
11772                         tp->nvram_jedecnum = JEDEC_SST;
11773                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11774                         break;
11775                 }
11776         } else {
11777                 tp->nvram_jedecnum = JEDEC_ATMEL;
11778                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11779                 tg3_flag_set(tp, NVRAM_BUFFERED);
11780         }
11781 }
11782
11783 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11784 {
11785         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11786         case FLASH_5752PAGE_SIZE_256:
11787                 tp->nvram_pagesize = 256;
11788                 break;
11789         case FLASH_5752PAGE_SIZE_512:
11790                 tp->nvram_pagesize = 512;
11791                 break;
11792         case FLASH_5752PAGE_SIZE_1K:
11793                 tp->nvram_pagesize = 1024;
11794                 break;
11795         case FLASH_5752PAGE_SIZE_2K:
11796                 tp->nvram_pagesize = 2048;
11797                 break;
11798         case FLASH_5752PAGE_SIZE_4K:
11799                 tp->nvram_pagesize = 4096;
11800                 break;
11801         case FLASH_5752PAGE_SIZE_264:
11802                 tp->nvram_pagesize = 264;
11803                 break;
11804         case FLASH_5752PAGE_SIZE_528:
11805                 tp->nvram_pagesize = 528;
11806                 break;
11807         }
11808 }
11809
11810 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11811 {
11812         u32 nvcfg1;
11813
11814         nvcfg1 = tr32(NVRAM_CFG1);
11815
11816         /* NVRAM protection for TPM */
11817         if (nvcfg1 & (1 << 27))
11818                 tg3_flag_set(tp, PROTECTED_NVRAM);
11819
11820         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11821         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11822         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11823                 tp->nvram_jedecnum = JEDEC_ATMEL;
11824                 tg3_flag_set(tp, NVRAM_BUFFERED);
11825                 break;
11826         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11827                 tp->nvram_jedecnum = JEDEC_ATMEL;
11828                 tg3_flag_set(tp, NVRAM_BUFFERED);
11829                 tg3_flag_set(tp, FLASH);
11830                 break;
11831         case FLASH_5752VENDOR_ST_M45PE10:
11832         case FLASH_5752VENDOR_ST_M45PE20:
11833         case FLASH_5752VENDOR_ST_M45PE40:
11834                 tp->nvram_jedecnum = JEDEC_ST;
11835                 tg3_flag_set(tp, NVRAM_BUFFERED);
11836                 tg3_flag_set(tp, FLASH);
11837                 break;
11838         }
11839
11840         if (tg3_flag(tp, FLASH)) {
11841                 tg3_nvram_get_pagesize(tp, nvcfg1);
11842         } else {
11843                 /* For eeprom, set pagesize to maximum eeprom size */
11844                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11845
11846                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11847                 tw32(NVRAM_CFG1, nvcfg1);
11848         }
11849 }
11850
11851 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11852 {
11853         u32 nvcfg1, protect = 0;
11854
11855         nvcfg1 = tr32(NVRAM_CFG1);
11856
11857         /* NVRAM protection for TPM */
11858         if (nvcfg1 & (1 << 27)) {
11859                 tg3_flag_set(tp, PROTECTED_NVRAM);
11860                 protect = 1;
11861         }
11862
11863         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11864         switch (nvcfg1) {
11865         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11866         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11867         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11868         case FLASH_5755VENDOR_ATMEL_FLASH_5:
11869                 tp->nvram_jedecnum = JEDEC_ATMEL;
11870                 tg3_flag_set(tp, NVRAM_BUFFERED);
11871                 tg3_flag_set(tp, FLASH);
11872                 tp->nvram_pagesize = 264;
11873                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11874                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11875                         tp->nvram_size = (protect ? 0x3e200 :
11876                                           TG3_NVRAM_SIZE_512KB);
11877                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11878                         tp->nvram_size = (protect ? 0x1f200 :
11879                                           TG3_NVRAM_SIZE_256KB);
11880                 else
11881                         tp->nvram_size = (protect ? 0x1f200 :
11882                                           TG3_NVRAM_SIZE_128KB);
11883                 break;
11884         case FLASH_5752VENDOR_ST_M45PE10:
11885         case FLASH_5752VENDOR_ST_M45PE20:
11886         case FLASH_5752VENDOR_ST_M45PE40:
11887                 tp->nvram_jedecnum = JEDEC_ST;
11888                 tg3_flag_set(tp, NVRAM_BUFFERED);
11889                 tg3_flag_set(tp, FLASH);
11890                 tp->nvram_pagesize = 256;
11891                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11892                         tp->nvram_size = (protect ?
11893                                           TG3_NVRAM_SIZE_64KB :
11894                                           TG3_NVRAM_SIZE_128KB);
11895                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11896                         tp->nvram_size = (protect ?
11897                                           TG3_NVRAM_SIZE_64KB :
11898                                           TG3_NVRAM_SIZE_256KB);
11899                 else
11900                         tp->nvram_size = (protect ?
11901                                           TG3_NVRAM_SIZE_128KB :
11902                                           TG3_NVRAM_SIZE_512KB);
11903                 break;
11904         }
11905 }
11906
11907 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11908 {
11909         u32 nvcfg1;
11910
11911         nvcfg1 = tr32(NVRAM_CFG1);
11912
11913         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11914         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11915         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11916         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11917         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11918                 tp->nvram_jedecnum = JEDEC_ATMEL;
11919                 tg3_flag_set(tp, NVRAM_BUFFERED);
11920                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11921
11922                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11923                 tw32(NVRAM_CFG1, nvcfg1);
11924                 break;
11925         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11926         case FLASH_5755VENDOR_ATMEL_FLASH_1:
11927         case FLASH_5755VENDOR_ATMEL_FLASH_2:
11928         case FLASH_5755VENDOR_ATMEL_FLASH_3:
11929                 tp->nvram_jedecnum = JEDEC_ATMEL;
11930                 tg3_flag_set(tp, NVRAM_BUFFERED);
11931                 tg3_flag_set(tp, FLASH);
11932                 tp->nvram_pagesize = 264;
11933                 break;
11934         case FLASH_5752VENDOR_ST_M45PE10:
11935         case FLASH_5752VENDOR_ST_M45PE20:
11936         case FLASH_5752VENDOR_ST_M45PE40:
11937                 tp->nvram_jedecnum = JEDEC_ST;
11938                 tg3_flag_set(tp, NVRAM_BUFFERED);
11939                 tg3_flag_set(tp, FLASH);
11940                 tp->nvram_pagesize = 256;
11941                 break;
11942         }
11943 }
11944
11945 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11946 {
11947         u32 nvcfg1, protect = 0;
11948
11949         nvcfg1 = tr32(NVRAM_CFG1);
11950
11951         /* NVRAM protection for TPM */
11952         if (nvcfg1 & (1 << 27)) {
11953                 tg3_flag_set(tp, PROTECTED_NVRAM);
11954                 protect = 1;
11955         }
11956
11957         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11958         switch (nvcfg1) {
11959         case FLASH_5761VENDOR_ATMEL_ADB021D:
11960         case FLASH_5761VENDOR_ATMEL_ADB041D:
11961         case FLASH_5761VENDOR_ATMEL_ADB081D:
11962         case FLASH_5761VENDOR_ATMEL_ADB161D:
11963         case FLASH_5761VENDOR_ATMEL_MDB021D:
11964         case FLASH_5761VENDOR_ATMEL_MDB041D:
11965         case FLASH_5761VENDOR_ATMEL_MDB081D:
11966         case FLASH_5761VENDOR_ATMEL_MDB161D:
11967                 tp->nvram_jedecnum = JEDEC_ATMEL;
11968                 tg3_flag_set(tp, NVRAM_BUFFERED);
11969                 tg3_flag_set(tp, FLASH);
11970                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
11971                 tp->nvram_pagesize = 256;
11972                 break;
11973         case FLASH_5761VENDOR_ST_A_M45PE20:
11974         case FLASH_5761VENDOR_ST_A_M45PE40:
11975         case FLASH_5761VENDOR_ST_A_M45PE80:
11976         case FLASH_5761VENDOR_ST_A_M45PE16:
11977         case FLASH_5761VENDOR_ST_M_M45PE20:
11978         case FLASH_5761VENDOR_ST_M_M45PE40:
11979         case FLASH_5761VENDOR_ST_M_M45PE80:
11980         case FLASH_5761VENDOR_ST_M_M45PE16:
11981                 tp->nvram_jedecnum = JEDEC_ST;
11982                 tg3_flag_set(tp, NVRAM_BUFFERED);
11983                 tg3_flag_set(tp, FLASH);
11984                 tp->nvram_pagesize = 256;
11985                 break;
11986         }
11987
11988         if (protect) {
11989                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11990         } else {
11991                 switch (nvcfg1) {
11992                 case FLASH_5761VENDOR_ATMEL_ADB161D:
11993                 case FLASH_5761VENDOR_ATMEL_MDB161D:
11994                 case FLASH_5761VENDOR_ST_A_M45PE16:
11995                 case FLASH_5761VENDOR_ST_M_M45PE16:
11996                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11997                         break;
11998                 case FLASH_5761VENDOR_ATMEL_ADB081D:
11999                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12000                 case FLASH_5761VENDOR_ST_A_M45PE80:
12001                 case FLASH_5761VENDOR_ST_M_M45PE80:
12002                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12003                         break;
12004                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12005                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12006                 case FLASH_5761VENDOR_ST_A_M45PE40:
12007                 case FLASH_5761VENDOR_ST_M_M45PE40:
12008                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12009                         break;
12010                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12011                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12012                 case FLASH_5761VENDOR_ST_A_M45PE20:
12013                 case FLASH_5761VENDOR_ST_M_M45PE20:
12014                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12015                         break;
12016                 }
12017         }
12018 }
12019
12020 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12021 {
12022         tp->nvram_jedecnum = JEDEC_ATMEL;
12023         tg3_flag_set(tp, NVRAM_BUFFERED);
12024         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12025 }
12026
12027 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12028 {
12029         u32 nvcfg1;
12030
12031         nvcfg1 = tr32(NVRAM_CFG1);
12032
12033         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12034         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12035         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12036                 tp->nvram_jedecnum = JEDEC_ATMEL;
12037                 tg3_flag_set(tp, NVRAM_BUFFERED);
12038                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12039
12040                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12041                 tw32(NVRAM_CFG1, nvcfg1);
12042                 return;
12043         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12044         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12045         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12046         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12047         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12048         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12049         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12050                 tp->nvram_jedecnum = JEDEC_ATMEL;
12051                 tg3_flag_set(tp, NVRAM_BUFFERED);
12052                 tg3_flag_set(tp, FLASH);
12053
12054                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12055                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12056                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12057                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12058                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12059                         break;
12060                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12061                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12062                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12063                         break;
12064                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12065                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12066                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12067                         break;
12068                 }
12069                 break;
12070         case FLASH_5752VENDOR_ST_M45PE10:
12071         case FLASH_5752VENDOR_ST_M45PE20:
12072         case FLASH_5752VENDOR_ST_M45PE40:
12073                 tp->nvram_jedecnum = JEDEC_ST;
12074                 tg3_flag_set(tp, NVRAM_BUFFERED);
12075                 tg3_flag_set(tp, FLASH);
12076
12077                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12078                 case FLASH_5752VENDOR_ST_M45PE10:
12079                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12080                         break;
12081                 case FLASH_5752VENDOR_ST_M45PE20:
12082                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12083                         break;
12084                 case FLASH_5752VENDOR_ST_M45PE40:
12085                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12086                         break;
12087                 }
12088                 break;
12089         default:
12090                 tg3_flag_set(tp, NO_NVRAM);
12091                 return;
12092         }
12093
12094         tg3_nvram_get_pagesize(tp, nvcfg1);
12095         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12096                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12097 }
12098
12099
12100 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12101 {
12102         u32 nvcfg1;
12103
12104         nvcfg1 = tr32(NVRAM_CFG1);
12105
12106         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12107         case FLASH_5717VENDOR_ATMEL_EEPROM:
12108         case FLASH_5717VENDOR_MICRO_EEPROM:
12109                 tp->nvram_jedecnum = JEDEC_ATMEL;
12110                 tg3_flag_set(tp, NVRAM_BUFFERED);
12111                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12112
12113                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12114                 tw32(NVRAM_CFG1, nvcfg1);
12115                 return;
12116         case FLASH_5717VENDOR_ATMEL_MDB011D:
12117         case FLASH_5717VENDOR_ATMEL_ADB011B:
12118         case FLASH_5717VENDOR_ATMEL_ADB011D:
12119         case FLASH_5717VENDOR_ATMEL_MDB021D:
12120         case FLASH_5717VENDOR_ATMEL_ADB021B:
12121         case FLASH_5717VENDOR_ATMEL_ADB021D:
12122         case FLASH_5717VENDOR_ATMEL_45USPT:
12123                 tp->nvram_jedecnum = JEDEC_ATMEL;
12124                 tg3_flag_set(tp, NVRAM_BUFFERED);
12125                 tg3_flag_set(tp, FLASH);
12126
12127                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12128                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12129                         /* Detect size with tg3_get_nvram_size() */
12130                         break;
12131                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12132                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12133                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12134                         break;
12135                 default:
12136                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12137                         break;
12138                 }
12139                 break;
12140         case FLASH_5717VENDOR_ST_M_M25PE10:
12141         case FLASH_5717VENDOR_ST_A_M25PE10:
12142         case FLASH_5717VENDOR_ST_M_M45PE10:
12143         case FLASH_5717VENDOR_ST_A_M45PE10:
12144         case FLASH_5717VENDOR_ST_M_M25PE20:
12145         case FLASH_5717VENDOR_ST_A_M25PE20:
12146         case FLASH_5717VENDOR_ST_M_M45PE20:
12147         case FLASH_5717VENDOR_ST_A_M45PE20:
12148         case FLASH_5717VENDOR_ST_25USPT:
12149         case FLASH_5717VENDOR_ST_45USPT:
12150                 tp->nvram_jedecnum = JEDEC_ST;
12151                 tg3_flag_set(tp, NVRAM_BUFFERED);
12152                 tg3_flag_set(tp, FLASH);
12153
12154                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12155                 case FLASH_5717VENDOR_ST_M_M25PE20:
12156                 case FLASH_5717VENDOR_ST_M_M45PE20:
12157                         /* Detect size with tg3_get_nvram_size() */
12158                         break;
12159                 case FLASH_5717VENDOR_ST_A_M25PE20:
12160                 case FLASH_5717VENDOR_ST_A_M45PE20:
12161                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12162                         break;
12163                 default:
12164                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12165                         break;
12166                 }
12167                 break;
12168         default:
12169                 tg3_flag_set(tp, NO_NVRAM);
12170                 return;
12171         }
12172
12173         tg3_nvram_get_pagesize(tp, nvcfg1);
12174         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12175                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12176 }
12177
12178 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12179 {
12180         u32 nvcfg1, nvmpinstrp;
12181
12182         nvcfg1 = tr32(NVRAM_CFG1);
12183         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12184
12185         switch (nvmpinstrp) {
12186         case FLASH_5720_EEPROM_HD:
12187         case FLASH_5720_EEPROM_LD:
12188                 tp->nvram_jedecnum = JEDEC_ATMEL;
12189                 tg3_flag_set(tp, NVRAM_BUFFERED);
12190
12191                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12192                 tw32(NVRAM_CFG1, nvcfg1);
12193                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12194                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12195                 else
12196                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12197                 return;
12198         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12199         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12200         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12201         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12202         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12203         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12204         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12205         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12206         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12207         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12208         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12209         case FLASH_5720VENDOR_ATMEL_45USPT:
12210                 tp->nvram_jedecnum = JEDEC_ATMEL;
12211                 tg3_flag_set(tp, NVRAM_BUFFERED);
12212                 tg3_flag_set(tp, FLASH);
12213
12214                 switch (nvmpinstrp) {
12215                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12216                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12217                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12218                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12219                         break;
12220                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12221                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12222                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12223                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12224                         break;
12225                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12226                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12227                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12228                         break;
12229                 default:
12230                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12231                         break;
12232                 }
12233                 break;
12234         case FLASH_5720VENDOR_M_ST_M25PE10:
12235         case FLASH_5720VENDOR_M_ST_M45PE10:
12236         case FLASH_5720VENDOR_A_ST_M25PE10:
12237         case FLASH_5720VENDOR_A_ST_M45PE10:
12238         case FLASH_5720VENDOR_M_ST_M25PE20:
12239         case FLASH_5720VENDOR_M_ST_M45PE20:
12240         case FLASH_5720VENDOR_A_ST_M25PE20:
12241         case FLASH_5720VENDOR_A_ST_M45PE20:
12242         case FLASH_5720VENDOR_M_ST_M25PE40:
12243         case FLASH_5720VENDOR_M_ST_M45PE40:
12244         case FLASH_5720VENDOR_A_ST_M25PE40:
12245         case FLASH_5720VENDOR_A_ST_M45PE40:
12246         case FLASH_5720VENDOR_M_ST_M25PE80:
12247         case FLASH_5720VENDOR_M_ST_M45PE80:
12248         case FLASH_5720VENDOR_A_ST_M25PE80:
12249         case FLASH_5720VENDOR_A_ST_M45PE80:
12250         case FLASH_5720VENDOR_ST_25USPT:
12251         case FLASH_5720VENDOR_ST_45USPT:
12252                 tp->nvram_jedecnum = JEDEC_ST;
12253                 tg3_flag_set(tp, NVRAM_BUFFERED);
12254                 tg3_flag_set(tp, FLASH);
12255
12256                 switch (nvmpinstrp) {
12257                 case FLASH_5720VENDOR_M_ST_M25PE20:
12258                 case FLASH_5720VENDOR_M_ST_M45PE20:
12259                 case FLASH_5720VENDOR_A_ST_M25PE20:
12260                 case FLASH_5720VENDOR_A_ST_M45PE20:
12261                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12262                         break;
12263                 case FLASH_5720VENDOR_M_ST_M25PE40:
12264                 case FLASH_5720VENDOR_M_ST_M45PE40:
12265                 case FLASH_5720VENDOR_A_ST_M25PE40:
12266                 case FLASH_5720VENDOR_A_ST_M45PE40:
12267                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12268                         break;
12269                 case FLASH_5720VENDOR_M_ST_M25PE80:
12270                 case FLASH_5720VENDOR_M_ST_M45PE80:
12271                 case FLASH_5720VENDOR_A_ST_M25PE80:
12272                 case FLASH_5720VENDOR_A_ST_M45PE80:
12273                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12274                         break;
12275                 default:
12276                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12277                         break;
12278                 }
12279                 break;
12280         default:
12281                 tg3_flag_set(tp, NO_NVRAM);
12282                 return;
12283         }
12284
12285         tg3_nvram_get_pagesize(tp, nvcfg1);
12286         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12287                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12288 }
12289
12290 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12291 static void __devinit tg3_nvram_init(struct tg3 *tp)
12292 {
12293         tw32_f(GRC_EEPROM_ADDR,
12294              (EEPROM_ADDR_FSM_RESET |
12295               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12296                EEPROM_ADDR_CLKPERD_SHIFT)));
12297
12298         msleep(1);
12299
12300         /* Enable seeprom accesses. */
12301         tw32_f(GRC_LOCAL_CTRL,
12302              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12303         udelay(100);
12304
12305         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12306             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12307                 tg3_flag_set(tp, NVRAM);
12308
12309                 if (tg3_nvram_lock(tp)) {
12310                         netdev_warn(tp->dev,
12311                                     "Cannot get nvram lock, %s failed\n",
12312                                     __func__);
12313                         return;
12314                 }
12315                 tg3_enable_nvram_access(tp);
12316
12317                 tp->nvram_size = 0;
12318
12319                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12320                         tg3_get_5752_nvram_info(tp);
12321                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12322                         tg3_get_5755_nvram_info(tp);
12323                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12324                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12325                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12326                         tg3_get_5787_nvram_info(tp);
12327                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12328                         tg3_get_5761_nvram_info(tp);
12329                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12330                         tg3_get_5906_nvram_info(tp);
12331                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12332                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12333                         tg3_get_57780_nvram_info(tp);
12334                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12335                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12336                         tg3_get_5717_nvram_info(tp);
12337                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12338                         tg3_get_5720_nvram_info(tp);
12339                 else
12340                         tg3_get_nvram_info(tp);
12341
12342                 if (tp->nvram_size == 0)
12343                         tg3_get_nvram_size(tp);
12344
12345                 tg3_disable_nvram_access(tp);
12346                 tg3_nvram_unlock(tp);
12347
12348         } else {
12349                 tg3_flag_clear(tp, NVRAM);
12350                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12351
12352                 tg3_get_eeprom_size(tp);
12353         }
12354 }
12355
12356 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12357                                     u32 offset, u32 len, u8 *buf)
12358 {
12359         int i, j, rc = 0;
12360         u32 val;
12361
12362         for (i = 0; i < len; i += 4) {
12363                 u32 addr;
12364                 __be32 data;
12365
12366                 addr = offset + i;
12367
12368                 memcpy(&data, buf + i, 4);
12369
12370                 /*
12371                  * The SEEPROM interface expects the data to always be opposite
12372                  * the native endian format.  We accomplish this by reversing
12373                  * all the operations that would have been performed on the
12374                  * data from a call to tg3_nvram_read_be32().
12375                  */
12376                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
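                /*
                 * Worked example: for buffer bytes 12 34 56 78,
                 * be32_to_cpu() yields 0x12345678 on any CPU, and
                 * swab32() then presents 0x78563412 to the register, so
                 * the bytes reach the SEEPROM in the reverse of native
                 * order, as required.
                 */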
12377
12378                 val = tr32(GRC_EEPROM_ADDR);
12379                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12380
12381                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12382                         EEPROM_ADDR_READ);
12383                 tw32(GRC_EEPROM_ADDR, val |
12384                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12385                         (addr & EEPROM_ADDR_ADDR_MASK) |
12386                         EEPROM_ADDR_START |
12387                         EEPROM_ADDR_WRITE);
12388
12389                 for (j = 0; j < 1000; j++) {
12390                         val = tr32(GRC_EEPROM_ADDR);
12391
12392                         if (val & EEPROM_ADDR_COMPLETE)
12393                                 break;
12394                         msleep(1);
12395                 }
12396                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12397                         rc = -EBUSY;
12398                         break;
12399                 }
12400         }
12401
12402         return rc;
12403 }
12404
12405 /* offset and length are dword aligned */
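      /* Unbuffered flash parts cannot absorb partial-page writes: each
       * affected page is read into a scratch buffer, the new data is
       * merged in, the page is erased, and the merged page is written
       * back word by word, with write-enable commands issued before
       * both the erase and the write.
       */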
12406 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12407                 u8 *buf)
12408 {
12409         int ret = 0;
12410         u32 pagesize = tp->nvram_pagesize;
12411         u32 pagemask = pagesize - 1;
12412         u32 nvram_cmd;
12413         u8 *tmp;
12414
12415         tmp = kmalloc(pagesize, GFP_KERNEL);
12416         if (tmp == NULL)
12417                 return -ENOMEM;
12418
12419         while (len) {
12420                 int j;
12421                 u32 phy_addr, page_off, size;
12422
12423                 phy_addr = offset & ~pagemask;
12424
12425                 for (j = 0; j < pagesize; j += 4) {
12426                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12427                                                   (__be32 *) (tmp + j));
12428                         if (ret)
12429                                 break;
12430                 }
12431                 if (ret)
12432                         break;
12433
12434                 page_off = offset & pagemask;
12435                 size = pagesize;
12436                 if (len < size)
12437                         size = len;
12438
12439                 len -= size;
12440
12441                 memcpy(tmp + page_off, buf, size);
12442
12443                 offset = offset + (pagesize - page_off);
12444
12445                 tg3_enable_nvram_access(tp);
12446
12447                 /*
12448                  * Before we can erase the flash page, we need
12449                  * to issue a special "write enable" command.
12450                  */
12451                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12452
12453                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12454                         break;
12455
12456                 /* Erase the target page */
12457                 tw32(NVRAM_ADDR, phy_addr);
12458
12459                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12460                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12461
12462                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12463                         break;
12464
12465                 /* Issue another write enable to start the write. */
12466                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12467
12468                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12469                         break;
12470
12471                 for (j = 0; j < pagesize; j += 4) {
12472                         __be32 data;
12473
12474                         data = *((__be32 *) (tmp + j));
12475
12476                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12477
12478                         tw32(NVRAM_ADDR, phy_addr + j);
12479
12480                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12481                                 NVRAM_CMD_WR;
12482
12483                         if (j == 0)
12484                                 nvram_cmd |= NVRAM_CMD_FIRST;
12485                         else if (j == (pagesize - 4))
12486                                 nvram_cmd |= NVRAM_CMD_LAST;
12487
12488                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12489                                 break;
12490                 }
12491                 if (ret)
12492                         break;
12493         }
12494
12495         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12496         tg3_nvram_exec_cmd(tp, nvram_cmd);
12497
12498         kfree(tmp);
12499
12500         return ret;
12501 }
12502
12503 /* offset and length are dword aligned */
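      /* Buffered parts (and plain EEPROMs) accept word-at-a-time
       * writes; NVRAM_CMD_FIRST/LAST mark page and transfer boundaries
       * for the controller, and ST flash on older (pre-5752/5755)
       * chips additionally gets a write-enable command at the start of
       * each page.
       */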
12504 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12505                 u8 *buf)
12506 {
12507         int i, ret = 0;
12508
12509         for (i = 0; i < len; i += 4, offset += 4) {
12510                 u32 page_off, phy_addr, nvram_cmd;
12511                 __be32 data;
12512
12513                 memcpy(&data, buf + i, 4);
12514                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12515
12516                 page_off = offset % tp->nvram_pagesize;
12517
12518                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12519
12520                 tw32(NVRAM_ADDR, phy_addr);
12521
12522                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12523
12524                 if (page_off == 0 || i == 0)
12525                         nvram_cmd |= NVRAM_CMD_FIRST;
12526                 if (page_off == (tp->nvram_pagesize - 4))
12527                         nvram_cmd |= NVRAM_CMD_LAST;
12528
12529                 if (i == (len - 4))
12530                         nvram_cmd |= NVRAM_CMD_LAST;
12531
12532                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12533                     !tg3_flag(tp, 5755_PLUS) &&
12534                     (tp->nvram_jedecnum == JEDEC_ST) &&
12535                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12536                         if ((ret = tg3_nvram_exec_cmd(tp,
12537                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12538                                 NVRAM_CMD_DONE)))
12539                                 break;
12542                 }
12543                 if (!tg3_flag(tp, FLASH)) {
12544                         /* We always do complete word writes to eeprom. */
12545                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12546                 }
12547
12548                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12549                         break;
12550         }
12551         return ret;
12552 }
12553
12554 /* offset and length are dword aligned */
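      /* Top-level NVRAM write: momentarily drop the write-protect GPIO
       * if the part is protected, dispatch to the legacy SEEPROM path
       * or the buffered/unbuffered NVRAM paths under GRC write-enable,
       * then restore write protection.
       */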
12555 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12556 {
12557         int ret;
12558
12559         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12560                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12561                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12562                 udelay(40);
12563         }
12564
12565         if (!tg3_flag(tp, NVRAM)) {
12566                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12567         } else {
12568                 u32 grc_mode;
12569
12570                 ret = tg3_nvram_lock(tp);
12571                 if (ret)
12572                         return ret;
12573
12574                 tg3_enable_nvram_access(tp);
12575                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12576                         tw32(NVRAM_WRITE1, 0x406);
12577
12578                 grc_mode = tr32(GRC_MODE);
12579                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12580
12581                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12582                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12583                                 buf);
12584                 } else {
12585                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12586                                 buf);
12587                 }
12588
12589                 grc_mode = tr32(GRC_MODE);
12590                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12591
12592                 tg3_disable_nvram_access(tp);
12593                 tg3_nvram_unlock(tp);
12594         }
12595
12596         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12597                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12598                 udelay(40);
12599         }
12600
12601         return ret;
12602 }
12603
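      /* Some boards carry no usable PHY ID in NVRAM; this table maps
       * their PCI subsystem vendor/device IDs to the correct PHY ID
       * instead (a phy_id of 0 denotes a serdes board).
       */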
12604 struct subsys_tbl_ent {
12605         u16 subsys_vendor, subsys_devid;
12606         u32 phy_id;
12607 };
12608
12609 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12610         /* Broadcom boards. */
12611         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12612           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12613         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12614           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12615         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12616           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12617         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12618           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12619         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12620           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12621         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12622           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12623         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12624           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12625         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12626           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12627         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12628           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12629         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12630           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12631         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12632           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12633
12634         /* 3com boards. */
12635         { TG3PCI_SUBVENDOR_ID_3COM,
12636           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12637         { TG3PCI_SUBVENDOR_ID_3COM,
12638           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12639         { TG3PCI_SUBVENDOR_ID_3COM,
12640           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12641         { TG3PCI_SUBVENDOR_ID_3COM,
12642           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12643         { TG3PCI_SUBVENDOR_ID_3COM,
12644           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12645
12646         /* DELL boards. */
12647         { TG3PCI_SUBVENDOR_ID_DELL,
12648           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12649         { TG3PCI_SUBVENDOR_ID_DELL,
12650           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12651         { TG3PCI_SUBVENDOR_ID_DELL,
12652           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12653         { TG3PCI_SUBVENDOR_ID_DELL,
12654           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12655
12656         /* Compaq boards. */
12657         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12658           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12659         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12660           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12661         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12662           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12663         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12664           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12665         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12666           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12667
12668         /* IBM boards. */
12669         { TG3PCI_SUBVENDOR_ID_IBM,
12670           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12671 };
12672
12673 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12674 {
12675         int i;
12676
12677         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12678                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12679                      tp->pdev->subsystem_vendor) &&
12680                     (subsys_id_to_phy_id[i].subsys_devid ==
12681                      tp->pdev->subsystem_device))
12682                         return &subsys_id_to_phy_id[i];
12683         }
12684         return NULL;
12685 }
12686
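      /* Pull the bootcode-provided hardware configuration out of NIC
       * SRAM (or, on the 5906, out of the VCPU shadow registers): PHY
       * ID and type, LED mode, write protection, and the WOL/ASF/APE
       * feature flags.
       */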
12687 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12688 {
12689         u32 val;
12690
12691         tp->phy_id = TG3_PHY_ID_INVALID;
12692         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12693
12694         /* Assume an onboard, WOL-capable device by default.  */
12695         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12696         tg3_flag_set(tp, WOL_CAP);
12697
12698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12699                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12700                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12701                         tg3_flag_set(tp, IS_NIC);
12702                 }
12703                 val = tr32(VCPU_CFGSHDW);
12704                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12705                         tg3_flag_set(tp, ASPM_WORKAROUND);
12706                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12707                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
12708                         tg3_flag_set(tp, WOL_ENABLE);
12709                         device_set_wakeup_enable(&tp->pdev->dev, true);
12710                 }
12711                 goto done;
12712         }
12713
12714         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12715         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12716                 u32 nic_cfg, led_cfg;
12717                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12718                 int eeprom_phy_serdes = 0;
12719
12720                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12721                 tp->nic_sram_data_cfg = nic_cfg;
12722
12723                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12724                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12725                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12726                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12727                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
12728                     (ver > 0) && (ver < 0x100))
12729                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12730
12731                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12732                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12733
12734                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12735                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12736                         eeprom_phy_serdes = 1;
12737
12738                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12739                 if (nic_phy_id != 0) {
12740                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12741                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12742
12743                         eeprom_phy_id  = (id1 >> 16) << 10;
12744                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
12745                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12746                 } else
12747                         eeprom_phy_id = 0;
12748
12749                 tp->phy_id = eeprom_phy_id;
12750                 if (eeprom_phy_serdes) {
12751                         if (!tg3_flag(tp, 5705_PLUS))
12752                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12753                         else
12754                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12755                 }
12756
12757                 if (tg3_flag(tp, 5750_PLUS))
12758                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12759                                     SHASTA_EXT_LED_MODE_MASK);
12760                 else
12761                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12762
12763                 switch (led_cfg) {
12764                 default:
12765                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12766                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12767                         break;
12768
12769                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12770                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12771                         break;
12772
12773                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12774                         tp->led_ctrl = LED_CTRL_MODE_MAC;
12775
12776                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12777                          * read from some older 5700/5701 bootcode.
12778                          */
12779                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12780                             ASIC_REV_5700 ||
12781                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
12782                             ASIC_REV_5701)
12783                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12784
12785                         break;
12786
12787                 case SHASTA_EXT_LED_SHARED:
12788                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
12789                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12790                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12791                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12792                                                  LED_CTRL_MODE_PHY_2);
12793                         break;
12794
12795                 case SHASTA_EXT_LED_MAC:
12796                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12797                         break;
12798
12799                 case SHASTA_EXT_LED_COMBO:
12800                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
12801                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12802                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12803                                                  LED_CTRL_MODE_PHY_2);
12804                         break;
12805
12806                 }
12807
12808                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12809                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12810                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12811                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12812
12813                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12814                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12815
12816                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12817                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
12818                         if ((tp->pdev->subsystem_vendor ==
12819                              PCI_VENDOR_ID_ARIMA) &&
12820                             (tp->pdev->subsystem_device == 0x205a ||
12821                              tp->pdev->subsystem_device == 0x2063))
12822                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12823                 } else {
12824                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
12825                         tg3_flag_set(tp, IS_NIC);
12826                 }
12827
12828                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12829                         tg3_flag_set(tp, ENABLE_ASF);
12830                         if (tg3_flag(tp, 5750_PLUS))
12831                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
12832                 }
12833
12834                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12835                     tg3_flag(tp, 5750_PLUS))
12836                         tg3_flag_set(tp, ENABLE_APE);
12837
12838                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12839                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12840                         tg3_flag_clear(tp, WOL_CAP);
12841
12842                 if (tg3_flag(tp, WOL_CAP) &&
12843                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
12844                         tg3_flag_set(tp, WOL_ENABLE);
12845                         device_set_wakeup_enable(&tp->pdev->dev, true);
12846                 }
12847
12848                 if (cfg2 & (1 << 17))
12849                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12850
12851                 /* Serdes signal pre-emphasis in register 0x590 is set
12852                  * by the bootcode if bit 18 is set. */
12853                 if (cfg2 & (1 << 18))
12854                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12855
12856                 if ((tg3_flag(tp, 57765_PLUS) ||
12857                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12858                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12859                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12860                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12861
12862                 if (tg3_flag(tp, PCI_EXPRESS) &&
12863                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12864                     !tg3_flag(tp, 57765_PLUS)) {
12865                         u32 cfg3;
12866
12867                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12868                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12869                                 tg3_flag_set(tp, ASPM_WORKAROUND);
12870                 }
12871
12872                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12873                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
12874                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12875                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
12876                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12877                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
12878         }
12879 done:
12880         if (tg3_flag(tp, WOL_CAP))
12881                 device_set_wakeup_enable(&tp->pdev->dev,
12882                                          tg3_flag(tp, WOL_ENABLE));
12883         else
12884                 device_set_wakeup_capable(&tp->pdev->dev, false);
12885 }
12886
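      /* Kick off a single OTP controller command and poll the status
       * register for completion for at most 1 ms.
       */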
12887 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12888 {
12889         int i;
12890         u32 val;
12891
12892         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12893         tw32(OTP_CTRL, cmd);
12894
12895         /* Wait for up to 1 ms for command to execute. */
12896         for (i = 0; i < 100; i++) {
12897                 val = tr32(OTP_STATUS);
12898                 if (val & OTP_STATUS_CMD_DONE)
12899                         break;
12900                 udelay(10);
12901         }
12902
12903         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12904 }
12905
12906 /* Read the gphy configuration from the OTP region of the chip.  The gphy
12907  * configuration is a 32-bit value that straddles the alignment boundary.
12908  * We do two 32-bit reads and then shift and merge the results.
12909  */
12910 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12911 {
12912         u32 bhalf_otp, thalf_otp;
12913
12914         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12915
12916         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12917                 return 0;
12918
12919         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12920
12921         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12922                 return 0;
12923
12924         thalf_otp = tr32(OTP_READ_DATA);
12925
12926         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12927
12928         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12929                 return 0;
12930
12931         bhalf_otp = tr32(OTP_READ_DATA);
12932
12933         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12934 }
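
      /* Illustrative sketch (not called by the driver): the gphy config
       * straddles a 32-bit boundary, so the final value is the low half
       * of the word read at OTP_ADDRESS_MAGIC1 joined to the high half
       * of the word read at OTP_ADDRESS_MAGIC2, exactly as computed
       * above.
       */
      static inline u32 tg3_otp_phycfg_merge(u32 thalf_otp, u32 bhalf_otp)
      {
              return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
      }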
12935
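      /* Seed the link configuration with everything-advertised autoneg
       * defaults: gigabit unless the PHY is 10/100-only, and
       * twisted-pair 10/100 modes unless this is a serdes device.
       */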
12936 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12937 {
12938         u32 adv = ADVERTISED_Autoneg |
12939                   ADVERTISED_Pause;
12940
12941         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12942                 adv |= ADVERTISED_1000baseT_Half |
12943                        ADVERTISED_1000baseT_Full;
12944
12945         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12946                 adv |= ADVERTISED_100baseT_Half |
12947                        ADVERTISED_100baseT_Full |
12948                        ADVERTISED_10baseT_Half |
12949                        ADVERTISED_10baseT_Full |
12950                        ADVERTISED_TP;
12951         else
12952                 adv |= ADVERTISED_FIBRE;
12953
12954         tp->link_config.advertising = adv;
12955         tp->link_config.speed = SPEED_INVALID;
12956         tp->link_config.duplex = DUPLEX_INVALID;
12957         tp->link_config.autoneg = AUTONEG_ENABLE;
12958         tp->link_config.active_speed = SPEED_INVALID;
12959         tp->link_config.active_duplex = DUPLEX_INVALID;
12960         tp->link_config.orig_speed = SPEED_INVALID;
12961         tp->link_config.orig_duplex = DUPLEX_INVALID;
12962         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12963 }
12964
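      /* Identify the PHY.  When ASF/APE firmware owns the MDIO bus the
       * hardware ID registers are skipped entirely; otherwise the ID is
       * read from MII_PHYSID1/2 with EEPROM and subsystem-table
       * fallbacks, and when no link is up the PHY is reset and
       * autonegotiation restarted.
       */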
12965 static int __devinit tg3_phy_probe(struct tg3 *tp)
12966 {
12967         u32 hw_phy_id_1, hw_phy_id_2;
12968         u32 hw_phy_id, hw_phy_id_masked;
12969         int err;
12970
12971         /* flow control autonegotiation is default behavior */
12972         tg3_flag_set(tp, PAUSE_AUTONEG);
12973         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
12974
12975         if (tg3_flag(tp, USE_PHYLIB))
12976                 return tg3_phy_init(tp);
12977
12978         /* Reading the PHY ID register can conflict with ASF
12979          * firmware access to the PHY hardware.
12980          */
12981         err = 0;
12982         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
12983                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12984         } else {
12985                 /* Now read the physical PHY_ID from the chip and verify
12986                  * that it is sane.  If it doesn't look good, we fall back
12987                  * to the PHY_ID found in the eeprom area and, failing
12988                  * that, to the hard-coded subsystem-ID table.
12989                  */
12990                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12991                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12992
12993                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12994                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12995                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
12996
12997                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12998         }
12999
13000         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13001                 tp->phy_id = hw_phy_id;
13002                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13003                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13004                 else
13005                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13006         } else {
13007                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13008                         /* Do nothing, phy ID already set up in
13009                          * tg3_get_eeprom_hw_cfg().
13010                          */
13011                 } else {
13012                         struct subsys_tbl_ent *p;
13013
13014                         /* No eeprom signature?  Try the hardcoded
13015                          * subsys device table.
13016                          */
13017                         p = tg3_lookup_by_subsys(tp);
13018                         if (!p)
13019                                 return -ENODEV;
13020
13021                         tp->phy_id = p->phy_id;
13022                         if (!tp->phy_id ||
13023                             tp->phy_id == TG3_PHY_ID_BCM8002)
13024                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13025                 }
13026         }
13027
13028         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13029             ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13030               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13031              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13032               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13033                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13034
13035         tg3_phy_init_link_config(tp);
13036
13037         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13038             !tg3_flag(tp, ENABLE_APE) &&
13039             !tg3_flag(tp, ENABLE_ASF)) {
13040                 u32 bmsr, mask;
13041
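                      /* BMSR latches link-down events; read it twice so
                       * the second read reflects the current link state.
                       */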
13042                 tg3_readphy(tp, MII_BMSR, &bmsr);
13043                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13044                     (bmsr & BMSR_LSTATUS))
13045                         goto skip_phy_reset;
13046
13047                 err = tg3_phy_reset(tp);
13048                 if (err)
13049                         return err;
13050
13051                 tg3_phy_set_wirespeed(tp);
13052
13053                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13054                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13055                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13056                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13057                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13058                                             tp->link_config.flowctrl);
13059
13060                         tg3_writephy(tp, MII_BMCR,
13061                                      BMCR_ANENABLE | BMCR_ANRESTART);
13062                 }
13063         }
13064
13065 skip_phy_reset:
13066         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13067                 err = tg3_init_5401phy_dsp(tp);
13068                 if (err)
13069                         return err;
13070
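                      /* The DSP setup is issued a second time on purpose;
                       * the 5401 apparently needs the coefficients loaded
                       * twice to take effect.
                       */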
13071                 err = tg3_init_5401phy_dsp(tp);
13072         }
13073
13074         return err;
13075 }
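
      /* Illustrative sketch (not called by the driver): the MII path
       * above packs the 32-bit tg3 PHY ID from the two 16-bit
       * MII_PHYSID words with these shifts; the NVRAM path in
       * tg3_get_eeprom_hw_cfg() uses the same encoding.
       */
      static inline u32 tg3_phy_id_pack(u32 id1, u32 id2)
      {
              u32 phy_id;

              phy_id  = (id1 & 0xffff) << 10;
              phy_id |= (id2 & 0xfc00) << 16;
              phy_id |= (id2 & 0x03ff);
              return phy_id;
      }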
13076
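      /* Parse the read-only section of the PCI VPD block: pick up the
       * vendor firmware string (keyword V0, only when the manufacturer
       * ID reads "1028", i.e. Dell) and the board part number, falling
       * back to names keyed off the PCI device ID when no VPD exists.
       */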
13077 static void __devinit tg3_read_vpd(struct tg3 *tp)
13078 {
13079         u8 *vpd_data;
13080         unsigned int block_end, rosize, len;
13081         int j, i = 0;
13082
13083         vpd_data = (u8 *)tg3_vpd_readblock(tp);
13084         if (!vpd_data)
13085                 goto out_no_vpd;
13086
13087         i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
13088                              PCI_VPD_LRDT_RO_DATA);
13089         if (i < 0)
13090                 goto out_not_found;
13091
13092         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13093         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13094         i += PCI_VPD_LRDT_TAG_SIZE;
13095
13096         if (block_end > TG3_NVM_VPD_LEN)
13097                 goto out_not_found;
13098
13099         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13100                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13101         if (j > 0) {
13102                 len = pci_vpd_info_field_size(&vpd_data[j]);
13103
13104                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13105                 if (j + len > block_end || len != 4 ||
13106                     memcmp(&vpd_data[j], "1028", 4))
13107                         goto partno;
13108
13109                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13110                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13111                 if (j < 0)
13112                         goto partno;
13113
13114                 len = pci_vpd_info_field_size(&vpd_data[j]);
13115
13116                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13117                 if (j + len > block_end)
13118                         goto partno;
13119
13120                 memcpy(tp->fw_ver, &vpd_data[j], len);
13121                 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
13122         }
13123
13124 partno:
13125         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13126                                       PCI_VPD_RO_KEYWORD_PARTNO);
13127         if (i < 0)
13128                 goto out_not_found;
13129
13130         len = pci_vpd_info_field_size(&vpd_data[i]);
13131
13132         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13133         if (len > TG3_BPN_SIZE ||
13134             (len + i) > TG3_NVM_VPD_LEN)
13135                 goto out_not_found;
13136
13137         memcpy(tp->board_part_number, &vpd_data[i], len);
13138
13139 out_not_found:
13140         kfree(vpd_data);
13141         if (tp->board_part_number[0])
13142                 return;
13143
13144 out_no_vpd:
13145         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13146                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13147                         strcpy(tp->board_part_number, "BCM5717");
13148                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13149                         strcpy(tp->board_part_number, "BCM5718");
13150                 else
13151                         goto nomatch;
13152         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13153                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13154                         strcpy(tp->board_part_number, "BCM57780");
13155                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13156                         strcpy(tp->board_part_number, "BCM57760");
13157                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13158                         strcpy(tp->board_part_number, "BCM57790");
13159                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13160                         strcpy(tp->board_part_number, "BCM57788");
13161                 else
13162                         goto nomatch;
13163         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13164                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13165                         strcpy(tp->board_part_number, "BCM57761");
13166                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13167                         strcpy(tp->board_part_number, "BCM57765");
13168                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13169                         strcpy(tp->board_part_number, "BCM57781");
13170                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13171                         strcpy(tp->board_part_number, "BCM57785");
13172                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13173                         strcpy(tp->board_part_number, "BCM57791");
13174                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13175                         strcpy(tp->board_part_number, "BCM57795");
13176                 else
13177                         goto nomatch;
13178         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13179                 strcpy(tp->board_part_number, "BCM95906");
13180         } else {
13181 nomatch:
13182                 strcpy(tp->board_part_number, "none");
13183         }
13184 }
13185
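      /* A valid firmware image starts with a header whose first word
       * is 0x0c000000 under the 0xfc000000 mask and whose second word
       * is zero.
       */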
13186 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13187 {
13188         u32 val;
13189
13190         if (tg3_nvram_read(tp, offset, &val) ||
13191             (val & 0xfc000000) != 0x0c000000 ||
13192             tg3_nvram_read(tp, offset + 4, &val) ||
13193             val != 0)
13194                 return 0;
13195
13196         return 1;
13197 }
13198
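      /* Extract the bootcode version: newer images embed a 16-byte
       * version string at an offset named in the header, while legacy
       * images only provide packed major/minor numbers in the NVM
       * directory.
       */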
13199 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13200 {
13201         u32 val, offset, start, ver_offset;
13202         int i, dst_off;
13203         bool newver = false;
13204
13205         if (tg3_nvram_read(tp, 0xc, &offset) ||
13206             tg3_nvram_read(tp, 0x4, &start))
13207                 return;
13208
13209         offset = tg3_nvram_logical_addr(tp, offset);
13210
13211         if (tg3_nvram_read(tp, offset, &val))
13212                 return;
13213
13214         if ((val & 0xfc000000) == 0x0c000000) {
13215                 if (tg3_nvram_read(tp, offset + 4, &val))
13216                         return;
13217
13218                 if (val == 0)
13219                         newver = true;
13220         }
13221
13222         dst_off = strlen(tp->fw_ver);
13223
13224         if (newver) {
13225                 if (TG3_VER_SIZE - dst_off < 16 ||
13226                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13227                         return;
13228
13229                 offset = offset + ver_offset - start;
13230                 for (i = 0; i < 16; i += 4) {
13231                         __be32 v;
13232                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13233                                 return;
13234
13235                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13236                 }
13237         } else {
13238                 u32 major, minor;
13239
13240                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13241                         return;
13242
13243                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13244                         TG3_NVM_BCVER_MAJSFT;
13245                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13246                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13247                          "v%d.%02d", major, minor);
13248         }
13249 }
13250
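      /* Hardware selfboot parts keep a packed major/minor version in
       * their first config word; format it as "sb v<maj>.<min>".
       */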
13251 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13252 {
13253         u32 val, major, minor;
13254
13255         /* Use native endian representation */
13256         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13257                 return;
13258
13259         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13260                 TG3_NVM_HWSB_CFG1_MAJSFT;
13261         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13262                 TG3_NVM_HWSB_CFG1_MINSFT;
13263
13264         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13265 }
13266
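      /* Selfboot (format 1) images store a packed major/minor/build
       * word at a revision-dependent offset; decode it into a
       * "sb v<maj>.<min>" string plus an optional build-letter suffix
       * ('a' for build 1, and so on).
       */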
13267 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13268 {
13269         u32 offset, major, minor, build;
13270
13271         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13272
13273         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13274                 return;
13275
13276         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13277         case TG3_EEPROM_SB_REVISION_0:
13278                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13279                 break;
13280         case TG3_EEPROM_SB_REVISION_2:
13281                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13282                 break;
13283         case TG3_EEPROM_SB_REVISION_3:
13284                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13285                 break;
13286         case TG3_EEPROM_SB_REVISION_4:
13287                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13288                 break;
13289         case TG3_EEPROM_SB_REVISION_5:
13290                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13291                 break;
13292         case TG3_EEPROM_SB_REVISION_6:
13293                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13294                 break;
13295         default:
13296                 return;
13297         }
13298
13299         if (tg3_nvram_read(tp, offset, &val))
13300                 return;
13301
13302         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13303                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13304         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13305                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13306         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13307
13308         if (minor > 99 || build > 26)
13309                 return;
13310
13311         offset = strlen(tp->fw_ver);
13312         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13313                  " v%d.%02d", major, minor);
13314
13315         if (build > 0) {
13316                 offset = strlen(tp->fw_ver);
13317                 if (offset < TG3_VER_SIZE - 1)
13318                         tp->fw_ver[offset] = 'a' + build - 1;
13319         }
13320 }
13321
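      /* Walk the NVM directory for the ASF management firmware image
       * and, if a valid image is found, append its four-word version
       * string to tp->fw_ver.
       */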
13322 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13323 {
13324         u32 val, offset, start;
13325         int i, vlen;
13326
13327         for (offset = TG3_NVM_DIR_START;
13328              offset < TG3_NVM_DIR_END;
13329              offset += TG3_NVM_DIRENT_SIZE) {
13330                 if (tg3_nvram_read(tp, offset, &val))
13331                         return;
13332
13333                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13334                         break;
13335         }
13336
13337         if (offset == TG3_NVM_DIR_END)
13338                 return;
13339
13340         if (!tg3_flag(tp, 5705_PLUS))
13341                 start = 0x08000000;
13342         else if (tg3_nvram_read(tp, offset - 4, &start))
13343                 return;
13344
13345         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13346             !tg3_fw_img_is_valid(tp, offset) ||
13347             tg3_nvram_read(tp, offset + 8, &val))
13348                 return;
13349
13350         offset += val - start;
13351
13352         vlen = strlen(tp->fw_ver);
13353
13354         tp->fw_ver[vlen++] = ',';
13355         tp->fw_ver[vlen++] = ' ';
13356
13357         for (i = 0; i < 4; i++) {
13358                 __be32 v;
13359                 if (tg3_nvram_read_be32(tp, offset, &v))
13360                         return;
13361
13362                 offset += sizeof(v);
13363
13364                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13365                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13366                         break;
13367                 }
13368
13369                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13370                 vlen += sizeof(v);
13371         }
13372 }
13373
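      /* Query the APE for its out-of-band management firmware version
       * and append it, labelled "NCSI" or "DASH" according to the
       * advertised feature bits.
       */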
13374 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13375 {
13376         int vlen;
13377         u32 apedata;
13378         char *fwtype;
13379
13380         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13381                 return;
13382
13383         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13384         if (apedata != APE_SEG_SIG_MAGIC)
13385                 return;
13386
13387         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13388         if (!(apedata & APE_FW_STATUS_READY))
13389                 return;
13390
13391         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13392
13393         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13394                 tg3_flag_set(tp, APE_HAS_NCSI);
13395                 fwtype = "NCSI";
13396         } else {
13397                 fwtype = "DASH";
13398         }
13399
13400         vlen = strlen(tp->fw_ver);
13401
13402         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13403                  fwtype,
13404                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13405                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13406                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13407                  (apedata & APE_FW_VERSION_BLDMSK));
13408 }
13409
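      /* Build the complete firmware version string: dispatch on the
       * NVRAM magic value to the bootcode/selfboot/hardware-selfboot
       * readers, then append management firmware info unless VPD
       * already supplied a version.
       */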
13410 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13411 {
13412         u32 val;
13413         bool vpd_vers = false;
13414
13415         if (tp->fw_ver[0] != 0)
13416                 vpd_vers = true;
13417
13418         if (tg3_flag(tp, NO_NVRAM)) {
13419                 strcat(tp->fw_ver, "sb");
13420                 return;
13421         }
13422
13423         if (tg3_nvram_read(tp, 0, &val))
13424                 return;
13425
13426         if (val == TG3_EEPROM_MAGIC)
13427                 tg3_read_bc_ver(tp);
13428         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13429                 tg3_read_sb_ver(tp, val);
13430         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13431                 tg3_read_hwsb_ver(tp);
13432         else
13433                 return;
13434
13435         if (vpd_vers)
13436                 goto done;
13437
13438         if (tg3_flag(tp, ENABLE_APE)) {
13439                 if (tg3_flag(tp, ENABLE_ASF))
13440                         tg3_read_dash_ver(tp);
13441         } else if (tg3_flag(tp, ENABLE_ASF)) {
13442                 tg3_read_mgmtfw_ver(tp);
13443         }
13444
13445 done:
13446         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13447 }
13448
13449 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13450
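      /* Pick the RX return ring size supported by this chip family. */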
13451 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13452 {
13453         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13454                 return TG3_RX_RET_MAX_SIZE_5717;
13455         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13456                 return TG3_RX_RET_MAX_SIZE_5700;
13457         else
13458                 return TG3_RX_RET_MAX_SIZE_5705;
13459 }
13460
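      /* Host bridges known to reorder posted PCI writes; if one of
       * these is present, the driver enables its mailbox
       * write-reordering workaround.
       */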
13461 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13462         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13463         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13464         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13465         { },
13466 };
13467
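      /* One-time, probe-path discovery of everything about this chip:
       * ASIC revision, bus type, workaround flags, TSO/MSI
       * capabilities, and the register access methods the rest of the
       * driver will use.
       */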
13468 static int __devinit tg3_get_invariants(struct tg3 *tp)
13469 {
13470         u32 misc_ctrl_reg;
13471         u32 pci_state_reg, grc_misc_cfg;
13472         u32 val;
13473         u16 pci_cmd;
13474         int err;
13475
13476         /* Force memory write invalidate off.  If we leave it on,
13477          * then on 5700_BX chips we have to enable a workaround.
13478          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13479          * to match the cacheline size.  The Broadcom driver has this
13480          * workaround but turns MWI off all the time, so it is never
13481          * used.  This seems to suggest that the workaround is insufficient.
13482          */
13483         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13484         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13485         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13486
13487         /* Important! -- Make sure register accesses are byteswapped
13488          * correctly.  Also, for those chips that require it, make
13489          * sure that indirect register accesses are enabled before
13490          * the first operation.
13491          */
13492         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13493                               &misc_ctrl_reg);
13494         tp->misc_host_ctrl |= (misc_ctrl_reg &
13495                                MISC_HOST_CTRL_CHIPREV);
13496         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13497                                tp->misc_host_ctrl);
13498
13499         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13500                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13502                 u32 prod_id_asic_rev;
13503
13504                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13505                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13506                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13507                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13508                         pci_read_config_dword(tp->pdev,
13509                                               TG3PCI_GEN2_PRODID_ASICREV,
13510                                               &prod_id_asic_rev);
13511                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13512                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13513                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13514                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13515                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13516                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13517                         pci_read_config_dword(tp->pdev,
13518                                               TG3PCI_GEN15_PRODID_ASICREV,
13519                                               &prod_id_asic_rev);
13520                 else
13521                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13522                                               &prod_id_asic_rev);
13523
13524                 tp->pci_chip_rev_id = prod_id_asic_rev;
13525         }
13526
13527         /* Wrong chip ID in 5752 A0. This code can be removed later
13528          * as A0 is not in production.
13529          */
13530         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13531                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13532
13533         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13534          * we need to disable memory and use config. cycles
13535          * only to access all registers. The 5702/03 chips
13536          * can mistakenly decode the special cycles from the
13537          * ICH chipsets as memory write cycles, causing corruption
13538          * of register and memory space. Only certain ICH bridges
13539          * will drive special cycles with non-zero data during the
13540          * address phase which can fall within the 5703's address
13541          * range. This is not an ICH bug as the PCI spec allows
13542          * non-zero address during special cycles. However, only
13543          * these ICH bridges are known to drive non-zero addresses
13544          * during special cycles.
13545          *
13546          * Since special cycles do not cross PCI bridges, we only
13547          * enable this workaround if the 5703 is on the secondary
13548          * bus of these ICH bridges.
13549          */
13550         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13551             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13552                 static struct tg3_dev_id {
13553                         u32     vendor;
13554                         u32     device;
13555                         u32     rev;
13556                 } ich_chipsets[] = {
13557                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13558                           PCI_ANY_ID },
13559                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13560                           PCI_ANY_ID },
13561                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13562                           0xa },
13563                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13564                           PCI_ANY_ID },
13565                         { },
13566                 };
13567                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13568                 struct pci_dev *bridge = NULL;
13569
13570                 while (pci_id->vendor != 0) {
13571                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13572                                                 bridge);
13573                         if (!bridge) {
13574                                 pci_id++;
13575                                 continue;
13576                         }
13577                         if (pci_id->rev != PCI_ANY_ID) {
13578                                 if (bridge->revision > pci_id->rev)
13579                                         continue;
13580                         }
13581                         if (bridge->subordinate &&
13582                             (bridge->subordinate->number ==
13583                              tp->pdev->bus->number)) {
13584                                 tg3_flag_set(tp, ICH_WORKAROUND);
13585                                 pci_dev_put(bridge);
13586                                 break;
13587                         }
13588                 }
13589         }
13590
13591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13592                 static struct tg3_dev_id {
13593                         u32     vendor;
13594                         u32     device;
13595                 } bridge_chipsets[] = {
13596                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13597                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13598                         { },
13599                 };
13600                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13601                 struct pci_dev *bridge = NULL;
13602
13603                 while (pci_id->vendor != 0) {
13604                         bridge = pci_get_device(pci_id->vendor,
13605                                                 pci_id->device,
13606                                                 bridge);
13607                         if (!bridge) {
13608                                 pci_id++;
13609                                 continue;
13610                         }
13611                         if (bridge->subordinate &&
13612                             (bridge->subordinate->number <=
13613                              tp->pdev->bus->number) &&
13614                             (bridge->subordinate->subordinate >=
13615                              tp->pdev->bus->number)) {
13616                                 tg3_flag_set(tp, 5701_DMA_BUG);
13617                                 pci_dev_put(bridge);
13618                                 break;
13619                         }
13620                 }
13621         }
13622
13623         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13624          * DMA addresses > 40-bit. This bridge may have other additional
13625          * 57xx devices behind it in some 4-port NIC designs for example.
13626          * Any tg3 device found behind the bridge will also need the 40-bit
13627          * DMA workaround.
13628          */
13629         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13630             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13631                 tg3_flag_set(tp, 5780_CLASS);
13632                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13633                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13634         } else {
13635                 struct pci_dev *bridge = NULL;
13636
13637                 do {
13638                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13639                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13640                                                 bridge);
13641                         if (bridge && bridge->subordinate &&
13642                             (bridge->subordinate->number <=
13643                              tp->pdev->bus->number) &&
13644                             (bridge->subordinate->subordinate >=
13645                              tp->pdev->bus->number)) {
13646                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13647                                 pci_dev_put(bridge);
13648                                 break;
13649                         }
13650                 } while (bridge);
13651         }
13652
13653         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13654             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13655             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13656             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13657                 tp->pdev_peer = tg3_find_peer(tp);
13658
13659         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13660             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13662                 tg3_flag_set(tp, 5717_PLUS);
13663
13664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13665             tg3_flag(tp, 5717_PLUS))
13666                 tg3_flag_set(tp, 57765_PLUS);
13667
13668         /* Intentionally exclude ASIC_REV_5906 */
13669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13672             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13673             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13675             tg3_flag(tp, 57765_PLUS))
13676                 tg3_flag_set(tp, 5755_PLUS);
13677
13678         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13679             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13680             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13681             tg3_flag(tp, 5755_PLUS) ||
13682             tg3_flag(tp, 5780_CLASS))
13683                 tg3_flag_set(tp, 5750_PLUS);
13684
13685         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13686             tg3_flag(tp, 5750_PLUS))
13687                 tg3_flag_set(tp, 5705_PLUS);
13688
13689         /* Determine TSO capabilities */
13690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13691                 ; /* Do nothing. HW bug. */
13692         else if (tg3_flag(tp, 57765_PLUS))
13693                 tg3_flag_set(tp, HW_TSO_3);
13694         else if (tg3_flag(tp, 5755_PLUS) ||
13695                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13696                 tg3_flag_set(tp, HW_TSO_2);
13697         else if (tg3_flag(tp, 5750_PLUS)) {
13698                 tg3_flag_set(tp, HW_TSO_1);
13699                 tg3_flag_set(tp, TSO_BUG);
13700                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13701                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13702                         tg3_flag_clear(tp, TSO_BUG);
13703         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13704                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13705                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13706                 tg3_flag_set(tp, TSO_BUG);
13707                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13708                         tp->fw_needed = FIRMWARE_TG3TSO5;
13709                 else
13710                         tp->fw_needed = FIRMWARE_TG3TSO;
13711         }
13712
13713         /* Selectively allow TSO based on operating conditions */
13714         if (tg3_flag(tp, HW_TSO_1) ||
13715             tg3_flag(tp, HW_TSO_2) ||
13716             tg3_flag(tp, HW_TSO_3) ||
13717             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF))) {
13718                 tg3_flag_set(tp, TSO_CAPABLE);
13719         } else {
13720                 tg3_flag_clear(tp, TSO_CAPABLE);
13721                 tg3_flag_clear(tp, TSO_BUG);
13722                 tp->fw_needed = NULL;
13723         }
13724
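        /* 5701 A0 needs the base tg3 firmware patch regardless of TSO;
         * this must come after the block above, which may have cleared
         * tp->fw_needed.
         */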
13725         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13726                 tp->fw_needed = FIRMWARE_TG3;
13727
13728         tp->irq_max = 1;
13729
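        /* MSI first appears with the 5750 family, but is cleared again
         * for 5750 AX/BX and for single-port 5714 A2 and earlier,
         * presumably due to MSI errata.  MSI-X and multiple interrupt
         * vectors need 57765 or newer.
         */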
13730         if (tg3_flag(tp, 5750_PLUS)) {
13731                 tg3_flag_set(tp, SUPPORT_MSI);
13732                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13733                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13734                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13735                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13736                      tp->pdev_peer == tp->pdev))
13737                         tg3_flag_clear(tp, SUPPORT_MSI);
13738
13739                 if (tg3_flag(tp, 5755_PLUS) ||
13740                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13741                         tg3_flag_set(tp, 1SHOT_MSI);
13742                 }
13743
13744                 if (tg3_flag(tp, 57765_PLUS)) {
13745                         tg3_flag_set(tp, SUPPORT_MSIX);
13746                         tp->irq_max = TG3_IRQ_MAX_VECS;
13747                 }
13748         }
13749
13750         if (tg3_flag(tp, 5755_PLUS))
13751                 tg3_flag_set(tp, SHORT_DMA_BUG);
13752
13753         if (tg3_flag(tp, 5717_PLUS))
13754                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13755
13756         if (tg3_flag(tp, 57765_PLUS) &&
13757             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13758                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13759
13760         if (!tg3_flag(tp, 5705_PLUS) ||
13761             tg3_flag(tp, 5780_CLASS) ||
13762             tg3_flag(tp, USE_JUMBO_BDFLAG))
13763                 tg3_flag_set(tp, JUMBO_CAPABLE);
13764
13765         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13766                               &pci_state_reg);
13767
13768         if (pci_is_pcie(tp->pdev)) {
13769                 u16 lnkctl;
13770
13771                 tg3_flag_set(tp, PCI_EXPRESS);
13772
13773                 tp->pcie_readrq = 4096;
13774                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13775                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13776                         tp->pcie_readrq = 2048;
13777
13778                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13779
13780                 pci_read_config_word(tp->pdev,
13781                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
13782                                      &lnkctl);
13783                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13784                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13785                             ASIC_REV_5906) {
13786                                 tg3_flag_clear(tp, HW_TSO_2);
13787                                 tg3_flag_clear(tp, TSO_CAPABLE);
13788                         }
13789                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13790                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13791                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13792                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13793                                 tg3_flag_set(tp, CLKREQ_BUG);
13794                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13795                         tg3_flag_set(tp, L1PLLPD_EN);
13796                 }
13797         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13798                 /* BCM5785 devices are effectively PCIe devices, and should
13799                  * follow PCIe codepaths, but do not have a PCIe capabilities
13800                  * section.
13801                  */
13802                 tg3_flag_set(tp, PCI_EXPRESS);
13803         } else if (!tg3_flag(tp, 5705_PLUS) ||
13804                    tg3_flag(tp, 5780_CLASS)) {
13805                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13806                 if (!tp->pcix_cap) {
13807                         dev_err(&tp->pdev->dev,
13808                                 "Cannot find PCI-X capability, aborting\n");
13809                         return -EIO;
13810                 }
13811
13812                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13813                         tg3_flag_set(tp, PCIX_MODE);
13814         }
13815
13816         /* If we have an AMD 762 or VIA K8T800 chipset, write
13817          * reordering done by the host controller on mailbox register
13818          * writes can cause major trouble.  We read back from every
13819          * mailbox register write to force the writes to be posted
13820          * to the chip in order.
13821          */
13822         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13823             !tg3_flag(tp, PCI_EXPRESS))
13824                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13825
13826         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13827                              &tp->pci_cacheline_sz);
13828         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13829                              &tp->pci_lat_timer);
13830         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13831             tp->pci_lat_timer < 64) {
13832                 tp->pci_lat_timer = 64;
13833                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13834                                       tp->pci_lat_timer);
13835         }
13836
13837         /* Important! -- It is critical that the PCI-X hw workaround
13838          * situation is decided before the first MMIO register access.
13839          */
13840         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13841                 /* 5700 BX chips need to have their TX producer index
13842                  * mailboxes written twice to workaround a bug.
13843                  */
13844                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13845
13846                 /* If we are in PCI-X mode, enable register write workaround.
13847                  *
13848                  * The workaround is to use indirect register accesses
13849                  * for all chip writes not to mailbox registers.
13850                  */
13851                 if (tg3_flag(tp, PCIX_MODE)) {
13852                         u32 pm_reg;
13853
13854                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13855
13856                         /* The chip can have its power management PCI config
13857                          * space registers clobbered due to this bug.
13858                          * So explicitly force the chip into D0 here.
13859                          */
13860                         pci_read_config_dword(tp->pdev,
13861                                               tp->pm_cap + PCI_PM_CTRL,
13862                                               &pm_reg);
13863                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13864                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13865                         pci_write_config_dword(tp->pdev,
13866                                                tp->pm_cap + PCI_PM_CTRL,
13867                                                pm_reg);
13868
13869                         /* Also, force SERR#/PERR# in PCI command. */
13870                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13871                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13872                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13873                 }
13874         }
13875
13876         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13877                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13878         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13879                 tg3_flag_set(tp, PCI_32BIT);
13880
13881         /* Chip-specific fixup from Broadcom driver */
13882         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13883             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13884                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13885                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13886         }
13887
13888         /* Default fast path register access methods */
13889         tp->read32 = tg3_read32;
13890         tp->write32 = tg3_write32;
13891         tp->read32_mbox = tg3_read32;
13892         tp->write32_mbox = tg3_write32;
13893         tp->write32_tx_mbox = tg3_write32;
13894         tp->write32_rx_mbox = tg3_write32;
13895
13896         /* Various workaround register access methods */
13897         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13898                 tp->write32 = tg3_write_indirect_reg32;
13899         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13900                  (tg3_flag(tp, PCI_EXPRESS) &&
13901                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13902                 /*
13903                  * Back to back register writes can cause problems on these
13904                  * chips, the workaround is to read back all reg writes
13905                  * except those to mailbox regs.
13906                  *
13907                  * See tg3_write_indirect_reg32().
13908                  */
13909                 tp->write32 = tg3_write_flush_reg32;
13910         }
13911
13912         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13913                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13914                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13915                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13916         }
13917
13918         if (tg3_flag(tp, ICH_WORKAROUND)) {
13919                 tp->read32 = tg3_read_indirect_reg32;
13920                 tp->write32 = tg3_write_indirect_reg32;
13921                 tp->read32_mbox = tg3_read_indirect_mbox;
13922                 tp->write32_mbox = tg3_write_indirect_mbox;
13923                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13924                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13925
13926                 iounmap(tp->regs);
13927                 tp->regs = NULL;
13928
13929                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13930                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13931                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13932         }
13933         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13934                 tp->read32_mbox = tg3_read32_mbox_5906;
13935                 tp->write32_mbox = tg3_write32_mbox_5906;
13936                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13937                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13938         }
13939
13940         if (tp->write32 == tg3_write_indirect_reg32 ||
13941             (tg3_flag(tp, PCIX_MODE) &&
13942              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13943               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13944                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13945
13946         /* The memory arbiter has to be enabled in order for SRAM accesses
13947          * to succeed.  Normally on powerup the tg3 chip firmware will make
13948          * sure it is enabled, but other entities such as system netboot
13949          * code might disable it.
13950          */
13951         val = tr32(MEMARB_MODE);
13952         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
13953
13954         /* Get eeprom hw config before calling tg3_set_power_state().
13955          * In particular, the TG3_FLAG_IS_NIC flag must be
13956          * determined before calling tg3_set_power_state() so that
13957          * we know whether or not to switch out of Vaux power.
13958          * When the flag is set, it means that GPIO1 is used for eeprom
13959          * write protect and also implies that it is a LOM where GPIOs
13960          * are not used to switch power.
13961          */
13962         tg3_get_eeprom_hw_cfg(tp);
13963
13964         if (tg3_flag(tp, ENABLE_APE)) {
13965                 /* Allow reads and writes to the
13966                  * APE register and memory space.
13967                  */
13968                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13969                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13970                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13971                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13972                                        pci_state_reg);
13973
13974                 tg3_ape_lock_init(tp);
13975         }
13976
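        /* These chips carry a CPMU (power-management) block. */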
13977         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13979             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13980             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13981             tg3_flag(tp, 57765_PLUS))
13982                 tg3_flag_set(tp, CPMU_PRESENT);
13983
13984         /* Set up tp->grc_local_ctrl before calling
13985          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
13986          * will bring 5700's external PHY out of reset.
13987          * It is also used as eeprom write protect on LOMs.
13988          */
13989         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13990         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13991             tg3_flag(tp, EEPROM_WRITE_PROT))
13992                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13993                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13994         /* Unused GPIO3 must be driven as output on 5752 because there
13995          * are no pull-up resistors on unused GPIO pins.
13996          */
13997         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13998                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13999
14000         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14001             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14002             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14003                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14004
14005         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14006             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14007                 /* Turn off the debug UART. */
14008                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14009                 if (tg3_flag(tp, IS_NIC))
14010                         /* Keep VMain power. */
14011                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14012                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14013         }
14014
14015         /* Switch out of Vaux if it is a NIC */
14016         tg3_pwrsrc_switch_to_vmain(tp);
14017
14018         /* Derive initial jumbo mode from MTU assigned in
14019          * ether_setup() via the alloc_etherdev() call
14020          */
14021         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14022                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14023
14024         /* Determine WakeOnLan speed to use. */
14025         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14026             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14027             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14028             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14029                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14030         } else {
14031                 tg3_flag_set(tp, WOL_SPEED_100MB);
14032         }
14033
14034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14035                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14036
14037         /* A few boards don't want Ethernet@WireSpeed phy feature */
14038         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14039             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14040              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14041              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14042             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14043             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14044                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14045
14046         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14047             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14048                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14049         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14050                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14051
14052         if (tg3_flag(tp, 5705_PLUS) &&
14053             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14054             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14055             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14056             !tg3_flag(tp, 57765_PLUS)) {
14057                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14058                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14059                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14060                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14061                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14062                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14063                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14064                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14065                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14066                 } else
14067                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14068         }
14069
14070         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14071             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14072                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14073                 if (tp->phy_otp == 0)
14074                         tp->phy_otp = TG3_OTP_DEFAULT;
14075         }
14076
14077         if (tg3_flag(tp, CPMU_PRESENT))
14078                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14079         else
14080                 tp->mi_mode = MAC_MI_MODE_BASE;
14081
14082         tp->coalesce_mode = 0;
14083         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14084             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14085                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14086
14087         /* Set these bits to enable statistics workaround. */
14088         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14089             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14090             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14091                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14092                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14093         }
14094
14095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14096             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14097                 tg3_flag_set(tp, USE_PHYLIB);
14098
14099         err = tg3_mdio_init(tp);
14100         if (err)
14101                 return err;
14102
14103         /* Initialize data/descriptor byte/word swapping. */
14104         val = tr32(GRC_MODE);
14105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14106                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14107                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14108                         GRC_MODE_B2HRX_ENABLE |
14109                         GRC_MODE_HTX2B_ENABLE |
14110                         GRC_MODE_HOST_STACKUP);
14111         else
14112                 val &= GRC_MODE_HOST_STACKUP;
14113
14114         tw32(GRC_MODE, val | tp->grc_mode);
14115
14116         tg3_switch_clocks(tp);
14117
14118         /* Clear this out for sanity. */
14119         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14120
14121         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14122                               &pci_state_reg);
14123         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14124             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14125                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14126
14127                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14128                     chiprevid == CHIPREV_ID_5701_B0 ||
14129                     chiprevid == CHIPREV_ID_5701_B2 ||
14130                     chiprevid == CHIPREV_ID_5701_B5) {
14131                         void __iomem *sram_base;
14132
14133                         /* Write some dummy words into the SRAM status block
14134                          * area and see if they read back correctly.  If the
14135                          * readback is bad, force-enable the PCIX workaround.
14136                          */
14137                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14138
14139                         writel(0x00000000, sram_base);
14140                         writel(0x00000000, sram_base + 4);
14141                         writel(0xffffffff, sram_base + 4);
14142                         if (readl(sram_base) != 0x00000000)
14143                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14144                 }
14145         }
14146
14147         udelay(50);
14148         tg3_nvram_init(tp);
14149
14150         grc_misc_cfg = tr32(GRC_MISC_CFG);
14151         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14152
14153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14154             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14155              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14156                 tg3_flag_set(tp, IS_5788);
14157
14158         if (!tg3_flag(tp, IS_5788) &&
14159             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14160                 tg3_flag_set(tp, TAGGED_STATUS);
14161         if (tg3_flag(tp, TAGGED_STATUS)) {
14162                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14163                                       HOSTCC_MODE_CLRTICK_TXBD);
14164
14165                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14166                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14167                                        tp->misc_host_ctrl);
14168         }
14169
14170         /* Preserve the APE MAC_MODE bits */
14171         if (tg3_flag(tp, ENABLE_APE))
14172                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14173         else
14174                 tp->mac_mode = TG3_DEF_MAC_MODE;
14175
14176         /* these are limited to 10/100 only */
14177         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14178              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14179             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14180              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14181              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14182               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14183               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14184             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14185              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14186               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14187               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14188             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14189             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14190             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14191             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14192                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14193
14194         err = tg3_phy_probe(tp);
14195         if (err) {
14196                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14197                 /* ... but do not return immediately ... */
14198                 tg3_mdio_fini(tp);
14199         }
14200
14201         tg3_read_vpd(tp);
14202         tg3_read_fw_ver(tp);
14203
14204         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14205                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14206         } else {
14207                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14208                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14209                 else
14210                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14211         }
14212
14213         /* 5700 {AX,BX} chips have a broken status block link
14214          * change bit implementation, so we must use the
14215          * status register in those cases.
14216          */
14217         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14218                 tg3_flag_set(tp, USE_LINKCHG_REG);
14219         else
14220                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14221
14222         /* The led_ctrl is set during tg3_phy_probe, here we might
14223          * have to force the link status polling mechanism based
14224          * upon subsystem IDs.
14225          */
14226         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14228             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14229                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14230                 tg3_flag_set(tp, USE_LINKCHG_REG);
14231         }
14232
14233         /* For all SERDES we poll the MAC status register. */
14234         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14235                 tg3_flag_set(tp, POLL_SERDES);
14236         else
14237                 tg3_flag_clear(tp, POLL_SERDES);
14238
14239         tp->rx_offset = NET_IP_ALIGN;
14240         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14242             tg3_flag(tp, PCIX_MODE)) {
14243                 tp->rx_offset = 0;
14244 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14245                 tp->rx_copy_thresh = ~(u16)0;
14246 #endif
14247         }
14248
14249         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14250         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14251         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14252
14253         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14254
14255         /* Increment the rx prod index on the rx std ring by at most
14256          * 8 for these chips to work around hw errata.
14257          */
14258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14259             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14260             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14261                 tp->rx_std_max_post = 8;
14262
14263         if (tg3_flag(tp, ASPM_WORKAROUND))
14264                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14265                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14266
14267         return err;
14268 }
14269
14270 #ifdef CONFIG_SPARC
14271 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14272 {
14273         struct net_device *dev = tp->dev;
14274         struct pci_dev *pdev = tp->pdev;
14275         struct device_node *dp = pci_device_to_OF_node(pdev);
14276         const unsigned char *addr;
14277         int len;
14278
14279         addr = of_get_property(dp, "local-mac-address", &len);
14280         if (addr && len == ETH_ALEN) {
14281                 memcpy(dev->dev_addr, addr, ETH_ALEN);
14282                 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
14283                 return 0;
14284         }
14285         return -ENODEV;
14286 }
14287
14288 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14289 {
14290         struct net_device *dev = tp->dev;
14291
14292         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
14293         memcpy(dev->perm_addr, idprom->id_ethaddr, ETH_ALEN);
14294         return 0;
14295 }
14296 #endif
14297
14298 static int __devinit tg3_get_device_address(struct tg3 *tp)
14299 {
14300         struct net_device *dev = tp->dev;
14301         u32 hi, lo, mac_offset;
14302         int addr_ok = 0;
14303
14304 #ifdef CONFIG_SPARC
14305         if (!tg3_get_macaddr_sparc(tp))
14306                 return 0;
14307 #endif
14308
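        /* Default NVRAM offset of the MAC address.  Dual-MAC and
         * multi-function devices store per-function addresses at
         * other offsets, selected below.
         */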
14309         mac_offset = 0x7c;
14310         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14311             tg3_flag(tp, 5780_CLASS)) {
14312                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14313                         mac_offset = 0xcc;
14314                 if (tg3_nvram_lock(tp))
14315                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14316                 else
14317                         tg3_nvram_unlock(tp);
14318         } else if (tg3_flag(tp, 5717_PLUS)) {
14319                 if (PCI_FUNC(tp->pdev->devfn) & 1)
14320                         mac_offset = 0xcc;
14321                 if (PCI_FUNC(tp->pdev->devfn) > 1)
14322                         mac_offset += 0x18c;
14323         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14324                 mac_offset = 0x10;
14325
14326         /* First try to get it from MAC address mailbox. */
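        /* 0x484b is ASCII "HK", apparently the signature bootcode
         * leaves when it has stamped an address into the mailbox.
         */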
14327         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14328         if ((hi >> 16) == 0x484b) {
14329                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14330                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14331
14332                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14333                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14334                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14335                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14336                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14337
14338                 /* Some old bootcode may report a 0 MAC address in SRAM */
14339                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14340         }
14341         if (!addr_ok) {
14342                 /* Next, try NVRAM. */
14343                 if (!tg3_flag(tp, NO_NVRAM) &&
14344                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14345                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14346                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14347                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14348                 }
14349                 /* Finally just fetch it out of the MAC control regs. */
14350                 else {
14351                         hi = tr32(MAC_ADDR_0_HIGH);
14352                         lo = tr32(MAC_ADDR_0_LOW);
14353
14354                         dev->dev_addr[5] = lo & 0xff;
14355                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14356                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14357                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14358                         dev->dev_addr[1] = hi & 0xff;
14359                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14360                 }
14361         }
14362
14363         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14364 #ifdef CONFIG_SPARC
14365                 if (!tg3_get_default_macaddr_sparc(tp))
14366                         return 0;
14367 #endif
14368                 return -EINVAL;
14369         }
14370         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14371         return 0;
14372 }
14373
14374 #define BOUNDARY_SINGLE_CACHELINE       1
14375 #define BOUNDARY_MULTI_CACHELINE        2
14376
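/* Fold the host cache line size into the DMA read/write boundary bits
 * of DMA_RWCTRL.  What we aim for depends on how the platform's PCI
 * host bridge behaves when a burst crosses a cache line.
 */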
14377 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14378 {
14379         int cacheline_size;
14380         u8 byte;
14381         int goal;
14382
14383         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14384         if (byte == 0)
14385                 cacheline_size = 1024;
14386         else
14387                 cacheline_size = (int) byte * 4;
14388
14389         /* On 5703 and later chips, the boundary bits have no
14390          * effect.
14391          */
14392         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14393             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14394             !tg3_flag(tp, PCI_EXPRESS))
14395                 goto out;
14396
14397 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14398         goal = BOUNDARY_MULTI_CACHELINE;
14399 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14400         goal = BOUNDARY_SINGLE_CACHELINE;
14401 #else
14402         goal = 0;
14403 #endif
14406
14407         if (tg3_flag(tp, 57765_PLUS)) {
14408                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14409                 goto out;
14410         }
14411
14412         if (!goal)
14413                 goto out;
14414
14415         /* PCI controllers on most RISC systems tend to disconnect
14416          * when a device tries to burst across a cache-line boundary.
14417          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14418          *
14419          * Unfortunately, for PCI-E there are only limited
14420          * write-side controls for this, and thus for reads
14421          * we will still get the disconnects.  We'll also waste
14422          * these PCI cycles for both read and write for chips
14423          * other than 5700 and 5701 which do not implement the
14424          * boundary bits.
14425          */
14426         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14427                 switch (cacheline_size) {
14428                 case 16:
14429                 case 32:
14430                 case 64:
14431                 case 128:
14432                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14433                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14434                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14435                         } else {
14436                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14437                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14438                         }
14439                         break;
14440
14441                 case 256:
14442                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14443                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14444                         break;
14445
14446                 default:
14447                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14448                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14449                         break;
14450                 }
14451         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14452                 switch (cacheline_size) {
14453                 case 16:
14454                 case 32:
14455                 case 64:
14456                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14457                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14458                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14459                                 break;
14460                         }
14461                         /* fallthrough */
14462                 case 128:
14463                 default:
14464                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14465                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14466                         break;
14467                 }
14468         } else {
14469                 switch (cacheline_size) {
14470                 case 16:
14471                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14472                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14473                                         DMA_RWCTRL_WRITE_BNDRY_16);
14474                                 break;
14475                         }
14476                         /* fallthrough */
14477                 case 32:
14478                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14479                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14480                                         DMA_RWCTRL_WRITE_BNDRY_32);
14481                                 break;
14482                         }
14483                         /* fallthrough */
14484                 case 64:
14485                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14486                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14487                                         DMA_RWCTRL_WRITE_BNDRY_64);
14488                                 break;
14489                         }
14490                         /* fallthrough */
14491                 case 128:
14492                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14493                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14494                                         DMA_RWCTRL_WRITE_BNDRY_128);
14495                                 break;
14496                         }
14497                         /* fallthrough */
14498                 case 256:
14499                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14500                                 DMA_RWCTRL_WRITE_BNDRY_256);
14501                         break;
14502                 case 512:
14503                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14504                                 DMA_RWCTRL_WRITE_BNDRY_512);
14505                         break;
14506                 case 1024:
14507                 default:
14508                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14509                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14510                         break;
14511                 }
14512         }
14513
14514 out:
14515         return val;
14516 }
14517
14518 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14519 {
14520         struct tg3_internal_buffer_desc test_desc;
14521         u32 sram_dma_descs;
14522         int i, ret;
14523
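        /* Build one internal buffer descriptor by hand in NIC SRAM,
         * push it through the read- or write-DMA FTQ, and then poll
         * the matching completion FIFO for the result.
         */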
14524         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14525
14526         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14527         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14528         tw32(RDMAC_STATUS, 0);
14529         tw32(WDMAC_STATUS, 0);
14530
14531         tw32(BUFMGR_MODE, 0);
14532         tw32(FTQ_RESET, 0);
14533
14534         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14535         test_desc.addr_lo = buf_dma & 0xffffffff;
14536         test_desc.nic_mbuf = 0x00002100;
14537         test_desc.len = size;
14538
14539         /*
14540          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14541          * the *second* time the tg3 driver was getting loaded after an
14542          * initial scan.
14543          *
14544          * Broadcom tells me:
14545          *   ...the DMA engine is connected to the GRC block and a DMA
14546          *   reset may affect the GRC block in some unpredictable way...
14547          *   The behavior of resets to individual blocks has not been tested.
14548          *
14549          * Broadcom noted the GRC reset will also reset all sub-components.
14550          */
14551         if (to_device) {
14552                 test_desc.cqid_sqid = (13 << 8) | 2;
14553
14554                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14555                 udelay(40);
14556         } else {
14557                 test_desc.cqid_sqid = (16 << 8) | 7;
14558
14559                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14560                 udelay(40);
14561         }
14562         test_desc.flags = 0x00000005;
14563
14564         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14565                 u32 val;
14566
14567                 val = *(((u32 *)&test_desc) + i);
14568                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14569                                        sram_dma_descs + (i * sizeof(u32)));
14570                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14571         }
14572         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14573
14574         if (to_device)
14575                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14576         else
14577                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14578
14579         ret = -ENODEV;
14580         for (i = 0; i < 40; i++) {
14581                 u32 val;
14582
14583                 if (to_device)
14584                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14585                 else
14586                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14587                 if ((val & 0xffff) == sram_dma_descs) {
14588                         ret = 0;
14589                         break;
14590                 }
14591
14592                 udelay(100);
14593         }
14594
14595         return ret;
14596 }
14597
14598 #define TEST_BUFFER_SIZE        0x2000
14599
14600 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14601         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14602         { },
14603 };
14604
14605 static int __devinit tg3_test_dma(struct tg3 *tp)
14606 {
14607         dma_addr_t buf_dma;
14608         u32 *buf, saved_dma_rwctrl;
14609         int ret = 0;
14610
14611         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14612                                  &buf_dma, GFP_KERNEL);
14613         if (!buf) {
14614                 ret = -ENOMEM;
14615                 goto out_nofree;
14616         }
14617
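        /* 0x7 and 0x6 look to be the PCI Memory Write and Memory Read
         * bus commands the chip should issue for DMA.
         */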
14618         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14619                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14620
14621         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14622
14623         if (tg3_flag(tp, 57765_PLUS))
14624                 goto out;
14625
14626         if (tg3_flag(tp, PCI_EXPRESS)) {
14627                 /* DMA read watermark not used on PCIE */
14628                 tp->dma_rwctrl |= 0x00180000;
14629         } else if (!tg3_flag(tp, PCIX_MODE)) {
14630                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14631                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14632                         tp->dma_rwctrl |= 0x003f0000;
14633                 else
14634                         tp->dma_rwctrl |= 0x003f000f;
14635         } else {
14636                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14637                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14638                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14639                         u32 read_water = 0x7;
14640
14641                         /* If the 5704 is behind the EPB bridge, we can
14642                          * do the less restrictive ONE_DMA workaround for
14643                          * better performance.
14644                          */
14645                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
14646                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14647                                 tp->dma_rwctrl |= 0x8000;
14648                         else if (ccval == 0x6 || ccval == 0x7)
14649                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14650
14651                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14652                                 read_water = 4;
14653                         /* Set bit 23 to enable PCIX hw bug fix */
14654                         tp->dma_rwctrl |=
14655                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14656                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14657                                 (1 << 23);
14658                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14659                         /* 5780 always in PCIX mode */
14660                         tp->dma_rwctrl |= 0x00144000;
14661                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14662                         /* 5714 always in PCIX mode */
14663                         tp->dma_rwctrl |= 0x00148000;
14664                 } else {
14665                         tp->dma_rwctrl |= 0x001b000f;
14666                 }
14667         }
14668
14669         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14671                 tp->dma_rwctrl &= 0xfffffff0;
14672
14673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14675                 /* Remove this if it causes problems for some boards. */
14676                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14677
14678                 /* On 5700/5701 chips, we need to set this bit.
14679                  * Otherwise the chip will issue cacheline transactions
14680                  * to streamable DMA memory with not all the byte
14681                  * enables turned on.  This is an error on several
14682                  * RISC PCI controllers, in particular sparc64.
14683                  *
14684                  * On 5703/5704 chips, this bit has been reassigned
14685                  * a different meaning.  In particular, it is used
14686                  * on those chips to enable a PCI-X workaround.
14687                  */
14688                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14689         }
14690
14691         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14692
14693 #if 0
14694         /* Unneeded, already done by tg3_get_invariants.  */
14695         tg3_switch_clocks(tp);
14696 #endif
14697
14698         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14699             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14700                 goto out;
14701
14702         /* It is best to perform the DMA test with the maximum write
14703          * burst size to expose the 5700/5701 write DMA bug.
14704          */
14705         saved_dma_rwctrl = tp->dma_rwctrl;
14706         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14707         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14708
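        /* DMA a known pattern to the chip and back, then verify it.
         * On a mismatch, retry once with a 16-byte write boundary;
         * only if that also fails is the test abandoned.
         */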
14709         while (1) {
14710                 u32 *p = buf, i;
14711
14712                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14713                         p[i] = i;
14714
14715                 /* Send the buffer to the chip. */
14716                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14717                 if (ret) {
14718                         dev_err(&tp->pdev->dev,
14719                                 "%s: Buffer write failed. err = %d\n",
14720                                 __func__, ret);
14721                         break;
14722                 }
14723
14724 #if 0
14725                 /* validate data reached card RAM correctly. */
14726                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14727                         u32 val;
14728                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
14729                         if (le32_to_cpu(val) != p[i]) {
14730                                 dev_err(&tp->pdev->dev,
14731                                         "%s: Buffer corrupted on device! "
14732                                         "(%d != %d)\n", __func__, val, i);
14733                                 /* ret = -ENODEV here? */
14734                         }
14735                         p[i] = 0;
14736                 }
14737 #endif
14738                 /* Now read it back. */
14739                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14740                 if (ret) {
14741                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14742                                 "err = %d\n", __func__, ret);
14743                         break;
14744                 }
14745
14746                 /* Verify it. */
14747                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14748                         if (p[i] == i)
14749                                 continue;
14750
14751                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14752                             DMA_RWCTRL_WRITE_BNDRY_16) {
14753                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14754                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14755                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14756                                 break;
14757                         } else {
14758                                 dev_err(&tp->pdev->dev,
14759                                         "%s: Buffer corrupted on read back! "
14760                                         "(%d != %d)\n", __func__, p[i], i);
14761                                 ret = -ENODEV;
14762                                 goto out;
14763                         }
14764                 }
14765
14766                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14767                         /* Success. */
14768                         ret = 0;
14769                         break;
14770                 }
14771         }
14772         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14773             DMA_RWCTRL_WRITE_BNDRY_16) {
14774                 /* DMA test passed without adjusting DMA boundary,
14775                  * now look for chipsets that are known to expose the
14776                  * DMA bug without failing the test.
14777                  */
14778                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
14779                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14780                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14781                 } else {
14782                         /* Safe to use the calculated DMA boundary. */
14783                         tp->dma_rwctrl = saved_dma_rwctrl;
14784                 }
14785
14786                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14787         }
14788
14789 out:
14790         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14791 out_nofree:
14792         return ret;
14793 }
14794
14795 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14796 {
14797         if (tg3_flag(tp, 57765_PLUS)) {
14798                 tp->bufmgr_config.mbuf_read_dma_low_water =
14799                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14800                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14801                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14802                 tp->bufmgr_config.mbuf_high_water =
14803                         DEFAULT_MB_HIGH_WATER_57765;
14804
14805                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14806                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14807                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14808                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14809                 tp->bufmgr_config.mbuf_high_water_jumbo =
14810                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14811         } else if (tg3_flag(tp, 5705_PLUS)) {
14812                 tp->bufmgr_config.mbuf_read_dma_low_water =
14813                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14814                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14815                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14816                 tp->bufmgr_config.mbuf_high_water =
14817                         DEFAULT_MB_HIGH_WATER_5705;
14818                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14819                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14820                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14821                         tp->bufmgr_config.mbuf_high_water =
14822                                 DEFAULT_MB_HIGH_WATER_5906;
14823                 }
14824
14825                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14826                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14827                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14828                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14829                 tp->bufmgr_config.mbuf_high_water_jumbo =
14830                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14831         } else {
14832                 tp->bufmgr_config.mbuf_read_dma_low_water =
14833                         DEFAULT_MB_RDMA_LOW_WATER;
14834                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14835                         DEFAULT_MB_MACRX_LOW_WATER;
14836                 tp->bufmgr_config.mbuf_high_water =
14837                         DEFAULT_MB_HIGH_WATER;
14838
14839                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14840                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14841                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14842                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14843                 tp->bufmgr_config.mbuf_high_water_jumbo =
14844                         DEFAULT_MB_HIGH_WATER_JUMBO;
14845         }
14846
14847         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14848         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14849 }
14850
14851 static char * __devinit tg3_phy_string(struct tg3 *tp)
14852 {
14853         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14854         case TG3_PHY_ID_BCM5400:        return "5400";
14855         case TG3_PHY_ID_BCM5401:        return "5401";
14856         case TG3_PHY_ID_BCM5411:        return "5411";
14857         case TG3_PHY_ID_BCM5701:        return "5701";
14858         case TG3_PHY_ID_BCM5703:        return "5703";
14859         case TG3_PHY_ID_BCM5704:        return "5704";
14860         case TG3_PHY_ID_BCM5705:        return "5705";
14861         case TG3_PHY_ID_BCM5750:        return "5750";
14862         case TG3_PHY_ID_BCM5752:        return "5752";
14863         case TG3_PHY_ID_BCM5714:        return "5714";
14864         case TG3_PHY_ID_BCM5780:        return "5780";
14865         case TG3_PHY_ID_BCM5755:        return "5755";
14866         case TG3_PHY_ID_BCM5787:        return "5787";
14867         case TG3_PHY_ID_BCM5784:        return "5784";
14868         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14869         case TG3_PHY_ID_BCM5906:        return "5906";
14870         case TG3_PHY_ID_BCM5761:        return "5761";
14871         case TG3_PHY_ID_BCM5718C:       return "5718C";
14872         case TG3_PHY_ID_BCM5718S:       return "5718S";
14873         case TG3_PHY_ID_BCM57765:       return "57765";
14874         case TG3_PHY_ID_BCM5719C:       return "5719C";
14875         case TG3_PHY_ID_BCM5720C:       return "5720C";
14876         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14877         case 0:                 return "serdes";
14878         default:                return "unknown";
14879         }
14880 }
14881
14882 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14883 {
14884         if (tg3_flag(tp, PCI_EXPRESS)) {
14885                 strcpy(str, "PCI Express");
14886                 return str;
14887         } else if (tg3_flag(tp, PCIX_MODE)) {
14888                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14889
14890                 strcpy(str, "PCIX:");
14891
14892                 if ((clock_ctrl == 7) ||
14893                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14894                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14895                         strcat(str, "133MHz");
14896                 else if (clock_ctrl == 0)
14897                         strcat(str, "33MHz");
14898                 else if (clock_ctrl == 2)
14899                         strcat(str, "50MHz");
14900                 else if (clock_ctrl == 4)
14901                         strcat(str, "66MHz");
14902                 else if (clock_ctrl == 6)
14903                         strcat(str, "100MHz");
14904         } else {
14905                 strcpy(str, "PCI:");
14906                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14907                         strcat(str, "66MHz");
14908                 else
14909                         strcat(str, "33MHz");
14910         }
14911         if (tg3_flag(tp, PCI_32BIT))
14912                 strcat(str, ":32-bit");
14913         else
14914                 strcat(str, ":64-bit");
14915         return str;
14916 }
14917
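/* Dual-port devices such as the 5704 expose their two ports as separate
 * PCI functions in the same slot.  Scan the slot's other functions for
 * the companion device; if none is found (e.g. a 5704 strapped for
 * single-port operation), the device acts as its own peer.
 */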
14918 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14919 {
14920         struct pci_dev *peer;
14921         unsigned int func, devnr = tp->pdev->devfn & ~7;
14922
14923         for (func = 0; func < 8; func++) {
14924                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14925                 if (peer && peer != tp->pdev)
14926                         break;
14927                 pci_dev_put(peer);
14928         }
14929         /* The 5704 can be configured in single-port mode; there is no
14930          * peer in that case, so fall back to tp->pdev itself.
14931          */
14932         if (!peer) {
14933                 peer = tp->pdev;
14934                 return peer;
14935         }
14936
14937         /*
14938          * We don't need to keep the refcount elevated; there's no way
14939          * to remove one half of this device without removing the other.
14940          */
14941         pci_dev_put(peer);
14942
14943         return peer;
14944 }
14945
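/* Seed tp->coal with the default interrupt coalescing parameters; this is
 * what "ethtool -c" reports until the user overrides them.  Chips that
 * clear the coalescing tick counters on BD events get the *_CLRTCKS
 * variants, and on 5705+ parts the per-IRQ and statistics-block timers
 * are left disabled.
 */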
14946 static void __devinit tg3_init_coal(struct tg3 *tp)
14947 {
14948         struct ethtool_coalesce *ec = &tp->coal;
14949
14950         memset(ec, 0, sizeof(*ec));
14951         ec->cmd = ETHTOOL_GCOALESCE;
14952         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14953         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14954         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14955         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14956         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14957         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14958         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14959         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14960         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14961
14962         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14963                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14964                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14965                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14966                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14967                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14968         }
14969
14970         if (tg3_flag(tp, 5705_PLUS)) {
14971                 ec->rx_coalesce_usecs_irq = 0;
14972                 ec->tx_coalesce_usecs_irq = 0;
14973                 ec->stats_block_coalesce_usecs = 0;
14974         }
14975 }
14976
14977 static const struct net_device_ops tg3_netdev_ops = {
14978         .ndo_open               = tg3_open,
14979         .ndo_stop               = tg3_close,
14980         .ndo_start_xmit         = tg3_start_xmit,
14981         .ndo_get_stats64        = tg3_get_stats64,
14982         .ndo_validate_addr      = eth_validate_addr,
14983         .ndo_set_multicast_list = tg3_set_rx_mode,
14984         .ndo_set_mac_address    = tg3_set_mac_addr,
14985         .ndo_do_ioctl           = tg3_ioctl,
14986         .ndo_tx_timeout         = tg3_tx_timeout,
14987         .ndo_change_mtu         = tg3_change_mtu,
14988         .ndo_fix_features       = tg3_fix_features,
14989         .ndo_set_features       = tg3_set_features,
14990 #ifdef CONFIG_NET_POLL_CONTROLLER
14991         .ndo_poll_controller    = tg3_poll_controller,
14992 #endif
14993 };
14994
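/* Probe one Tigon3 device: enable and map the PCI device, fetch the chip
 * invariants, choose DMA masks and netdev feature flags, lay out the
 * per-vector mailboxes, and register the net device.
 */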
14995 static int __devinit tg3_init_one(struct pci_dev *pdev,
14996                                   const struct pci_device_id *ent)
14997 {
14998         struct net_device *dev;
14999         struct tg3 *tp;
15000         int i, err, pm_cap;
15001         u32 sndmbx, rcvmbx, intmbx;
15002         char str[40];
15003         u64 dma_mask, persist_dma_mask;
15004         u32 features = 0;
15005
15006         printk_once(KERN_INFO "%s\n", version);
15007
15008         err = pci_enable_device(pdev);
15009         if (err) {
15010                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15011                 return err;
15012         }
15013
15014         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15015         if (err) {
15016                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15017                 goto err_out_disable_pdev;
15018         }
15019
15020         pci_set_master(pdev);
15021
15022         /* Find power-management capability. */
15023         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15024         if (pm_cap == 0) {
15025                 dev_err(&pdev->dev,
15026                         "Cannot find Power Management capability, aborting\n");
15027                 err = -EIO;
15028                 goto err_out_free_res;
15029         }
15030
15031         err = pci_set_power_state(pdev, PCI_D0);
15032         if (err) {
15033                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15034                 goto err_out_free_res;
15035         }
15036
15037         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15038         if (!dev) {
15039                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15040                 err = -ENOMEM;
15041                 goto err_out_power_down;
15042         }
15043
15044         SET_NETDEV_DEV(dev, &pdev->dev);
15045
15046         tp = netdev_priv(dev);
15047         tp->pdev = pdev;
15048         tp->dev = dev;
15049         tp->pm_cap = pm_cap;
15050         tp->rx_mode = TG3_DEF_RX_MODE;
15051         tp->tx_mode = TG3_DEF_TX_MODE;
15052
15053         if (tg3_debug > 0)
15054                 tp->msg_enable = tg3_debug;
15055         else
15056                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15057
15058         /* The word/byte swap controls here control register access byte
15059          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15060          * setting below.
15061          */
15062         tp->misc_host_ctrl =
15063                 MISC_HOST_CTRL_MASK_PCI_INT |
15064                 MISC_HOST_CTRL_WORD_SWAP |
15065                 MISC_HOST_CTRL_INDIR_ACCESS |
15066                 MISC_HOST_CTRL_PCISTATE_RW;
15067
15068         /* The NONFRM (non-frame) byte/word swap controls take effect
15069          * on descriptor entries, i.e. anything that isn't packet data.
15070          *
15071          * The StrongARM chips on the board (one for tx, one for rx)
15072          * are running in big-endian mode.
15073          */
15074         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15075                         GRC_MODE_WSWAP_NONFRM_DATA);
15076 #ifdef __BIG_ENDIAN
15077         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15078 #endif
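        /* Roughly speaking, a word swap exchanges the 16-bit halves of each
         * 32-bit quantity (0x12345678 <-> 0x56781234) while a byte swap
         * reverses byte order, so the settings above present the chip's
         * big-endian descriptor data correctly to the host CPU.
         */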
15079         spin_lock_init(&tp->lock);
15080         spin_lock_init(&tp->indirect_lock);
15081         INIT_WORK(&tp->reset_task, tg3_reset_task);
15082
15083         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15084         if (!tp->regs) {
15085                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15086                 err = -ENOMEM;
15087                 goto err_out_free_dev;
15088         }
15089
15090         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15091             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15092             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15093             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15094             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15095             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15096             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15097             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15098                 tg3_flag_set(tp, ENABLE_APE);
15099                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15100                 if (!tp->aperegs) {
15101                         dev_err(&pdev->dev,
15102                                 "Cannot map APE registers, aborting\n");
15103                         err = -ENOMEM;
15104                         goto err_out_iounmap;
15105                 }
15106         }
15107
15108         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15109         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15110
15111         dev->ethtool_ops = &tg3_ethtool_ops;
15112         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15113         dev->netdev_ops = &tg3_netdev_ops;
15114         dev->irq = pdev->irq;
15115
15116         err = tg3_get_invariants(tp);
15117         if (err) {
15118                 dev_err(&pdev->dev,
15119                         "Problem fetching invariants of chip, aborting\n");
15120                 goto err_out_apeunmap;
15121         }
15122
15123         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15124          * device behind the EPB, cannot support DMA addresses above
15125          * 40 bits.  On 64-bit systems with an IOMMU, use a 40-bit
15126          * dma_mask; on 64-bit systems without an IOMMU, use a 64-bit
15127          * dma_mask and do the DMA address check in tg3_start_xmit().
15128          */
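        /* For reference, DMA_BIT_MASK(n) is ((1ULL << (n)) - 1) for n < 64,
         * so the 40-bit mask used here is 0xffffffffff.
         */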
15129         if (tg3_flag(tp, IS_5788))
15130                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15131         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15132                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15133 #ifdef CONFIG_HIGHMEM
15134                 dma_mask = DMA_BIT_MASK(64);
15135 #endif
15136         } else
15137                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15138
15139         /* Configure DMA attributes. */
15140         if (dma_mask > DMA_BIT_MASK(32)) {
15141                 err = pci_set_dma_mask(pdev, dma_mask);
15142                 if (!err) {
15143                         features |= NETIF_F_HIGHDMA;
15144                         err = pci_set_consistent_dma_mask(pdev,
15145                                                           persist_dma_mask);
15146                         if (err < 0) {
15147                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15148                                         "DMA for consistent allocations\n");
15149                                 goto err_out_apeunmap;
15150                         }
15151                 }
15152         }
15153         if (err || dma_mask == DMA_BIT_MASK(32)) {
15154                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15155                 if (err) {
15156                         dev_err(&pdev->dev,
15157                                 "No usable DMA configuration, aborting\n");
15158                         goto err_out_apeunmap;
15159                 }
15160         }
15161
15162         tg3_init_bufmgr_config(tp);
15163
15164         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15165
15166         /* 5700 B0 chips do not support checksumming correctly due
15167          * to hardware bugs.
15168          */
15169         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15170                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15171
15172                 if (tg3_flag(tp, 5755_PLUS))
15173                         features |= NETIF_F_IPV6_CSUM;
15174         }
15175
15176         /* TSO is on by default on chips that support hardware TSO.
15177          * Firmware TSO on older chips gives lower performance, so it
15178          * is off by default, but can be enabled using ethtool.
15179          */
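        /* (e.g. "ethtool -K ethX tso on" from userspace) */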
15180         if ((tg3_flag(tp, HW_TSO_1) ||
15181              tg3_flag(tp, HW_TSO_2) ||
15182              tg3_flag(tp, HW_TSO_3)) &&
15183             (features & NETIF_F_IP_CSUM))
15184                 features |= NETIF_F_TSO;
15185         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15186                 if (features & NETIF_F_IPV6_CSUM)
15187                         features |= NETIF_F_TSO6;
15188                 if (tg3_flag(tp, HW_TSO_3) ||
15189                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15190                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15191                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15192                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15193                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15194                         features |= NETIF_F_TSO_ECN;
15195         }
15196
15197         dev->features |= features;
15198         dev->vlan_features |= features;
15199
15200         /*
15201          * Add loopback capability only for a subset of devices that support
15202          * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow
15203          * INT-PHY loopback for the remaining devices.
15204          */
15205         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15206             !tg3_flag(tp, CPMU_PRESENT))
15207                 /* Add the loopback capability */
15208                 features |= NETIF_F_LOOPBACK;
15209
15210         dev->hw_features |= features;
15211
15212         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15213             !tg3_flag(tp, TSO_CAPABLE) &&
15214             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15215                 tg3_flag_set(tp, MAX_RXPEND_64);
15216                 tp->rx_pending = 63;
15217         }
15218
15219         err = tg3_get_device_address(tp);
15220         if (err) {
15221                 dev_err(&pdev->dev,
15222                         "Could not obtain valid ethernet address, aborting\n");
15223                 goto err_out_apeunmap;
15224         }
15225
15226         /*
15227          * Reset the chip in case the UNDI or EFI driver did not shut it
15228          * down; otherwise the DMA self test would enable the WDMAC and
15229          * we'd see (spurious) pending DMA on the PCI bus at that point.
15230          */
15231         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15232             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15233                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15234                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15235         }
15236
15237         err = tg3_test_dma(tp);
15238         if (err) {
15239                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15240                 goto err_out_apeunmap;
15241         }
15242
15243         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15244         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15245         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15246         for (i = 0; i < tp->irq_max; i++) {
15247                 struct tg3_napi *tnapi = &tp->napi[i];
15248
15249                 tnapi->tp = tp;
15250                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15251
15252                 tnapi->int_mbox = intmbx;
15253                 if (i < 4)
15254                         intmbx += 0x8;
15255                 else
15256                         intmbx += 0x4;
15257
15258                 tnapi->consmbox = rcvmbx;
15259                 tnapi->prodmbox = sndmbx;
15260
15261                 if (i)
15262                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15263                 else
15264                         tnapi->coal_now = HOSTCC_MODE_NOW;
15265
15266                 if (!tg3_flag(tp, SUPPORT_MSIX))
15267                         break;
15268
15269                 /*
15270                  * If we support MSIX, we'll be using RSS.  If we're using
15271                  * RSS, the first vector only handles link interrupts and the
15272                  * remaining vectors handle rx and tx interrupts.  Reuse the
15273                  * mailbox values for the next iteration.  The values we set
15274                  * up above are still useful for single-vector mode.
15275                  */
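                /* For illustration, if S is the initial sndmbx value, the
                 * update rules below hand vectors 1, 2, 3, 4, 5 the producer
                 * mailboxes S, S-0x4, S+0x8, S+0x4, S+0x10: the two 32-bit
                 * halves of each 64-bit mailbox register are used in turn
                 * before moving on to the next register.
                 */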
15276                 if (!i)
15277                         continue;
15278
15279                 rcvmbx += 0x8;
15280
15281                 if (sndmbx & 0x4)
15282                         sndmbx -= 0x4;
15283                 else
15284                         sndmbx += 0xc;
15285         }
15286
15287         tg3_init_coal(tp);
15288
15289         pci_set_drvdata(pdev, dev);
15290
15291         err = register_netdev(dev);
15292         if (err) {
15293                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15294                 goto err_out_apeunmap;
15295         }
15296
15297         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15298                     tp->board_part_number,
15299                     tp->pci_chip_rev_id,
15300                     tg3_bus_string(tp, str),
15301                     dev->dev_addr);
15302
15303         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15304                 struct phy_device *phydev;
15305                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15306                 netdev_info(dev,
15307                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15308                             phydev->drv->name, dev_name(&phydev->dev));
15309         } else {
15310                 char *ethtype;
15311
15312                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15313                         ethtype = "10/100Base-TX";
15314                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15315                         ethtype = "1000Base-SX";
15316                 else
15317                         ethtype = "10/100/1000Base-T";
15318
15319                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15320                             "(WireSpeed[%d], EEE[%d])\n",
15321                             tg3_phy_string(tp), ethtype,
15322                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15323                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15324         }
15325
15326         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15327                     (dev->features & NETIF_F_RXCSUM) != 0,
15328                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15329                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15330                     tg3_flag(tp, ENABLE_ASF) != 0,
15331                     tg3_flag(tp, TSO_CAPABLE) != 0);
15332         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15333                     tp->dma_rwctrl,
15334                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15335                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15336
15337         pci_save_state(pdev);
15338
15339         return 0;
15340
15341 err_out_apeunmap:
15342         if (tp->aperegs) {
15343                 iounmap(tp->aperegs);
15344                 tp->aperegs = NULL;
15345         }
15346
15347 err_out_iounmap:
15348         if (tp->regs) {
15349                 iounmap(tp->regs);
15350                 tp->regs = NULL;
15351         }
15352
15353 err_out_free_dev:
15354         free_netdev(dev);
15355
15356 err_out_power_down:
15357         pci_set_power_state(pdev, PCI_D3hot);
15358
15359 err_out_free_res:
15360         pci_release_regions(pdev);
15361
15362 err_out_disable_pdev:
15363         pci_disable_device(pdev);
15364         pci_set_drvdata(pdev, NULL);
15365         return err;
15366 }
15367
15368 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15369 {
15370         struct net_device *dev = pci_get_drvdata(pdev);
15371
15372         if (dev) {
15373                 struct tg3 *tp = netdev_priv(dev);
15374
15375                 if (tp->fw)
15376                         release_firmware(tp->fw);
15377
15378                 cancel_work_sync(&tp->reset_task);
15379
15380                 if (!tg3_flag(tp, USE_PHYLIB)) {
15381                         tg3_phy_fini(tp);
15382                         tg3_mdio_fini(tp);
15383                 }
15384
15385                 unregister_netdev(dev);
15386                 if (tp->aperegs) {
15387                         iounmap(tp->aperegs);
15388                         tp->aperegs = NULL;
15389                 }
15390                 if (tp->regs) {
15391                         iounmap(tp->regs);
15392                         tp->regs = NULL;
15393                 }
15394                 free_netdev(dev);
15395                 pci_release_regions(pdev);
15396                 pci_disable_device(pdev);
15397                 pci_set_drvdata(pdev, NULL);
15398         }
15399 }
15400
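/* System sleep support: tg3_suspend() quiesces the interface (flush the
 * reset task, stop the timer, halt the chip) and prepares the power-down;
 * if that preparation fails, the hardware is restarted so the device
 * stays usable.  tg3_resume() restarts the hardware and re-arms the timer.
 */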
15401 #ifdef CONFIG_PM_SLEEP
15402 static int tg3_suspend(struct device *device)
15403 {
15404         struct pci_dev *pdev = to_pci_dev(device);
15405         struct net_device *dev = pci_get_drvdata(pdev);
15406         struct tg3 *tp = netdev_priv(dev);
15407         int err;
15408
15409         if (!netif_running(dev))
15410                 return 0;
15411
15412         flush_work_sync(&tp->reset_task);
15413         tg3_phy_stop(tp);
15414         tg3_netif_stop(tp);
15415
15416         del_timer_sync(&tp->timer);
15417
15418         tg3_full_lock(tp, 1);
15419         tg3_disable_ints(tp);
15420         tg3_full_unlock(tp);
15421
15422         netif_device_detach(dev);
15423
15424         tg3_full_lock(tp, 0);
15425         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15426         tg3_flag_clear(tp, INIT_COMPLETE);
15427         tg3_full_unlock(tp);
15428
15429         err = tg3_power_down_prepare(tp);
15430         if (err) {
15431                 int err2;
15432
15433                 tg3_full_lock(tp, 0);
15434
15435                 tg3_flag_set(tp, INIT_COMPLETE);
15436                 err2 = tg3_restart_hw(tp, 1);
15437                 if (err2)
15438                         goto out;
15439
15440                 tp->timer.expires = jiffies + tp->timer_offset;
15441                 add_timer(&tp->timer);
15442
15443                 netif_device_attach(dev);
15444                 tg3_netif_start(tp);
15445
15446 out:
15447                 tg3_full_unlock(tp);
15448
15449                 if (!err2)
15450                         tg3_phy_start(tp);
15451         }
15452
15453         return err;
15454 }
15455
15456 static int tg3_resume(struct device *device)
15457 {
15458         struct pci_dev *pdev = to_pci_dev(device);
15459         struct net_device *dev = pci_get_drvdata(pdev);
15460         struct tg3 *tp = netdev_priv(dev);
15461         int err;
15462
15463         if (!netif_running(dev))
15464                 return 0;
15465
15466         netif_device_attach(dev);
15467
15468         tg3_full_lock(tp, 0);
15469
15470         tg3_flag_set(tp, INIT_COMPLETE);
15471         err = tg3_restart_hw(tp, 1);
15472         if (err)
15473                 goto out;
15474
15475         tp->timer.expires = jiffies + tp->timer_offset;
15476         add_timer(&tp->timer);
15477
15478         tg3_netif_start(tp);
15479
15480 out:
15481         tg3_full_unlock(tp);
15482
15483         if (!err)
15484                 tg3_phy_start(tp);
15485
15486         return err;
15487 }
15488
15489 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15490 #define TG3_PM_OPS (&tg3_pm_ops)
15491
15492 #else
15493
15494 #define TG3_PM_OPS NULL
15495
15496 #endif /* CONFIG_PM_SLEEP */
15497
15498 /**
15499  * tg3_io_error_detected - called when PCI error is detected
15500  * @pdev: Pointer to PCI device
15501  * @state: The current PCI connection state
15502  *
15503  * This function is called after a PCI bus error affecting
15504  * this device has been detected.
15505  */
15506 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15507                                               pci_channel_state_t state)
15508 {
15509         struct net_device *netdev = pci_get_drvdata(pdev);
15510         struct tg3 *tp = netdev_priv(netdev);
15511         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15512
15513         netdev_info(netdev, "PCI I/O error detected\n");
15514
15515         rtnl_lock();
15516
15517         if (!netif_running(netdev))
15518                 goto done;
15519
15520         tg3_phy_stop(tp);
15521
15522         tg3_netif_stop(tp);
15523
15524         del_timer_sync(&tp->timer);
15525         tg3_flag_clear(tp, RESTART_TIMER);
15526
15527         /* Make sure that the reset task cannot run concurrently */
15528         cancel_work_sync(&tp->reset_task);
15529         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15530         tg3_flag_clear(tp, RESTART_TIMER);
15531
15532         netif_device_detach(netdev);
15533
15534         /* Clean up software state, even if MMIO is blocked */
15535         tg3_full_lock(tp, 0);
15536         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15537         tg3_full_unlock(tp);
15538
15539 done:
15540         if (state == pci_channel_io_perm_failure)
15541                 err = PCI_ERS_RESULT_DISCONNECT;
15542         else
15543                 pci_disable_device(pdev);
15544
15545         rtnl_unlock();
15546
15547         return err;
15548 }
15549
15550 /**
15551  * tg3_io_slot_reset - called after the PCI bus has been reset.
15552  * @pdev: Pointer to PCI device
15553  *
15554  * Restart the card from scratch, as if from a cold boot.
15555  * At this point, the card has experienced a hard reset,
15556  * followed by fixups by the BIOS, and has its config space
15557  * set up identically to what it was at cold boot.
15558  */
15559 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15560 {
15561         struct net_device *netdev = pci_get_drvdata(pdev);
15562         struct tg3 *tp = netdev_priv(netdev);
15563         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15564         int err;
15565
15566         rtnl_lock();
15567
15568         if (pci_enable_device(pdev)) {
15569                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15570                 goto done;
15571         }
15572
15573         pci_set_master(pdev);
15574         pci_restore_state(pdev);
15575         pci_save_state(pdev);
15576
15577         if (!netif_running(netdev)) {
15578                 rc = PCI_ERS_RESULT_RECOVERED;
15579                 goto done;
15580         }
15581
15582         err = tg3_power_up(tp);
15583         if (err)
15584                 goto done;
15585
15586         rc = PCI_ERS_RESULT_RECOVERED;
15587
15588 done:
15589         rtnl_unlock();
15590
15591         return rc;
15592 }
15593
15594 /**
15595  * tg3_io_resume - called when traffic can start flowing again.
15596  * @pdev: Pointer to PCI device
15597  *
15598  * This callback is called when the error recovery driver tells
15599  * us that it's OK to resume normal operation.
15600  */
15601 static void tg3_io_resume(struct pci_dev *pdev)
15602 {
15603         struct net_device *netdev = pci_get_drvdata(pdev);
15604         struct tg3 *tp = netdev_priv(netdev);
15605         int err;
15606
15607         rtnl_lock();
15608
15609         if (!netif_running(netdev))
15610                 goto done;
15611
15612         tg3_full_lock(tp, 0);
15613         tg3_flag_set(tp, INIT_COMPLETE);
15614         err = tg3_restart_hw(tp, 1);
15615         tg3_full_unlock(tp);
15616         if (err) {
15617                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15618                 goto done;
15619         }
15620
15621         netif_device_attach(netdev);
15622
15623         tp->timer.expires = jiffies + tp->timer_offset;
15624         add_timer(&tp->timer);
15625
15626         tg3_netif_start(tp);
15627
15628         tg3_phy_start(tp);
15629
15630 done:
15631         rtnl_unlock();
15632 }
15633
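/* PCI error recovery: the PCI core calls error_detected() first, then,
 * if the slot can be reset, slot_reset(), and finally resume() once
 * traffic is allowed to flow again.
 */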
15634 static struct pci_error_handlers tg3_err_handler = {
15635         .error_detected = tg3_io_error_detected,
15636         .slot_reset     = tg3_io_slot_reset,
15637         .resume         = tg3_io_resume
15638 };
15639
15640 static struct pci_driver tg3_driver = {
15641         .name           = DRV_MODULE_NAME,
15642         .id_table       = tg3_pci_tbl,
15643         .probe          = tg3_init_one,
15644         .remove         = __devexit_p(tg3_remove_one),
15645         .err_handler    = &tg3_err_handler,
15646         .driver.pm      = TG3_PM_OPS,
15647 };
15648
15649 static int __init tg3_init(void)
15650 {
15651         return pci_register_driver(&tg3_driver);
15652 }
15653
15654 static void __exit tg3_cleanup(void)
15655 {
15656         pci_unregister_driver(&tg3_driver);
15657 }
15658
15659 module_init(tg3_init);
15660 module_exit(tg3_cleanup);