tg3: Fix rx hang on MTU change with 5717/5719
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2014 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
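
/* Typical usage, as seen throughout this file: the short flag name is
 * pasted onto TG3_FLAG_ by the macros above, so callers write e.g.:
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *              ...
 *      tg3_flag_set(tp, TAGGED_STATUS);
 *      tg3_flag_clear(tp, ENABLE_ASF);
 */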

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 11, 2014"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
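
/* NEXT_TX is the '& (foo - 1)' form described above: TG3_TX_RING_SIZE
 * is a power of two (512), so ((N) + 1) & (TG3_TX_RING_SIZE - 1) is
 * equivalent to ((N) + 1) % TG3_TX_RING_SIZE but avoids a hw modulo.
 */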

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
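
/* A sketch of how the threshold is consumed in the rx path (the real
 * check lives in tg3_rx() further down in this file):
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ... hand the mapped buffer straight to the stack ...
 *      else
 *              ... memcpy() into a small skb, recycle the rx buffer ...
 */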

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}
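
/* Callers reach this function through the tw32_f()/tw32_wait_f() macros
 * defined below, e.g. the clock switching code does:
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * to guarantee a 40 usec settling time after the write.
 */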

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
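
/* tg3_ape_lock()/tg3_ape_unlock() always travel in pairs around the
 * shared resource they protect; the PHY accessors below, for example,
 * bracket the MI_COM sequence with:
 *
 *      tg3_ape_lock(tp, tp->phy_ape_lock);
 *      ...
 *      tg3_ape_unlock(tp, tp->phy_ape_lock);
 */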

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}
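
/* A minimal read-modify-write sketch using the accessors above (the
 * register/bit pair is only illustrative):
 *
 *      u32 val;
 *
 *      if (!tg3_readphy(tp, MII_BMCR, &val))
 *              tg3_writephy(tp, MII_BMCR, val | BMCR_ANENABLE);
 */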

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (__tg3_readphy(tp, mii_id, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (__tg3_writephy(tp, mii_id, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}
1487
1488 static void tg3_mdio_start(struct tg3 *tp)
1489 {
1490         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1491         tw32_f(MAC_MI_MODE, tp->mi_mode);
1492         udelay(80);
1493
1494         if (tg3_flag(tp, MDIOBUS_INITED) &&
1495             tg3_asic_rev(tp) == ASIC_REV_5785)
1496                 tg3_mdio_config_5785(tp);
1497 }
1498
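/* On 5717-plus devices the MII address of the internal PHY is
 * derived from the PCI function number: function n uses address
 * n + 1, or n + 8 when the port is strapped for SerDes (so PCI
 * function 0 maps to address 1 for copper and 8 for SerDes).
 */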
1499 static int tg3_mdio_init(struct tg3 *tp)
1500 {
1501         int i;
1502         u32 reg;
1503         struct phy_device *phydev;
1504
1505         if (tg3_flag(tp, 5717_PLUS)) {
1506                 u32 is_serdes;
1507
1508                 tp->phy_addr = tp->pci_fn + 1;
1509
1510                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1511                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1512                 else
1513                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1514                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1515                 if (is_serdes)
1516                         tp->phy_addr += 7;
1517         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1518                 int addr;
1519
1520                 addr = ssb_gige_get_phyaddr(tp->pdev);
1521                 if (addr < 0)
1522                         return addr;
1523                 tp->phy_addr = addr;
1524         } else
1525                 tp->phy_addr = TG3_PHY_MII_ADDR;
1526
1527         tg3_mdio_start(tp);
1528
1529         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1530                 return 0;
1531
1532         tp->mdio_bus = mdiobus_alloc();
1533         if (tp->mdio_bus == NULL)
1534                 return -ENOMEM;
1535
1536         tp->mdio_bus->name     = "tg3 mdio bus";
1537         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1538                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1539         tp->mdio_bus->priv     = tp;
1540         tp->mdio_bus->parent   = &tp->pdev->dev;
1541         tp->mdio_bus->read     = &tg3_mdio_read;
1542         tp->mdio_bus->write    = &tg3_mdio_write;
1543         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1544
1545         /* The bus registration will look for all the PHYs on the mdio bus.
1546          * Unfortunately, it does not ensure the PHY is powered up before
1547          * accessing the PHY ID registers.  A chip reset is the
1548          * quickest way to bring the device back to an operational state.
1549          */
1550         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1551                 tg3_bmcr_reset(tp);
1552
1553         i = mdiobus_register(tp->mdio_bus);
1554         if (i) {
1555                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1556                 mdiobus_free(tp->mdio_bus);
1557                 return i;
1558         }
1559
1560         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1561
1562         if (!phydev || !phydev->drv) {
1563                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1564                 mdiobus_unregister(tp->mdio_bus);
1565                 mdiobus_free(tp->mdio_bus);
1566                 return -ENODEV;
1567         }
1568
1569         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1570         case PHY_ID_BCM57780:
1571                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1572                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1573                 break;
1574         case PHY_ID_BCM50610:
1575         case PHY_ID_BCM50610M:
1576                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1577                                      PHY_BRCM_RX_REFCLK_UNUSED |
1578                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1579                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1580                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1581                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1582                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1583                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1584                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1585                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1586                 /* fallthru */
1587         case PHY_ID_RTL8211C:
1588                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1589                 break;
1590         case PHY_ID_RTL8201E:
1591         case PHY_ID_BCMAC131:
1592                 phydev->interface = PHY_INTERFACE_MODE_MII;
1593                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1594                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1595                 break;
1596         }
1597
1598         tg3_flag_set(tp, MDIOBUS_INITED);
1599
1600         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1601                 tg3_mdio_config_5785(tp);
1602
1603         return 0;
1604 }
1605
1606 static void tg3_mdio_fini(struct tg3 *tp)
1607 {
1608         if (tg3_flag(tp, MDIOBUS_INITED)) {
1609                 tg3_flag_clear(tp, MDIOBUS_INITED);
1610                 mdiobus_unregister(tp->mdio_bus);
1611                 mdiobus_free(tp->mdio_bus);
1612         }
1613 }
1614
1615 /* tp->lock is held. */
1616 static inline void tg3_generate_fw_event(struct tg3 *tp)
1617 {
1618         u32 val;
1619
1620         val = tr32(GRC_RX_CPU_EVENT);
1621         val |= GRC_RX_CPU_DRIVER_EVENT;
1622         tw32_f(GRC_RX_CPU_EVENT, val);
1623
1624         tp->last_event_jiffies = jiffies;
1625 }
1626
1627 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1628
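/* The poll below runs delay_cnt iterations of udelay(8), where
 * delay_cnt = (remaining_usec >> 3) + 1; a full timeout of
 * 2500 usec therefore costs (2500 >> 3) + 1 = 313 iterations,
 * roughly 2.5 ms of busy-waiting in the worst case.
 */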
1629 /* tp->lock is held. */
1630 static void tg3_wait_for_event_ack(struct tg3 *tp)
1631 {
1632         int i;
1633         unsigned int delay_cnt;
1634         long time_remain;
1635
1636         /* If enough time has passed, no wait is necessary. */
1637         time_remain = (long)(tp->last_event_jiffies + 1 +
1638                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1639                       (long)jiffies;
1640         if (time_remain < 0)
1641                 return;
1642
1643         /* Check if we can shorten the wait time. */
1644         delay_cnt = jiffies_to_usecs(time_remain);
1645         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1646                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1647         delay_cnt = (delay_cnt >> 3) + 1;
1648
1649         for (i = 0; i < delay_cnt; i++) {
1650                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1651                         break;
1652                 if (pci_channel_offline(tp->pdev))
1653                         break;
1654
1655                 udelay(8);
1656         }
1657 }
1658
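/* Pack the basic MII register pairs (BMCR/BMSR, ADVERTISE/LPA,
 * CTRL1000/STAT1000), plus PHYADDR, into four 32-bit words for
 * the firmware link report, with the first register of each pair
 * in the upper 16 bits.
 */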
1659 /* tp->lock is held. */
1660 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1661 {
1662         u32 reg, val;
1663
1664         val = 0;
1665         if (!tg3_readphy(tp, MII_BMCR, &reg))
1666                 val = reg << 16;
1667         if (!tg3_readphy(tp, MII_BMSR, &reg))
1668                 val |= (reg & 0xffff);
1669         *data++ = val;
1670
1671         val = 0;
1672         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1673                 val = reg << 16;
1674         if (!tg3_readphy(tp, MII_LPA, &reg))
1675                 val |= (reg & 0xffff);
1676         *data++ = val;
1677
1678         val = 0;
1679         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1680                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1681                         val = reg << 16;
1682                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1683                         val |= (reg & 0xffff);
1684         }
1685         *data++ = val;
1686
1687         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1688                 val = reg << 16;
1689         else
1690                 val = 0;
1691         *data++ = val;
1692 }
1693
1694 /* tp->lock is held. */
1695 static void tg3_ump_link_report(struct tg3 *tp)
1696 {
1697         u32 data[4];
1698
1699         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1700                 return;
1701
1702         tg3_phy_gather_ump_data(tp, data);
1703
1704         tg3_wait_for_event_ack(tp);
1705
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1712
1713         tg3_generate_fw_event(tp);
1714 }
1715
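/* Ask the ASF firmware to pause: wait for the RX CPU to ack any
 * in-flight event, post FWCMD_NICDRV_PAUSE_FW, then wait for the
 * ack again so the firmware is quiesced before the caller resets
 * the chip.  Skipped when the APE handles management traffic.
 */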
1716 /* tp->lock is held. */
1717 static void tg3_stop_fw(struct tg3 *tp)
1718 {
1719         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1720                 /* Wait for RX cpu to ACK the previous event. */
1721                 tg3_wait_for_event_ack(tp);
1722
1723                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1724
1725                 tg3_generate_fw_event(tp);
1726
1727                 /* Wait for RX cpu to ACK this event. */
1728                 tg3_wait_for_event_ack(tp);
1729         }
1730 }
1731
1732 /* tp->lock is held. */
1733 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1734 {
1735         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1736                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1737
1738         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1739                 switch (kind) {
1740                 case RESET_KIND_INIT:
1741                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1742                                       DRV_STATE_START);
1743                         break;
1744
1745                 case RESET_KIND_SHUTDOWN:
1746                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1747                                       DRV_STATE_UNLOAD);
1748                         break;
1749
1750                 case RESET_KIND_SUSPEND:
1751                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1752                                       DRV_STATE_SUSPEND);
1753                         break;
1754
1755                 default:
1756                         break;
1757                 }
1758         }
1759 }
1760
1761 /* tp->lock is held. */
1762 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1763 {
1764         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1765                 switch (kind) {
1766                 case RESET_KIND_INIT:
1767                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1768                                       DRV_STATE_START_DONE);
1769                         break;
1770
1771                 case RESET_KIND_SHUTDOWN:
1772                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1773                                       DRV_STATE_UNLOAD_DONE);
1774                         break;
1775
1776                 default:
1777                         break;
1778                 }
1779         }
1780 }
1781
1782 /* tp->lock is held. */
1783 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1784 {
1785         if (tg3_flag(tp, ENABLE_ASF)) {
1786                 switch (kind) {
1787                 case RESET_KIND_INIT:
1788                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1789                                       DRV_STATE_START);
1790                         break;
1791
1792                 case RESET_KIND_SHUTDOWN:
1793                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1794                                       DRV_STATE_UNLOAD);
1795                         break;
1796
1797                 case RESET_KIND_SUSPEND:
1798                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1799                                       DRV_STATE_SUSPEND);
1800                         break;
1801
1802                 default:
1803                         break;
1804                 }
1805         }
1806 }
1807
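/* Wait for bootcode to finish: the firmware signals completion by
 * writing the one's complement of MAGIC1 back into the firmware
 * mailbox.  The main loop budgets 100000 polls of udelay(10),
 * about one second; 5906 parts poll VCPU_STATUS instead, with a
 * 20 ms (200 x 100 usec) cap.
 */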
1808 static int tg3_poll_fw(struct tg3 *tp)
1809 {
1810         int i;
1811         u32 val;
1812
1813         if (tg3_flag(tp, NO_FWARE_REPORTED))
1814                 return 0;
1815
1816         if (tg3_flag(tp, IS_SSB_CORE)) {
1817                 /* We don't use firmware. */
1818                 return 0;
1819         }
1820
1821         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1822                 /* Wait up to 20ms for init done. */
1823                 for (i = 0; i < 200; i++) {
1824                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1825                                 return 0;
1826                         if (pci_channel_offline(tp->pdev))
1827                                 return -ENODEV;
1828
1829                         udelay(100);
1830                 }
1831                 return -ENODEV;
1832         }
1833
1834         /* Wait for firmware initialization to complete. */
1835         for (i = 0; i < 100000; i++) {
1836                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1837                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1838                         break;
1839                 if (pci_channel_offline(tp->pdev)) {
1840                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1841                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1842                                 netdev_info(tp->dev, "No firmware running\n");
1843                         }
1844
1845                         break;
1846                 }
1847
1848                 udelay(10);
1849         }
1850
1851         /* Chip might not be fitted with firmware.  Some Sun onboard
1852          * parts are configured like that.  So don't signal the timeout
1853          * of the above loop as an error, but do report the lack of
1854          * running firmware once.
1855          */
1856         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1857                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1858
1859                 netdev_info(tp->dev, "No firmware running\n");
1860         }
1861
1862         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1863                 /* The 57765 A0 needs extra time to finish its
1864                  * internal setup after the firmware handshake.
1865                  */
1866                 mdelay(10);
1867         }
1868
1869         return 0;
1870 }
1871
1872 static void tg3_link_report(struct tg3 *tp)
1873 {
1874         if (!netif_carrier_ok(tp->dev)) {
1875                 netif_info(tp, link, tp->dev, "Link is down\n");
1876                 tg3_ump_link_report(tp);
1877         } else if (netif_msg_link(tp)) {
1878                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1879                             (tp->link_config.active_speed == SPEED_1000 ?
1880                              1000 :
1881                              (tp->link_config.active_speed == SPEED_100 ?
1882                               100 : 10)),
1883                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1884                              "full" : "half"));
1885
1886                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1887                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1888                             "on" : "off",
1889                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1890                             "on" : "off");
1891
1892                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1893                         netdev_info(tp->dev, "EEE is %s\n",
1894                                     tp->setlpicnt ? "enabled" : "disabled");
1895
1896                 tg3_ump_link_report(tp);
1897         }
1898
1899         tp->link_up = netif_carrier_ok(tp->dev);
1900 }
1901
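/* Decode 802.3 pause advertisement bits into FLOW_CTRL_{RX,TX}.
 * The mapping used by the helpers below is:
 *
 *   PAUSE_CAP  PAUSE_ASYM   resolved capability
 *       1          0        RX | TX  (symmetric pause)
 *       1          1        RX
 *       0          1        TX
 *       0          0        none
 */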
1902 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1903 {
1904         u32 flowctrl = 0;
1905
1906         if (adv & ADVERTISE_PAUSE_CAP) {
1907                 flowctrl |= FLOW_CTRL_RX;
1908                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1909                         flowctrl |= FLOW_CTRL_TX;
1910         } else if (adv & ADVERTISE_PAUSE_ASYM)
1911                 flowctrl |= FLOW_CTRL_TX;
1912
1913         return flowctrl;
1914 }
1915
1916 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1917 {
1918         u16 miireg;
1919
1920         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1921                 miireg = ADVERTISE_1000XPAUSE;
1922         else if (flow_ctrl & FLOW_CTRL_TX)
1923                 miireg = ADVERTISE_1000XPSE_ASYM;
1924         else if (flow_ctrl & FLOW_CTRL_RX)
1925                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1926         else
1927                 miireg = 0;
1928
1929         return miireg;
1930 }
1931
1932 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1933 {
1934         u32 flowctrl = 0;
1935
1936         if (adv & ADVERTISE_1000XPAUSE) {
1937                 flowctrl |= FLOW_CTRL_RX;
1938                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1939                         flowctrl |= FLOW_CTRL_TX;
1940         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1941                 flowctrl |= FLOW_CTRL_TX;
1942
1943         return flowctrl;
1944 }
1945
1946 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1947 {
1948         u8 cap = 0;
1949
1950         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1951                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1952         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1953                 if (lcladv & ADVERTISE_1000XPAUSE)
1954                         cap = FLOW_CTRL_RX;
1955                 if (rmtadv & ADVERTISE_1000XPAUSE)
1956                         cap = FLOW_CTRL_TX;
1957         }
1958
1959         return cap;
1960 }
1961
1962 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1963 {
1964         u8 autoneg;
1965         u8 flowctrl = 0;
1966         u32 old_rx_mode = tp->rx_mode;
1967         u32 old_tx_mode = tp->tx_mode;
1968
1969         if (tg3_flag(tp, USE_PHYLIB))
1970                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1971         else
1972                 autoneg = tp->link_config.autoneg;
1973
1974         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1975                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1976                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1977                 else
1978                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1979         } else
1980                 flowctrl = tp->link_config.flowctrl;
1981
1982         tp->link_config.active_flowctrl = flowctrl;
1983
1984         if (flowctrl & FLOW_CTRL_RX)
1985                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1986         else
1987                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1988
1989         if (old_rx_mode != tp->rx_mode)
1990                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1991
1992         if (flowctrl & FLOW_CTRL_TX)
1993                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1994         else
1995                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1996
1997         if (old_tx_mode != tp->tx_mode)
1998                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1999 }
2000
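/* phylib link-change callback.  Under tp->lock this recomputes the
 * MAC_MODE port/duplex bits, renegotiates flow control, adjusts MI
 * status polling on 5785 and the TX IPG/slot-time for half-duplex
 * gigabit, then reports any link change after dropping the lock.
 */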
2001 static void tg3_adjust_link(struct net_device *dev)
2002 {
2003         u8 oldflowctrl, linkmesg = 0;
2004         u32 mac_mode, lcl_adv, rmt_adv;
2005         struct tg3 *tp = netdev_priv(dev);
2006         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2007
2008         spin_lock_bh(&tp->lock);
2009
2010         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2011                                     MAC_MODE_HALF_DUPLEX);
2012
2013         oldflowctrl = tp->link_config.active_flowctrl;
2014
2015         if (phydev->link) {
2016                 lcl_adv = 0;
2017                 rmt_adv = 0;
2018
2019                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2020                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2021                 else if (phydev->speed == SPEED_1000 ||
2022                          tg3_asic_rev(tp) != ASIC_REV_5785)
2023                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2024                 else
2025                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2026
2027                 if (phydev->duplex == DUPLEX_HALF)
2028                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2029                 else {
2030                         lcl_adv = mii_advertise_flowctrl(
2031                                   tp->link_config.flowctrl);
2032
2033                         if (phydev->pause)
2034                                 rmt_adv = LPA_PAUSE_CAP;
2035                         if (phydev->asym_pause)
2036                                 rmt_adv |= LPA_PAUSE_ASYM;
2037                 }
2038
2039                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2040         } else
2041                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2042
2043         if (mac_mode != tp->mac_mode) {
2044                 tp->mac_mode = mac_mode;
2045                 tw32_f(MAC_MODE, tp->mac_mode);
2046                 udelay(40);
2047         }
2048
2049         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2050                 if (phydev->speed == SPEED_10)
2051                         tw32(MAC_MI_STAT,
2052                              MAC_MI_STAT_10MBPS_MODE |
2053                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054                 else
2055                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2056         }
2057
2058         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2059                 tw32(MAC_TX_LENGTHS,
2060                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2061                       (6 << TX_LENGTHS_IPG_SHIFT) |
2062                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2063         else
2064                 tw32(MAC_TX_LENGTHS,
2065                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2066                       (6 << TX_LENGTHS_IPG_SHIFT) |
2067                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2068
2069         if (phydev->link != tp->old_link ||
2070             phydev->speed != tp->link_config.active_speed ||
2071             phydev->duplex != tp->link_config.active_duplex ||
2072             oldflowctrl != tp->link_config.active_flowctrl)
2073                 linkmesg = 1;
2074
2075         tp->old_link = phydev->link;
2076         tp->link_config.active_speed = phydev->speed;
2077         tp->link_config.active_duplex = phydev->duplex;
2078
2079         spin_unlock_bh(&tp->lock);
2080
2081         if (linkmesg)
2082                 tg3_link_report(tp);
2083 }
2084
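/* Connect the MAC to its phylib PHY and trim the advertised
 * feature set: gigabit plus pause for GMII/RGMII attachments,
 * 10/100 plus pause for MII or 10/100-only hardware.
 */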
2085 static int tg3_phy_init(struct tg3 *tp)
2086 {
2087         struct phy_device *phydev;
2088
2089         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2090                 return 0;
2091
2092         /* Bring the PHY back to a known state. */
2093         tg3_bmcr_reset(tp);
2094
2095         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2096
2097         /* Attach the MAC to the PHY. */
2098         phydev = phy_connect(tp->dev, phydev_name(phydev),
2099                              tg3_adjust_link, phydev->interface);
2100         if (IS_ERR(phydev)) {
2101                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2102                 return PTR_ERR(phydev);
2103         }
2104
2105         /* Mask with MAC supported features. */
2106         switch (phydev->interface) {
2107         case PHY_INTERFACE_MODE_GMII:
2108         case PHY_INTERFACE_MODE_RGMII:
2109                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2110                         phydev->supported &= (PHY_GBIT_FEATURES |
2111                                               SUPPORTED_Pause |
2112                                               SUPPORTED_Asym_Pause);
2113                         break;
2114                 }
2115                 /* fallthru */
2116         case PHY_INTERFACE_MODE_MII:
2117                 phydev->supported &= (PHY_BASIC_FEATURES |
2118                                       SUPPORTED_Pause |
2119                                       SUPPORTED_Asym_Pause);
2120                 break;
2121         default:
2122                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2123                 return -EINVAL;
2124         }
2125
2126         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2127
2128         phydev->advertising = phydev->supported;
2129
2130         phy_attached_info(phydev);
2131
2132         return 0;
2133 }
2134
2135 static void tg3_phy_start(struct tg3 *tp)
2136 {
2137         struct phy_device *phydev;
2138
2139         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2140                 return;
2141
2142         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2143
2144         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2145                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2146                 phydev->speed = tp->link_config.speed;
2147                 phydev->duplex = tp->link_config.duplex;
2148                 phydev->autoneg = tp->link_config.autoneg;
2149                 phydev->advertising = tp->link_config.advertising;
2150         }
2151
2152         phy_start(phydev);
2153
2154         phy_start_aneg(phydev);
2155 }
2156
2157 static void tg3_phy_stop(struct tg3 *tp)
2158 {
2159         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2160                 return;
2161
2162         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 }
2164
2165 static void tg3_phy_fini(struct tg3 *tp)
2166 {
2167         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2168                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2170         }
2171 }
2172
2173 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2174 {
2175         int err;
2176         u32 val;
2177
2178         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2179                 return 0;
2180
2181         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2182                 /* Cannot do read-modify-write on 5401 */
2183                 err = tg3_phy_auxctl_write(tp,
2184                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2185                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2186                                            0x4c20);
2187                 goto done;
2188         }
2189
2190         err = tg3_phy_auxctl_read(tp,
2191                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2192         if (err)
2193                 return err;
2194
2195         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2196         err = tg3_phy_auxctl_write(tp,
2197                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2198
2199 done:
2200         return err;
2201 }
2202
2203 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2204 {
2205         u32 phytest;
2206
2207         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2208                 u32 phy;
2209
2210                 tg3_writephy(tp, MII_TG3_FET_TEST,
2211                              phytest | MII_TG3_FET_SHADOW_EN);
2212                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2213                         if (enable)
2214                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215                         else
2216                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2218                 }
2219                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2220         }
2221 }
2222
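/* Toggle auto power-down (APD) in the PHY.  FET-style PHYs use the
 * shadow AUXSTAT2 register; others program the SCR5 and APD shadow
 * selectors, selecting the 84 ms wake timer (per the WKTM_84MS
 * selector name) when APD is enabled.
 */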
2223 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2224 {
2225         u32 reg;
2226
2227         if (!tg3_flag(tp, 5705_PLUS) ||
2228             (tg3_flag(tp, 5717_PLUS) &&
2229              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2230                 return;
2231
2232         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2233                 tg3_phy_fet_toggle_apd(tp, enable);
2234                 return;
2235         }
2236
2237         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2238               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2239               MII_TG3_MISC_SHDW_SCR5_SDTL |
2240               MII_TG3_MISC_SHDW_SCR5_C125OE;
2241         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2242                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2243
2244         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2245
2246
2247         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2248         if (enable)
2249                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2250
2251         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2252 }
2253
2254 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2255 {
2256         u32 phy;
2257
2258         if (!tg3_flag(tp, 5705_PLUS) ||
2259             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260                 return;
2261
2262         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263                 u32 ephy;
2264
2265                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2266                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2267
2268                         tg3_writephy(tp, MII_TG3_FET_TEST,
2269                                      ephy | MII_TG3_FET_SHADOW_EN);
2270                         if (!tg3_readphy(tp, reg, &phy)) {
2271                                 if (enable)
2272                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 else
2274                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275                                 tg3_writephy(tp, reg, phy);
2276                         }
2277                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2278                 }
2279         } else {
2280                 int ret;
2281
2282                 ret = tg3_phy_auxctl_read(tp,
2283                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284                 if (!ret) {
2285                         if (enable)
2286                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         else
2288                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289                         tg3_phy_auxctl_write(tp,
2290                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2291                 }
2292         }
2293 }
2294
2295 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2296 {
2297         int ret;
2298         u32 val;
2299
2300         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301                 return;
2302
2303         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2304         if (!ret)
2305                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2306                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 }
2308
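/* Distribute factory OTP calibration values into the PHY DSP: the
 * packed tp->phy_otp word is split field by field into the TAP1,
 * AADJ1CH0/CH3 and EXP75/96/97 DSP registers, bracketed by the
 * aux-control SMDSP enable.
 */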
2309 static void tg3_phy_apply_otp(struct tg3 *tp)
2310 {
2311         u32 otp, phy;
2312
2313         if (!tp->phy_otp)
2314                 return;
2315
2316         otp = tp->phy_otp;
2317
2318         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319                 return;
2320
2321         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2322         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2323         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2324
2325         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2326               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2327         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2328
2329         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2330         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2331         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2332
2333         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2335
2336         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2337         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2338
2339         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2340               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2341         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2342
2343         tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 }
2345
2346 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2347 {
2348         u32 val;
2349         struct ethtool_eee *dest = &tp->eee;
2350
2351         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352                 return;
2353
2354         if (eee)
2355                 dest = eee;
2356
2357         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2358                 return;
2359
2360         /* Pull eee_active */
2361         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2362             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2363                 dest->eee_active = 1;
2364         } else
2365                 dest->eee_active = 0;
2366
2367         /* Pull lp advertised settings */
2368         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2369                 return;
2370         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372         /* Pull advertised and eee_enabled settings */
2373         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2374                 return;
2375         dest->eee_enabled = !!val;
2376         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377
2378         /* Pull tx_lpi_enabled */
2379         val = tr32(TG3_CPMU_EEE_MODE);
2380         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2381
2382         /* Pull lpi timer value */
2383         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2384 }
2385
2386 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2387 {
2388         u32 val;
2389
2390         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2391                 return;
2392
2393         tp->setlpicnt = 0;
2394
2395         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2396             current_link_up &&
2397             tp->link_config.active_duplex == DUPLEX_FULL &&
2398             (tp->link_config.active_speed == SPEED_100 ||
2399              tp->link_config.active_speed == SPEED_1000)) {
2400                 u32 eeectl;
2401
2402                 if (tp->link_config.active_speed == SPEED_1000)
2403                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2404                 else
2405                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2406
2407                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2408
2409                 tg3_eee_pull_config(tp, NULL);
2410                 if (tp->eee.eee_active)
2411                         tp->setlpicnt = 2;
2412         }
2413
2414         if (!tp->setlpicnt) {
2415                 if (current_link_up &&
2416                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2418                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2419                 }
2420
2421                 val = tr32(TG3_CPMU_EEE_MODE);
2422                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423         }
2424 }
2425
2426 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 {
2428         u32 val;
2429
2430         if (tp->link_config.active_speed == SPEED_1000 &&
2431             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2432              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2433              tg3_flag(tp, 57765_CLASS)) &&
2434             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2435                 val = MII_TG3_DSP_TAP26_ALNOKO |
2436                       MII_TG3_DSP_TAP26_RMRXSTO;
2437                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2438                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2439         }
2440
2441         val = tr32(TG3_CPMU_EEE_MODE);
2442         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2443 }
2444
2445 static int tg3_wait_macro_done(struct tg3 *tp)
2446 {
2447         int limit = 100;
2448
2449         while (limit--) {
2450                 u32 tmp32;
2451
2452                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2453                         if ((tmp32 & 0x1000) == 0)
2454                                 break;
2455                 }
2456         }
2457         if (limit < 0)
2458                 return -EBUSY;
2459
2460         return 0;
2461 }
2462
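/* Write the four six-word test patterns into each of the four PHY
 * DSP channels (base address (chan * 0x2000) | 0x0200), read them
 * back, and compare the low 15 bits of the even words and low 4
 * bits of the odd words.  A stuck DSP macro sets *resetp so the
 * caller retries after a fresh PHY reset; a data mismatch simply
 * fails with -EBUSY.
 */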
2463 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2464 {
2465         static const u32 test_pat[4][6] = {
2466         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2467         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2468         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2469         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470         };
2471         int chan;
2472
2473         for (chan = 0; chan < 4; chan++) {
2474                 int i;
2475
2476                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477                              (chan * 0x2000) | 0x0200);
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479
2480                 for (i = 0; i < 6; i++)
2481                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2482                                      test_pat[chan][i]);
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2485                 if (tg3_wait_macro_done(tp)) {
2486                         *resetp = 1;
2487                         return -EBUSY;
2488                 }
2489
2490                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2491                              (chan * 0x2000) | 0x0200);
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2499                 if (tg3_wait_macro_done(tp)) {
2500                         *resetp = 1;
2501                         return -EBUSY;
2502                 }
2503
2504                 for (i = 0; i < 6; i += 2) {
2505                         u32 low, high;
2506
2507                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2508                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2509                             tg3_wait_macro_done(tp)) {
2510                                 *resetp = 1;
2511                                 return -EBUSY;
2512                         }
2513                         low &= 0x7fff;
2514                         high &= 0x000f;
2515                         if (low != test_pat[chan][i] ||
2516                             high != test_pat[chan][i+1]) {
2517                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2518                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2519                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520
2521                                 return -EBUSY;
2522                         }
2523                 }
2524         }
2525
2526         return 0;
2527 }
2528
2529 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 {
2531         int chan;
2532
2533         for (chan = 0; chan < 4; chan++) {
2534                 int i;
2535
2536                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2537                              (chan * 0x2000) | 0x0200);
2538                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2539                 for (i = 0; i < 6; i++)
2540                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2541                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2542                 if (tg3_wait_macro_done(tp))
2543                         return -EBUSY;
2544         }
2545
2546         return 0;
2547 }
2548
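/* DSP test-pattern workaround for 5703/5704/5705 PHYs: force the
 * link to 1000 Mbps full duplex in master mode, verify the DSP
 * channels with tg3_phy_write_and_check_testpat() (retrying with a
 * fresh PHY reset up to ten times), then clear the channels and
 * restore the original register state.
 */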
2549 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2550 {
2551         u32 reg32, phy9_orig;
2552         int retries, do_phy_reset, err;
2553
2554         retries = 10;
2555         do_phy_reset = 1;
2556         do {
2557                 if (do_phy_reset) {
2558                         err = tg3_bmcr_reset(tp);
2559                         if (err)
2560                                 return err;
2561                         do_phy_reset = 0;
2562                 }
2563
2564                 /* Disable transmitter and interrupt.  */
2565                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566                         continue;
2567
2568                 reg32 |= 0x3000;
2569                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2570
2571                 /* Set full-duplex, 1000 Mbps.  */
2572                 tg3_writephy(tp, MII_BMCR,
2573                              BMCR_FULLDPLX | BMCR_SPEED1000);
2574
2575                 /* Set to master mode.  */
2576                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2577                         continue;
2578
2579                 tg3_writephy(tp, MII_CTRL1000,
2580                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2581
2582                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583                 if (err)
2584                         return err;
2585
2586                 /* Block the PHY control access.  */
2587                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2588
2589                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2590                 if (!err)
2591                         break;
2592         } while (--retries);
2593
2594         err = tg3_phy_reset_chanpat(tp);
2595         if (err)
2596                 return err;
2597
2598         tg3_phydsp_write(tp, 0x8005, 0x0000);
2599
2600         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2601         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2602
2603         tg3_phy_toggle_auxctl_smdsp(tp, false);
2604
2605         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2606
2607         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608         if (err)
2609                 return err;
2610
2611         reg32 &= ~0x3000;
2612         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613
2614         return 0;
2615 }
2616
2617 static void tg3_carrier_off(struct tg3 *tp)
2618 {
2619         netif_carrier_off(tp->dev);
2620         tp->link_up = false;
2621 }
2622
2623 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2624 {
2625         if (tg3_flag(tp, ENABLE_ASF))
2626                 netdev_warn(tp->dev,
2627                             "Management side-band traffic will be interrupted during phy settings change\n");
2628 }
2629
2630 /* Unconditionally reset the tigon3 PHY and reapply the
2631  * chip-specific workarounds.
2632  */
2633 static int tg3_phy_reset(struct tg3 *tp)
2634 {
2635         u32 val, cpmuctrl;
2636         int err;
2637
2638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2639                 val = tr32(GRC_MISC_CFG);
2640                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2641                 udelay(40);
2642         }
2643         err  = tg3_readphy(tp, MII_BMSR, &val);
2644         err |= tg3_readphy(tp, MII_BMSR, &val);
2645         if (err != 0)
2646                 return -EBUSY;
2647
2648         if (netif_running(tp->dev) && tp->link_up) {
2649                 netif_carrier_off(tp->dev);
2650                 tg3_link_report(tp);
2651         }
2652
2653         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2654             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2655             tg3_asic_rev(tp) == ASIC_REV_5705) {
2656                 err = tg3_phy_reset_5703_4_5(tp);
2657                 if (err)
2658                         return err;
2659                 goto out;
2660         }
2661
2662         cpmuctrl = 0;
2663         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2664             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2665                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2666                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2667                         tw32(TG3_CPMU_CTRL,
2668                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2669         }
2670
2671         err = tg3_bmcr_reset(tp);
2672         if (err)
2673                 return err;
2674
2675         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2676                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2677                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2678
2679                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2680         }
2681
2682         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2683             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2684                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2685                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2686                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2687                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2688                         udelay(40);
2689                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690                 }
2691         }
2692
2693         if (tg3_flag(tp, 5717_PLUS) &&
2694             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2695                 return 0;
2696
2697         tg3_phy_apply_otp(tp);
2698
2699         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2700                 tg3_phy_toggle_apd(tp, true);
2701         else
2702                 tg3_phy_toggle_apd(tp, false);
2703
2704 out:
2705         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2706             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2708                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2709                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2710         }
2711
2712         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2713                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715         }
2716
2717         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2718                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2720                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2721                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2722                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2723                 }
2724         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2725                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2727                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2728                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2729                                 tg3_writephy(tp, MII_TG3_TEST1,
2730                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2731                         } else
2732                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2733
2734                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2735                 }
2736         }
2737
2738         /* Set the extended packet length bit (bit 14) on all
2739          * chips that support jumbo frames.  */
2740         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2741                 /* Cannot do read-modify-write on 5401 */
2742                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2743         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744                 /* Set bit 14 with read-modify-write to preserve other bits */
2745                 err = tg3_phy_auxctl_read(tp,
2746                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2747                 if (!err)
2748                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2749                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2750         }
2751
2752         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2753          * jumbo frames transmission.
2754          */
2755         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2757                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2758                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2759         }
2760
2761         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2762                 /* adjust output voltage */
2763                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2764         }
2765
2766         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2767                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2768
2769         tg3_phy_toggle_automdix(tp, true);
2770         tg3_phy_set_wirespeed(tp);
2771         return 0;
2772 }
2773
2774 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2775 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2776 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2777                                           TG3_GPIO_MSG_NEED_VAUX)
2778 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2779         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2780          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2781          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2782          (TG3_GPIO_MSG_DRVR_PRES << 12))
2783
2784 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2785         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2786          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2787          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2788          (TG3_GPIO_MSG_NEED_VAUX << 12))
2789
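/* Each PCI function owns a four-bit status nibble in the shared
 * GPIO message word, at shift TG3_APE_GPIO_MSG_SHIFT + 4 * pci_fn,
 * carrying the DRVR_PRES and NEED_VAUX flags.  5717/5719 keep this
 * word in APE scratchpad space; other chips use the CPMU
 * driver-status register.
 */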
2790 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 {
2792         u32 status, shift;
2793
2794         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795             tg3_asic_rev(tp) == ASIC_REV_5719)
2796                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2797         else
2798                 status = tr32(TG3_CPMU_DRV_STATUS);
2799
2800         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2801         status &= ~(TG3_GPIO_MSG_MASK << shift);
2802         status |= (newstat << shift);
2803
2804         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805             tg3_asic_rev(tp) == ASIC_REV_5719)
2806                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2807         else
2808                 tw32(TG3_CPMU_DRV_STATUS, status);
2809
2810         return status >> TG3_APE_GPIO_MSG_SHIFT;
2811 }
2812
2813 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2814 {
2815         if (!tg3_flag(tp, IS_NIC))
2816                 return 0;
2817
2818         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2819             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2820             tg3_asic_rev(tp) == ASIC_REV_5720) {
2821                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2822                         return -EIO;
2823
2824                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2825
2826                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2828
2829                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2830         } else {
2831                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2833         }
2834
2835         return 0;
2836 }
2837
2838 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 {
2840         u32 grc_local_ctrl;
2841
2842         if (!tg3_flag(tp, IS_NIC) ||
2843             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844             tg3_asic_rev(tp) == ASIC_REV_5701)
2845                 return;
2846
2847         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2848
2849         tw32_wait_f(GRC_LOCAL_CTRL,
2850                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2851                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853         tw32_wait_f(GRC_LOCAL_CTRL,
2854                     grc_local_ctrl,
2855                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2856
2857         tw32_wait_f(GRC_LOCAL_CTRL,
2858                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2860 }
2861
2862 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2863 {
2864         if (!tg3_flag(tp, IS_NIC))
2865                 return;
2866
2867         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2868             tg3_asic_rev(tp) == ASIC_REV_5701) {
2869                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870                             (GRC_LCLCTRL_GPIO_OE0 |
2871                              GRC_LCLCTRL_GPIO_OE1 |
2872                              GRC_LCLCTRL_GPIO_OE2 |
2873                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2874                              GRC_LCLCTRL_GPIO_OUTPUT1),
2875                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2876         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2877                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2878                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2879                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2880                                      GRC_LCLCTRL_GPIO_OE1 |
2881                                      GRC_LCLCTRL_GPIO_OE2 |
2882                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2883                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2884                                      tp->grc_local_ctrl;
2885                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2889                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2891
2892                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2893                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2895         } else {
2896                 u32 no_gpio2;
2897                 u32 grc_local_ctrl = 0;
2898
2899                 /* Workaround to prevent excessive current draw. */
2900                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2901                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2902                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2903                                     grc_local_ctrl,
2904                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2905                 }
2906
2907                 /* On 5753 and variants, GPIO2 cannot be used. */
2908                 no_gpio2 = tp->nic_sram_data_cfg &
2909                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2910
2911                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2912                                   GRC_LCLCTRL_GPIO_OE1 |
2913                                   GRC_LCLCTRL_GPIO_OE2 |
2914                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2915                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2916                 if (no_gpio2) {
2917                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2918                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2919                 }
2920                 tw32_wait_f(GRC_LOCAL_CTRL,
2921                             tp->grc_local_ctrl | grc_local_ctrl,
2922                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2925
2926                 tw32_wait_f(GRC_LOCAL_CTRL,
2927                             tp->grc_local_ctrl | grc_local_ctrl,
2928                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2929
2930                 if (!no_gpio2) {
2931                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2932                         tw32_wait_f(GRC_LOCAL_CTRL,
2933                                     tp->grc_local_ctrl | grc_local_ctrl,
2934                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2935                 }
2936         }
2937 }
2938
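/* On 5717/5719/5720 the power source is shared across ports; coordinate
 * through the APE GPIO lock and the per-function status word so that the
 * chip switches to Vaux only when some function needs it and no other
 * driver instance is still present.
 */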
2939 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 {
2941         u32 msg = 0;
2942
2943         /* Serialize power state transitions */
2944         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2945                 return;
2946
2947         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2948                 msg = TG3_GPIO_MSG_NEED_VAUX;
2949
2950         msg = tg3_set_function_status(tp, msg);
2951
2952         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2953                 goto done;
2954
2955         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2956                 tg3_pwrsrc_switch_to_vaux(tp);
2957         else
2958                 tg3_pwrsrc_die_with_vmain(tp);
2959
2960 done:
2961         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2962 }
2963
2964 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2965 {
2966         bool need_vaux = false;
2967
2968         /* The GPIOs do something completely different on 57765. */
2969         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2970                 return;
2971
2972         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2973             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2974             tg3_asic_rev(tp) == ASIC_REV_5720) {
2975                 tg3_frob_aux_power_5717(tp, include_wol ?
2976                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2977                 return;
2978         }
2979
2980         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2981                 struct net_device *dev_peer;
2982
2983                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2984
2985                 /* remove_one() may have been run on the peer. */
2986                 if (dev_peer) {
2987                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2988
2989                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2990                                 return;
2991
2992                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2993                             tg3_flag(tp_peer, ENABLE_ASF))
2994                                 need_vaux = true;
2995                 }
2996         }
2997
2998         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2999             tg3_flag(tp, ENABLE_ASF))
3000                 need_vaux = true;
3001
3002         if (need_vaux)
3003                 tg3_pwrsrc_switch_to_vaux(tp);
3004         else
3005                 tg3_pwrsrc_die_with_vmain(tp);
3006 }
3007
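/* Returns nonzero when the MAC_MODE link polarity bit should be set for
 * the given link speed on 5700 chips; the sense is inverted for BCM5411
 * PHYs and forced on for the LED_CTRL_MODE_PHY_2 LED mode.
 */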
3008 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3009 {
3010         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3011                 return 1;
3012         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3013                 if (speed != SPEED_10)
3014                         return 1;
3015         } else if (speed == SPEED_10)
3016                 return 1;
3017
3018         return 0;
3019 }
3020
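/* Chips on which the PHY must not be powered down; tg3_power_down_phy()
 * consults this before issuing the final BMCR_PDOWN.
 */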
3021 static bool tg3_phy_power_bug(struct tg3 *tp)
3022 {
3023         switch (tg3_asic_rev(tp)) {
3024         case ASIC_REV_5700:
3025         case ASIC_REV_5704:
3026                 return true;
3027         case ASIC_REV_5780:
3028                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3029                         return true;
3030                 return false;
3031         case ASIC_REV_5717:
3032                 if (!tp->pci_fn)
3033                         return true;
3034                 return false;
3035         case ASIC_REV_5719:
3036         case ASIC_REV_5720:
3037                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3038                     !tp->pci_fn)
3039                         return true;
3040                 return false;
3041         }
3042
3043         return false;
3044 }
3045
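/* Chips on which the FORCE_LED_OFF write must be skipped when powering
 * down the PHY; see tg3_power_down_phy().
 */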
3046 static bool tg3_phy_led_bug(struct tg3 *tp)
3047 {
3048         switch (tg3_asic_rev(tp)) {
3049         case ASIC_REV_5719:
3050         case ASIC_REV_5720:
3051                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3052                     !tp->pci_fn)
3053                         return true;
3054                 return false;
3055         }
3056
3057         return false;
3058 }
3059
3060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 {
3062         u32 val;
3063
3064         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3065                 return;
3066
3067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3068                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3069                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3070                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3071
3072                         sg_dig_ctrl |=
3073                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3074                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3075                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076                 }
3077                 return;
3078         }
3079
3080         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3081                 tg3_bmcr_reset(tp);
3082                 val = tr32(GRC_MISC_CFG);
3083                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3084                 udelay(40);
3085                 return;
3086         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3087                 u32 phytest;
3088                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3089                         u32 phy;
3090
3091                         tg3_writephy(tp, MII_ADVERTISE, 0);
3092                         tg3_writephy(tp, MII_BMCR,
3093                                      BMCR_ANENABLE | BMCR_ANRESTART);
3094
3095                         tg3_writephy(tp, MII_TG3_FET_TEST,
3096                                      phytest | MII_TG3_FET_SHADOW_EN);
3097                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3098                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3099                                 tg3_writephy(tp,
3100                                              MII_TG3_FET_SHDW_AUXMODE4,
3101                                              phy);
3102                         }
3103                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3104                 }
3105                 return;
3106         } else if (do_low_power) {
3107                 if (!tg3_phy_led_bug(tp))
3108                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3109                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3110
3111                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3112                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3113                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3114                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3115         }
3116
3117         /* The PHY should not be powered down on some chips because
3118          * of bugs.
3119          */
3120         if (tg3_phy_power_bug(tp))
3121                 return;
3122
3123         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3124             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3125                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3126                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3127                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3128                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3129         }
3130
3131         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3132 }
3133
3134 /* tp->lock is held. */
3135 static int tg3_nvram_lock(struct tg3 *tp)
3136 {
3137         if (tg3_flag(tp, NVRAM)) {
3138                 int i;
3139
3140                 if (tp->nvram_lock_cnt == 0) {
3141                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3142                         for (i = 0; i < 8000; i++) {
3143                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144                                         break;
3145                                 udelay(20);
3146                         }
3147                         if (i == 8000) {
3148                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149                                 return -ENODEV;
3150                         }
3151                 }
3152                 tp->nvram_lock_cnt++;
3153         }
3154         return 0;
3155 }
3156
3157 /* tp->lock is held. */
3158 static void tg3_nvram_unlock(struct tg3 *tp)
3159 {
3160         if (tg3_flag(tp, NVRAM)) {
3161                 if (tp->nvram_lock_cnt > 0)
3162                         tp->nvram_lock_cnt--;
3163                 if (tp->nvram_lock_cnt == 0)
3164                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3165         }
3166 }
3167
3168 /* tp->lock is held. */
3169 static void tg3_enable_nvram_access(struct tg3 *tp)
3170 {
3171         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172                 u32 nvaccess = tr32(NVRAM_ACCESS);
3173
3174                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175         }
3176 }
3177
3178 /* tp->lock is held. */
3179 static void tg3_disable_nvram_access(struct tg3 *tp)
3180 {
3181         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182                 u32 nvaccess = tr32(NVRAM_ACCESS);
3183
3184                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185         }
3186 }
3187
3188 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3189                                         u32 offset, u32 *val)
3190 {
3191         u32 tmp;
3192         int i;
3193
3194         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3195                 return -EINVAL;
3196
3197         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3198                                         EEPROM_ADDR_DEVID_MASK |
3199                                         EEPROM_ADDR_READ);
3200         tw32(GRC_EEPROM_ADDR,
3201              tmp |
3202              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3203              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3204               EEPROM_ADDR_ADDR_MASK) |
3205              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3206
3207         for (i = 0; i < 1000; i++) {
3208                 tmp = tr32(GRC_EEPROM_ADDR);
3209
3210                 if (tmp & EEPROM_ADDR_COMPLETE)
3211                         break;
3212                 msleep(1);
3213         }
3214         if (!(tmp & EEPROM_ADDR_COMPLETE))
3215                 return -EBUSY;
3216
3217         tmp = tr32(GRC_EEPROM_DATA);
3218
3219         /*
3220          * The data will always be opposite the native endian
3221          * format.  Perform a blind byteswap to compensate.
3222          */
3223         *val = swab32(tmp);
3224
3225         return 0;
3226 }
3227
3228 #define NVRAM_CMD_TIMEOUT 5000
3229
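/* Issue an NVRAM command and poll for NVRAM_CMD_DONE, giving up after
 * NVRAM_CMD_TIMEOUT polls of 10-40us each.
 */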
3230 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 {
3232         int i;
3233
3234         tw32(NVRAM_CMD, nvram_cmd);
3235         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3236                 usleep_range(10, 40);
3237                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238                         udelay(10);
3239                         break;
3240                 }
3241         }
3242
3243         if (i == NVRAM_CMD_TIMEOUT)
3244                 return -EBUSY;
3245
3246         return 0;
3247 }
3248
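/* Translate a linear NVRAM offset into the page-based physical address
 * expected by buffered Atmel AT45DB0X1B-style flash parts.
 */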
3249 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3250 {
3251         if (tg3_flag(tp, NVRAM) &&
3252             tg3_flag(tp, NVRAM_BUFFERED) &&
3253             tg3_flag(tp, FLASH) &&
3254             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3255             (tp->nvram_jedecnum == JEDEC_ATMEL))
3256
3257                 addr = ((addr / tp->nvram_pagesize) <<
3258                         ATMEL_AT45DB0X1B_PAGE_POS) +
3259                        (addr % tp->nvram_pagesize);
3260
3261         return addr;
3262 }
3263
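/* Inverse of tg3_nvram_phys_addr(): map a page-based physical address
 * back to a linear NVRAM offset.
 */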
3264 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3265 {
3266         if (tg3_flag(tp, NVRAM) &&
3267             tg3_flag(tp, NVRAM_BUFFERED) &&
3268             tg3_flag(tp, FLASH) &&
3269             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270             (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3273                         tp->nvram_pagesize) +
3274                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3275
3276         return addr;
3277 }
3278
3279 /* NOTE: Data read in from NVRAM is byteswapped according to
3280  * the byteswapping settings for all other register accesses.
3281  * tg3 devices are BE devices, so on a BE machine, the data
3282  * returned will be exactly as it is seen in NVRAM.  On a LE
3283  * machine, the 32-bit value will be byteswapped.
3284  */
3285 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 {
3287         int ret;
3288
3289         if (!tg3_flag(tp, NVRAM))
3290                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3291
3292         offset = tg3_nvram_phys_addr(tp, offset);
3293
3294         if (offset > NVRAM_ADDR_MSK)
3295                 return -EINVAL;
3296
3297         ret = tg3_nvram_lock(tp);
3298         if (ret)
3299                 return ret;
3300
3301         tg3_enable_nvram_access(tp);
3302
3303         tw32(NVRAM_ADDR, offset);
3304         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3305                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3306
3307         if (ret == 0)
3308                 *val = tr32(NVRAM_RDDATA);
3309
3310         tg3_disable_nvram_access(tp);
3311
3312         tg3_nvram_unlock(tp);
3313
3314         return ret;
3315 }
3316
3317 /* Ensures NVRAM data is in bytestream format. */
3318 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3319 {
3320         u32 v;
3321         int res = tg3_nvram_read(tp, offset, &v);
3322         if (!res)
3323                 *val = cpu_to_be32(v);
3324         return res;
3325 }
3326
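/* offset and length are dword aligned */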
3327 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3328                                     u32 offset, u32 len, u8 *buf)
3329 {
3330         int i, j, rc = 0;
3331         u32 val;
3332
3333         for (i = 0; i < len; i += 4) {
3334                 u32 addr;
3335                 __be32 data;
3336
3337                 addr = offset + i;
3338
3339                 memcpy(&data, buf + i, 4);
3340
3341                 /*
3342                  * The SEEPROM interface expects the data to always be opposite
3343                  * the native endian format.  We accomplish this by reversing
3344                  * all the operations that would have been performed on the
3345                  * data from a call to tg3_nvram_read_be32().
3346                  */
3347                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3348
3349                 val = tr32(GRC_EEPROM_ADDR);
3350                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3351
3352                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3353                         EEPROM_ADDR_READ);
3354                 tw32(GRC_EEPROM_ADDR, val |
3355                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3356                         (addr & EEPROM_ADDR_ADDR_MASK) |
3357                         EEPROM_ADDR_START |
3358                         EEPROM_ADDR_WRITE);
3359
3360                 for (j = 0; j < 1000; j++) {
3361                         val = tr32(GRC_EEPROM_ADDR);
3362
3363                         if (val & EEPROM_ADDR_COMPLETE)
3364                                 break;
3365                         msleep(1);
3366                 }
3367                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3368                         rc = -EBUSY;
3369                         break;
3370                 }
3371         }
3372
3373         return rc;
3374 }
3375
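/* Unbuffered flash must be erased a page at a time: read the whole page
 * back, merge in the new data, erase the page, then rewrite it word by
 * word using WREN/ERASE/WR commands.
 */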
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378                 u8 *buf)
3379 {
3380         int ret = 0;
3381         u32 pagesize = tp->nvram_pagesize;
3382         u32 pagemask = pagesize - 1;
3383         u32 nvram_cmd;
3384         u8 *tmp;
3385
3386         tmp = kmalloc(pagesize, GFP_KERNEL);
3387         if (tmp == NULL)
3388                 return -ENOMEM;
3389
3390         while (len) {
3391                 int j;
3392                 u32 phy_addr, page_off, size;
3393
3394                 phy_addr = offset & ~pagemask;
3395
3396                 for (j = 0; j < pagesize; j += 4) {
3397                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3398                                                   (__be32 *) (tmp + j));
3399                         if (ret)
3400                                 break;
3401                 }
3402                 if (ret)
3403                         break;
3404
3405                 page_off = offset & pagemask;
3406                 size = pagesize;
3407                 if (len < size)
3408                         size = len;
3409
3410                 len -= size;
3411
3412                 memcpy(tmp + page_off, buf, size);
3413
3414                 offset = offset + (pagesize - page_off);
3415
3416                 tg3_enable_nvram_access(tp);
3417
3418                 /*
3419                  * Before we can erase the flash page, we need
3420                  * to issue a special "write enable" command.
3421                  */
3422                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3423
3424                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425                         break;
3426
3427                 /* Erase the target page */
3428                 tw32(NVRAM_ADDR, phy_addr);
3429
3430                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3431                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3432
3433                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434                         break;
3435
3436                 /* Issue another write enable to start the write. */
3437                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440                         break;
3441
3442                 for (j = 0; j < pagesize; j += 4) {
3443                         __be32 data;
3444
3445                         data = *((__be32 *) (tmp + j));
3446
3447                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3448
3449                         tw32(NVRAM_ADDR, phy_addr + j);
3450
3451                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452                                 NVRAM_CMD_WR;
3453
3454                         if (j == 0)
3455                                 nvram_cmd |= NVRAM_CMD_FIRST;
3456                         else if (j == (pagesize - 4))
3457                                 nvram_cmd |= NVRAM_CMD_LAST;
3458
3459                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3460                         if (ret)
3461                                 break;
3462                 }
3463                 if (ret)
3464                         break;
3465         }
3466
3467         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468         tg3_nvram_exec_cmd(tp, nvram_cmd);
3469
3470         kfree(tmp);
3471
3472         return ret;
3473 }
3474
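/* Buffered parts need no separate erase cycle: write one dword at a
 * time, flagging the first and last words of each page (and of the
 * whole transfer) with NVRAM_CMD_FIRST/NVRAM_CMD_LAST.
 */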
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477                 u8 *buf)
3478 {
3479         int i, ret = 0;
3480
3481         for (i = 0; i < len; i += 4, offset += 4) {
3482                 u32 page_off, phy_addr, nvram_cmd;
3483                 __be32 data;
3484
3485                 memcpy(&data, buf + i, 4);
3486                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3487
3488                 page_off = offset % tp->nvram_pagesize;
3489
3490                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3491
3492                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3493
3494                 if (page_off == 0 || i == 0)
3495                         nvram_cmd |= NVRAM_CMD_FIRST;
3496                 if (page_off == (tp->nvram_pagesize - 4))
3497                         nvram_cmd |= NVRAM_CMD_LAST;
3498
3499                 if (i == (len - 4))
3500                         nvram_cmd |= NVRAM_CMD_LAST;
3501
3502                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3503                     !tg3_flag(tp, FLASH) ||
3504                     !tg3_flag(tp, 57765_PLUS))
3505                         tw32(NVRAM_ADDR, phy_addr);
3506
3507                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3508                     !tg3_flag(tp, 5755_PLUS) &&
3509                     (tp->nvram_jedecnum == JEDEC_ST) &&
3510                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3511                         u32 cmd;
3512
3513                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3514                         ret = tg3_nvram_exec_cmd(tp, cmd);
3515                         if (ret)
3516                                 break;
3517                 }
3518                 if (!tg3_flag(tp, FLASH)) {
3519                         /* We always do complete word writes to eeprom. */
3520                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3521                 }
3522
3523                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524                 if (ret)
3525                         break;
3526         }
3527         return ret;
3528 }
3529
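/* Top-level NVRAM write: momentarily de-assert the write-protect GPIO
 * if the part is protected, dispatch to the EEPROM, buffered, or
 * unbuffered helper, then restore protection.
 */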
3530 /* offset and length are dword aligned */
3531 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 {
3533         int ret;
3534
3535         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3536                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3537                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538                 udelay(40);
3539         }
3540
3541         if (!tg3_flag(tp, NVRAM)) {
3542                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543         } else {
3544                 u32 grc_mode;
3545
3546                 ret = tg3_nvram_lock(tp);
3547                 if (ret)
3548                         return ret;
3549
3550                 tg3_enable_nvram_access(tp);
3551                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3552                         tw32(NVRAM_WRITE1, 0x406);
3553
3554                 grc_mode = tr32(GRC_MODE);
3555                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3556
3557                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3558                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3559                                 buf);
3560                 } else {
3561                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562                                 buf);
3563                 }
3564
3565                 grc_mode = tr32(GRC_MODE);
3566                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3567
3568                 tg3_disable_nvram_access(tp);
3569                 tg3_nvram_unlock(tp);
3570         }
3571
3572         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3573                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574                 udelay(40);
3575         }
3576
3577         return ret;
3578 }
3579
3580 #define RX_CPU_SCRATCH_BASE     0x30000
3581 #define RX_CPU_SCRATCH_SIZE     0x04000
3582 #define TX_CPU_SCRATCH_BASE     0x34000
3583 #define TX_CPU_SCRATCH_SIZE     0x04000
3584
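/* Repeatedly request CPU_MODE_HALT until the embedded CPU reports the
 * halted state, bailing out if the PCI channel goes offline.
 */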
3585 /* tp->lock is held. */
3586 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3587 {
3588         int i;
3589         const int iters = 10000;
3590
3591         for (i = 0; i < iters; i++) {
3592                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3593                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3594                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3595                         break;
3596                 if (pci_channel_offline(tp->pdev))
3597                         return -EBUSY;
3598         }
3599
3600         return (i == iters) ? -EBUSY : 0;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_rxcpu_pause(struct tg3 *tp)
3605 {
3606         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3607
3608         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3609         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3610         udelay(10);
3611
3612         return rc;
3613 }
3614
3615 /* tp->lock is held. */
3616 static int tg3_txcpu_pause(struct tg3 *tp)
3617 {
3618         return tg3_pause_cpu(tp, TX_CPU_BASE);
3619 }
3620
3621 /* tp->lock is held. */
3622 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_rxcpu_resume(struct tg3 *tp)
3630 {
3631         tg3_resume_cpu(tp, RX_CPU_BASE);
3632 }
3633
3634 /* tp->lock is held. */
3635 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 {
3637         int rc;
3638
3639         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3640
3641         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3642                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3643
3644                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3645                 return 0;
3646         }
3647         if (cpu_base == RX_CPU_BASE) {
3648                 rc = tg3_rxcpu_pause(tp);
3649         } else {
3650                 /*
3651                  * There is only an Rx CPU for the 5750 derivative in the
3652                  * BCM4785.
3653                  */
3654                 if (tg3_flag(tp, IS_SSB_CORE))
3655                         return 0;
3656
3657                 rc = tg3_txcpu_pause(tp);
3658         }
3659
3660         if (rc) {
3661                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3662                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663                 return -ENODEV;
3664         }
3665
3666         /* Clear firmware's nvram arbitration. */
3667         if (tg3_flag(tp, NVRAM))
3668                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669         return 0;
3670 }
3671
3672 static int tg3_fw_data_len(struct tg3 *tp,
3673                            const struct tg3_firmware_hdr *fw_hdr)
3674 {
3675         int fw_len;
3676
3677         /* Non-fragmented firmware has one firmware header followed by a
3678          * contiguous chunk of data to be written. The length field in that
3679          * header is not the length of the data to be written but the
3680          * complete length of the bss. The data length is determined from
3681          * tp->fw->size minus headers.
3682          *
3683          * Fragmented firmware has a main header followed by multiple
3684          * fragments. Each fragment is identical to non-fragmented firmware,
3685          * with a firmware header followed by a contiguous chunk of data. In
3686          * the main header, the length field is unused and set to 0xffffffff.
3687          * In each fragment header the length is the entire size of that
3688          * fragment, i.e. fragment data + header length. The data length is
3689          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3690          */
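        /* A sketch of the two layouts described above:
         *
         *   non-fragmented: [hdr: len = bss size][data: tp->fw->size - hdr]
         *   fragmented:     [main hdr: len = 0xffffffff]
         *                   [frag hdr: len][data: len - hdr] ... repeated
         */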
3691         if (tp->fw_len == 0xffffffff)
3692                 fw_len = be32_to_cpu(fw_hdr->len);
3693         else
3694                 fw_len = tp->fw->size;
3695
3696         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3697 }
3698
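/* Halt the target CPU and clear its scratch memory, then copy each
 * firmware fragment to its base address within the scratch area. The
 * 57766 skips the halt/clear step; its caller pauses the CPU instead.
 */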
3699 /* tp->lock is held. */
3700 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3701                                  u32 cpu_scratch_base, int cpu_scratch_size,
3702                                  const struct tg3_firmware_hdr *fw_hdr)
3703 {
3704         int err, i;
3705         void (*write_op)(struct tg3 *, u32, u32);
3706         int total_len = tp->fw->size;
3707
3708         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3709                 netdev_err(tp->dev,
3710                            "%s: cannot load TX CPU firmware on 5705-class chips\n",
3711                            __func__);
3712                 return -EINVAL;
3713         }
3714
3715         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3716                 write_op = tg3_write_mem;
3717         else
3718                 write_op = tg3_write_indirect_reg32;
3719
3720         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3721                 /* It is possible that bootcode is still loading at this point.
3722                  * Get the nvram lock first before halting the cpu.
3723                  */
3724                 int lock_err = tg3_nvram_lock(tp);
3725                 err = tg3_halt_cpu(tp, cpu_base);
3726                 if (!lock_err)
3727                         tg3_nvram_unlock(tp);
3728                 if (err)
3729                         goto out;
3730
3731                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3732                         write_op(tp, cpu_scratch_base + i, 0);
3733                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3734                 tw32(cpu_base + CPU_MODE,
3735                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3736         } else {
3737                 /* Subtract the additional main header for fragmented firmware
3738                  * and advance to the first fragment.
3739                  */
3740                 total_len -= TG3_FW_HDR_LEN;
3741                 fw_hdr++;
3742         }
3743
3744         do {
3745                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3746                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3747                         write_op(tp, cpu_scratch_base +
3748                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3749                                      (i * sizeof(u32)),
3750                                  be32_to_cpu(fw_data[i]));
3751
3752                 total_len -= be32_to_cpu(fw_hdr->len);
3753
3754                 /* Advance to next fragment */
3755                 fw_hdr = (struct tg3_firmware_hdr *)
3756                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3757         } while (total_len > 0);
3758
3759         err = 0;
3760
3761 out:
3762         return err;
3763 }
3764
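/* Point the halted CPU's program counter at the firmware entry point,
 * re-halting and rewriting the PC until it reads back correctly (up to
 * five attempts).
 */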
3765 /* tp->lock is held. */
3766 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3767 {
3768         int i;
3769         const int iters = 5;
3770
3771         tw32(cpu_base + CPU_STATE, 0xffffffff);
3772         tw32_f(cpu_base + CPU_PC, pc);
3773
3774         for (i = 0; i < iters; i++) {
3775                 if (tr32(cpu_base + CPU_PC) == pc)
3776                         break;
3777                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3778                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3779                 tw32_f(cpu_base + CPU_PC, pc);
3780                 udelay(1000);
3781         }
3782
3783         return (i == iters) ? -EBUSY : 0;
3784 }
3785
3786 /* tp->lock is held. */
3787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3788 {
3789         const struct tg3_firmware_hdr *fw_hdr;
3790         int err;
3791
3792         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3793
3794         /* The firmware blob starts with version numbers, followed by
3795            the start address and length.  The length field holds the
3796            complete length, length = end_address_of_bss - start_address_of_text;
3797            the remainder is the blob to be loaded contiguously
3798            from the start address. */
3799
3800         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3801                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3807                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808                                     fw_hdr);
3809         if (err)
3810                 return err;
3811
3812         /* Now startup only the RX cpu. */
3813         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3814                                        be32_to_cpu(fw_hdr->base_addr));
3815         if (err) {
3816                 netdev_err(tp->dev, "%s: failed to set RX CPU PC, is %08x "
3817                            "should be %08x\n", __func__,
3818                            tr32(RX_CPU_BASE + CPU_PC),
3819                            be32_to_cpu(fw_hdr->base_addr));
3820                 return -ENODEV;
3821         }
3822
3823         tg3_rxcpu_resume(tp);
3824
3825         return 0;
3826 }
3827
3828 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3829 {
3830         const int iters = 1000;
3831         int i;
3832         u32 val;
3833
3834         /* Wait for the boot code to complete initialization and enter its
3835          * service loop; only then is it safe to download service patches.
3836          */
3837         for (i = 0; i < iters; i++) {
3838                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839                         break;
3840
3841                 udelay(10);
3842         }
3843
3844         if (i == iters) {
3845                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846                 return -EBUSY;
3847         }
3848
3849         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3850         if (val & 0xff) {
3851                 netdev_warn(tp->dev,
3852                             "Other patches exist. Not downloading EEE patch\n");
3853                 return -EEXIST;
3854         }
3855
3856         return 0;
3857 }
3858
3859 /* tp->lock is held. */
3860 static void tg3_load_57766_firmware(struct tg3 *tp)
3861 {
3862         struct tg3_firmware_hdr *fw_hdr;
3863
3864         if (!tg3_flag(tp, NO_NVRAM))
3865                 return;
3866
3867         if (tg3_validate_rxcpu_state(tp))
3868                 return;
3869
3870         if (!tp->fw)
3871                 return;
3872
3873         /* This firmware blob has a different format from older firmware
3874          * releases, as described below. The main difference is that the
3875          * data is fragmented and written to non-contiguous locations.
3876          *
3877          * The blob begins with a firmware header identical to other
3878          * firmware, consisting of version, base addr and length. The length
3879          * here is unused and set to 0xffffffff.
3880          *
3881          * This is followed by a series of firmware fragments, each
3882          * individually identical to older firmware, i.e. a firmware
3883          * header followed by the data for that fragment. The version
3884          * field of each fragment header is unused.
3885          */
3886
3887         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3888         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3889                 return;
3890
3891         if (tg3_rxcpu_pause(tp))
3892                 return;
3893
3894         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3895         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3896
3897         tg3_rxcpu_resume(tp);
3898 }
3899
3900 /* tp->lock is held. */
3901 static int tg3_load_tso_firmware(struct tg3 *tp)
3902 {
3903         const struct tg3_firmware_hdr *fw_hdr;
3904         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3905         int err;
3906
3907         if (!tg3_flag(tp, FW_TSO))
3908                 return 0;
3909
3910         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3911
3912         /* The firmware blob starts with version numbers, followed by
3913            the start address and length.  The length field holds the
3914            complete length, length = end_address_of_bss - start_address_of_text;
3915            the remainder is the blob to be loaded contiguously
3916            from the start address. */
3917
3918         cpu_scratch_size = tp->fw_len;
3919
3920         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3921                 cpu_base = RX_CPU_BASE;
3922                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3923         } else {
3924                 cpu_base = TX_CPU_BASE;
3925                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3926                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3927         }
3928
3929         err = tg3_load_firmware_cpu(tp, cpu_base,
3930                                     cpu_scratch_base, cpu_scratch_size,
3931                                     fw_hdr);
3932         if (err)
3933                 return err;
3934
3935         /* Now startup the cpu. */
3936         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3937                                        be32_to_cpu(fw_hdr->base_addr));
3938         if (err) {
3939                 netdev_err(tp->dev,
3940                            "%s: failed to set CPU PC, is %08x should be %08x\n",
3941                            __func__, tr32(cpu_base + CPU_PC),
3942                            be32_to_cpu(fw_hdr->base_addr));
3943                 return -ENODEV;
3944         }
3945
3946         tg3_resume_cpu(tp, cpu_base);
3947         return 0;
3948 }
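/* Program one MAC address slot: bytes 0-1 go in the _HIGH register and
 * bytes 2-5 in the _LOW register. Slots 0-3 live in the MAC_ADDR_*
 * registers, slots 4 and up in MAC_EXTADDR_*.
 */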
3949
3950 /* tp->lock is held. */
3951 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3952 {
3953         u32 addr_high, addr_low;
3954
3955         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3956         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3957                     (mac_addr[4] <<  8) | mac_addr[5]);
3958
3959         if (index < 4) {
3960                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3961                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3962         } else {
3963                 index -= 4;
3964                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3965                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3966         }
3967 }
3968
3969 /* tp->lock is held. */
3970 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 {
3972         u32 addr_high;
3973         int i;
3974
3975         for (i = 0; i < 4; i++) {
3976                 if (i == 1 && skip_mac_1)
3977                         continue;
3978                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979         }
3980
3981         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3982             tg3_asic_rev(tp) == ASIC_REV_5704) {
3983                 for (i = 4; i < 16; i++)
3984                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985         }
3986
3987         addr_high = (tp->dev->dev_addr[0] +
3988                      tp->dev->dev_addr[1] +
3989                      tp->dev->dev_addr[2] +
3990                      tp->dev->dev_addr[3] +
3991                      tp->dev->dev_addr[4] +
3992                      tp->dev->dev_addr[5]) &
3993                 TX_BACKOFF_SEED_MASK;
3994         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3995 }
3996
3997 static void tg3_enable_register_access(struct tg3 *tp)
3998 {
3999         /*
4000          * Make sure register accesses (indirect or otherwise) will function
4001          * correctly.
4002          */
4003         pci_write_config_dword(tp->pdev,
4004                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4005 }
4006
4007 static int tg3_power_up(struct tg3 *tp)
4008 {
4009         int err;
4010
4011         tg3_enable_register_access(tp);
4012
4013         err = pci_set_power_state(tp->pdev, PCI_D0);
4014         if (!err) {
4015                 /* Switch out of Vaux if it is a NIC */
4016                 tg3_pwrsrc_switch_to_vmain(tp);
4017         } else {
4018                 netdev_err(tp->dev, "Transition to D0 failed\n");
4019         }
4020
4021         return err;
4022 }
4023
4024 static int tg3_setup_phy(struct tg3 *, bool);
4025
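/* Prepare the chip for entry into a low-power state: mask PCI
 * interrupts, reconfigure or power down the PHY depending on WOL and
 * ASF/APE needs, gate unneeded clocks, and hand off the power source
 * via tg3_frob_aux_power().
 */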
4026 static int tg3_power_down_prepare(struct tg3 *tp)
4027 {
4028         u32 misc_host_ctrl;
4029         bool device_should_wake, do_low_power;
4030
4031         tg3_enable_register_access(tp);
4032
4033         /* Restore the CLKREQ setting. */
4034         if (tg3_flag(tp, CLKREQ_BUG))
4035                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4036                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4037
4038         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4039         tw32(TG3PCI_MISC_HOST_CTRL,
4040              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4041
4042         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4043                              tg3_flag(tp, WOL_ENABLE);
4044
4045         if (tg3_flag(tp, USE_PHYLIB)) {
4046                 do_low_power = false;
4047                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4048                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4049                         struct phy_device *phydev;
4050                         u32 phyid, advertising;
4051
4052                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4053
4054                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4055
4056                         tp->link_config.speed = phydev->speed;
4057                         tp->link_config.duplex = phydev->duplex;
4058                         tp->link_config.autoneg = phydev->autoneg;
4059                         tp->link_config.advertising = phydev->advertising;
4060
4061                         advertising = ADVERTISED_TP |
4062                                       ADVERTISED_Pause |
4063                                       ADVERTISED_Autoneg |
4064                                       ADVERTISED_10baseT_Half;
4065
4066                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4068                                         advertising |=
4069                                                 ADVERTISED_100baseT_Half |
4070                                                 ADVERTISED_100baseT_Full |
4071                                                 ADVERTISED_10baseT_Full;
4072                                 else
4073                                         advertising |= ADVERTISED_10baseT_Full;
4074                         }
4075
4076                         phydev->advertising = advertising;
4077
4078                         phy_start_aneg(phydev);
4079
4080                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4081                         if (phyid != PHY_ID_BCMAC131) {
4082                                 phyid &= PHY_BCM_OUI_MASK;
4083                                 if (phyid == PHY_BCM_OUI_1 ||
4084                                     phyid == PHY_BCM_OUI_2 ||
4085                                     phyid == PHY_BCM_OUI_3)
4086                                         do_low_power = true;
4087                         }
4088                 }
4089         } else {
4090                 do_low_power = true;
4091
4092                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4093                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4094
4095                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4096                         tg3_setup_phy(tp, false);
4097         }
4098
4099         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4100                 u32 val;
4101
4102                 val = tr32(GRC_VCPU_EXT_CTRL);
4103                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4104         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4105                 int i;
4106                 u32 val;
4107
4108                 for (i = 0; i < 200; i++) {
4109                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4110                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4111                                 break;
4112                         msleep(1);
4113                 }
4114         }
4115         if (tg3_flag(tp, WOL_CAP))
4116                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4117                                                      WOL_DRV_STATE_SHUTDOWN |
4118                                                      WOL_DRV_WOL |
4119                                                      WOL_SET_MAGIC_PKT);
4120
4121         if (device_should_wake) {
4122                 u32 mac_mode;
4123
4124                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4125                         if (do_low_power &&
4126                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4127                                 tg3_phy_auxctl_write(tp,
4128                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4129                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4130                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4131                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4132                                 udelay(40);
4133                         }
4134
4135                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4136                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4137                         else if (tp->phy_flags &
4138                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4139                                 if (tp->link_config.active_speed == SPEED_1000)
4140                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4141                                 else
4142                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4143                         } else
4144                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4145
4146                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4147                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4148                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4149                                              SPEED_100 : SPEED_10;
4150                                 if (tg3_5700_link_polarity(tp, speed))
4151                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4152                                 else
4153                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4154                         }
4155                 } else {
4156                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4157                 }
4158
4159                 if (!tg3_flag(tp, 5750_PLUS))
4160                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4161
4162                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4163                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4164                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4165                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4166
4167                 if (tg3_flag(tp, ENABLE_APE))
4168                         mac_mode |= MAC_MODE_APE_TX_EN |
4169                                     MAC_MODE_APE_RX_EN |
4170                                     MAC_MODE_TDE_ENABLE;
4171
4172                 tw32_f(MAC_MODE, mac_mode);
4173                 udelay(100);
4174
4175                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4176                 udelay(10);
4177         }
4178
4179         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4180             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4181              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4182                 u32 base_val;
4183
4184                 base_val = tp->pci_clock_ctrl;
4185                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4186                              CLOCK_CTRL_TXCLK_DISABLE);
4187
4188                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4189                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4190         } else if (tg3_flag(tp, 5780_CLASS) ||
4191                    tg3_flag(tp, CPMU_PRESENT) ||
4192                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4193                 /* do nothing */
4194         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4195                 u32 newbits1, newbits2;
4196
4197                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4198                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4199                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4200                                     CLOCK_CTRL_TXCLK_DISABLE |
4201                                     CLOCK_CTRL_ALTCLK);
4202                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4203                 } else if (tg3_flag(tp, 5705_PLUS)) {
4204                         newbits1 = CLOCK_CTRL_625_CORE;
4205                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4206                 } else {
4207                         newbits1 = CLOCK_CTRL_ALTCLK;
4208                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4209                 }
4210
4211                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4212                             40);
4213
4214                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4215                             40);
4216
4217                 if (!tg3_flag(tp, 5705_PLUS)) {
4218                         u32 newbits3;
4219
4220                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4221                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4222                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4223                                             CLOCK_CTRL_TXCLK_DISABLE |
4224                                             CLOCK_CTRL_44MHZ_CORE);
4225                         } else {
4226                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4227                         }
4228
4229                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4230                                     tp->pci_clock_ctrl | newbits3, 40);
4231                 }
4232         }
4233
4234         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4235                 tg3_power_down_phy(tp, do_low_power);
4236
4237         tg3_frob_aux_power(tp, true);
4238
4239         /* Workaround for unstable PLL clock */
4240         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4241             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4242              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4243                 u32 val = tr32(0x7d00);
4244
4245                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4246                 tw32(0x7d00, val);
4247                 if (!tg3_flag(tp, ENABLE_ASF)) {
4248                         int err;
4249
4250                         err = tg3_nvram_lock(tp);
4251                         tg3_halt_cpu(tp, RX_CPU_BASE);
4252                         if (!err)
4253                                 tg3_nvram_unlock(tp);
4254                 }
4255         }
4256
4257         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4258
4259         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4260
4261         return 0;
4262 }
4263
4264 static void tg3_power_down(struct tg3 *tp)
4265 {
4266         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4267         pci_set_power_state(tp->pdev, PCI_D3hot);
4268 }
4269
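/* Decode the MII aux status register into speed/duplex. FET PHYs encode
 * the result in separate 100/FULL bits rather than the combined
 * SPDMASK field.
 */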
4270 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4271 {
4272         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4273         case MII_TG3_AUX_STAT_10HALF:
4274                 *speed = SPEED_10;
4275                 *duplex = DUPLEX_HALF;
4276                 break;
4277
4278         case MII_TG3_AUX_STAT_10FULL:
4279                 *speed = SPEED_10;
4280                 *duplex = DUPLEX_FULL;
4281                 break;
4282
4283         case MII_TG3_AUX_STAT_100HALF:
4284                 *speed = SPEED_100;
4285                 *duplex = DUPLEX_HALF;
4286                 break;
4287
4288         case MII_TG3_AUX_STAT_100FULL:
4289                 *speed = SPEED_100;
4290                 *duplex = DUPLEX_FULL;
4291                 break;
4292
4293         case MII_TG3_AUX_STAT_1000HALF:
4294                 *speed = SPEED_1000;
4295                 *duplex = DUPLEX_HALF;
4296                 break;
4297
4298         case MII_TG3_AUX_STAT_1000FULL:
4299                 *speed = SPEED_1000;
4300                 *duplex = DUPLEX_FULL;
4301                 break;
4302
4303         default:
4304                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4305                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4306                                  SPEED_10;
4307                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4308                                   DUPLEX_HALF;
4309                         break;
4310                 }
4311                 *speed = SPEED_UNKNOWN;
4312                 *duplex = DUPLEX_UNKNOWN;
4313                 break;
4314         }
4315 }
4316
4317 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4318 {
4319         int err = 0;
4320         u32 val, new_adv;
4321
4322         new_adv = ADVERTISE_CSMA;
4323         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4324         new_adv |= mii_advertise_flowctrl(flowctrl);
4325
4326         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4327         if (err)
4328                 goto done;
4329
4330         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4331                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4332
4333                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4334                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4335                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4336
4337                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4338                 if (err)
4339                         goto done;
4340         }
4341
4342         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4343                 goto done;
4344
4345         tw32(TG3_CPMU_EEE_MODE,
4346              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4347
4348         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4349         if (!err) {
4350                 u32 err2;
4351
4352                 val = 0;
4353                 /* Advertise 100-BaseTX EEE ability */
4354                 if (advertise & ADVERTISED_100baseT_Full)
4355                         val |= MDIO_AN_EEE_ADV_100TX;
4356                 /* Advertise 1000-BaseT EEE ability */
4357                 if (advertise & ADVERTISED_1000baseT_Full)
4358                         val |= MDIO_AN_EEE_ADV_1000T;
4359
4360                 if (!tp->eee.eee_enabled) {
4361                         val = 0;
4362                         tp->eee.advertised = 0;
4363                 } else {
4364                         tp->eee.advertised = advertise &
4365                                              (ADVERTISED_100baseT_Full |
4366                                               ADVERTISED_1000baseT_Full);
4367                 }
4368
4369                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4370                 if (err)
4371                         val = 0;
4372
4373                 switch (tg3_asic_rev(tp)) {
4374                 case ASIC_REV_5717:
4375                 case ASIC_REV_57765:
4376                 case ASIC_REV_57766:
4377                 case ASIC_REV_5719:
4378                         /* If we advertised any EEE modes above... */
4379                         if (val)
4380                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4381                                       MII_TG3_DSP_TAP26_RMRXSTO |
4382                                       MII_TG3_DSP_TAP26_OPCSINPT;
4383                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4384                         /* Fall through */
4385                 case ASIC_REV_5720:
4386                 case ASIC_REV_5762:
4387                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4388                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4389                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4390                 }
4391
4392                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4393                 if (!err)
4394                         err = err2;
4395         }
4396
4397 done:
4398         return err;
4399 }
4400
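/* Kick off link negotiation on a copper PHY.  With autoneg enabled
 * (or in low-power mode) this programs the advertisements and
 * restarts autoneg; with autoneg disabled it forces speed/duplex in
 * BMCR, first dropping the link via BMCR_LOOPBACK so the forced mode
 * takes effect cleanly.
 */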
4401 static void tg3_phy_copper_begin(struct tg3 *tp)
4402 {
4403         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4404             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4405                 u32 adv, fc;
4406
4407                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4408                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4409                         adv = ADVERTISED_10baseT_Half |
4410                               ADVERTISED_10baseT_Full;
4411                         if (tg3_flag(tp, WOL_SPEED_100MB))
4412                                 adv |= ADVERTISED_100baseT_Half |
4413                                        ADVERTISED_100baseT_Full;
4414                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4415                                 if (!(tp->phy_flags &
4416                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4417                                         adv |= ADVERTISED_1000baseT_Half;
4418                                 adv |= ADVERTISED_1000baseT_Full;
4419                         }
4420
4421                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4422                 } else {
4423                         adv = tp->link_config.advertising;
4424                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4425                                 adv &= ~(ADVERTISED_1000baseT_Half |
4426                                          ADVERTISED_1000baseT_Full);
4427
4428                         fc = tp->link_config.flowctrl;
4429                 }
4430
4431                 tg3_phy_autoneg_cfg(tp, adv, fc);
4432
4433                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4434                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4435                         /* Normally during power down we want to autonegotiate
4436                          * the lowest possible speed for WOL. However, to avoid
4437                          * link flap, we leave it untouched.
4438                          */
4439                         return;
4440                 }
4441
4442                 tg3_writephy(tp, MII_BMCR,
4443                              BMCR_ANENABLE | BMCR_ANRESTART);
4444         } else {
4445                 int i;
4446                 u32 bmcr, orig_bmcr;
4447
4448                 tp->link_config.active_speed = tp->link_config.speed;
4449                 tp->link_config.active_duplex = tp->link_config.duplex;
4450
4451                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4452                         /* With autoneg disabled, the 5715 (same ASIC rev as
4453                          * the 5714) only links up when the advertisement
4454                          * register has the configured speed enabled.
4455                          */
4456                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4457                 }
4458
4459                 bmcr = 0;
4460                 switch (tp->link_config.speed) {
4461                 default:
4462                 case SPEED_10:
4463                         break;
4464
4465                 case SPEED_100:
4466                         bmcr |= BMCR_SPEED100;
4467                         break;
4468
4469                 case SPEED_1000:
4470                         bmcr |= BMCR_SPEED1000;
4471                         break;
4472                 }
4473
4474                 if (tp->link_config.duplex == DUPLEX_FULL)
4475                         bmcr |= BMCR_FULLDPLX;
4476
4477                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4478                     (bmcr != orig_bmcr)) {
4479                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4480                         for (i = 0; i < 1500; i++) {
4481                                 u32 tmp;
4482
4483                                 udelay(10);
4484                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4485                                     tg3_readphy(tp, MII_BMSR, &tmp))
4486                                         continue;
4487                                 if (!(tmp & BMSR_LSTATUS)) {
4488                                         udelay(40);
4489                                         break;
4490                                 }
4491                         }
4492                         tg3_writephy(tp, MII_BMCR, bmcr);
4493                         udelay(40);
4494                 }
4495         }
4496 }
4497
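/* Inverse of tg3_phy_autoneg_cfg(): rebuild tp->link_config (autoneg,
 * speed, duplex, advertising, flowctrl) from whatever is currently
 * programmed in the PHY registers.
 */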
4498 static int tg3_phy_pull_config(struct tg3 *tp)
4499 {
4500         int err;
4501         u32 val;
4502
4503         err = tg3_readphy(tp, MII_BMCR, &val);
4504         if (err)
4505                 goto done;
4506
4507         if (!(val & BMCR_ANENABLE)) {
4508                 tp->link_config.autoneg = AUTONEG_DISABLE;
4509                 tp->link_config.advertising = 0;
4510                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4511
4512                 err = -EIO;
4513
4514                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4515                 case 0:
4516                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4517                                 goto done;
4518
4519                         tp->link_config.speed = SPEED_10;
4520                         break;
4521                 case BMCR_SPEED100:
4522                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4523                                 goto done;
4524
4525                         tp->link_config.speed = SPEED_100;
4526                         break;
4527                 case BMCR_SPEED1000:
4528                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4529                                 tp->link_config.speed = SPEED_1000;
4530                                 break;
4531                         }
4532                         /* Fall through */
4533                 default:
4534                         goto done;
4535                 }
4536
4537                 if (val & BMCR_FULLDPLX)
4538                         tp->link_config.duplex = DUPLEX_FULL;
4539                 else
4540                         tp->link_config.duplex = DUPLEX_HALF;
4541
4542                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4543
4544                 err = 0;
4545                 goto done;
4546         }
4547
4548         tp->link_config.autoneg = AUTONEG_ENABLE;
4549         tp->link_config.advertising = ADVERTISED_Autoneg;
4550         tg3_flag_set(tp, PAUSE_AUTONEG);
4551
4552         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4553                 u32 adv;
4554
4555                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4556                 if (err)
4557                         goto done;
4558
4559                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4560                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4561
4562                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4563         } else {
4564                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4565         }
4566
4567         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4568                 u32 adv;
4569
4570                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4571                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4572                         if (err)
4573                                 goto done;
4574
4575                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4576                 } else {
4577                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4578                         if (err)
4579                                 goto done;
4580
4581                         adv = tg3_decode_flowctrl_1000X(val);
4582                         tp->link_config.flowctrl = adv;
4583
4584                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4585                         adv = mii_adv_to_ethtool_adv_x(val);
4586                 }
4587
4588                 tp->link_config.advertising |= adv;
4589         }
4590
4591 done:
4592         return err;
4593 }
4594
4595 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4596 {
4597         int err;
4598
4599         /* Turn off tap power management and set the extended
4600          * packet length bit. */
4601         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4602
4603         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4604         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4605         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4606         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4607         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4608
4609         udelay(40);
4610
4611         return err;
4612 }
4613
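/* Check whether the EEE advertisement currently in the PHY still
 * matches tp->eee.  A false return means the PHY needs to be
 * reconfigured (and reset) before the requested settings apply.
 */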
4614 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4615 {
4616         struct ethtool_eee eee;
4617
4618         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4619                 return true;
4620
4621         tg3_eee_pull_config(tp, &eee);
4622
4623         if (tp->eee.eee_enabled) {
4624                 if (tp->eee.advertised != eee.advertised ||
4625                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4626                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4627                         return false;
4628         } else {
4629                 /* EEE is disabled but we're advertising */
4630                 if (eee.advertised)
4631                         return false;
4632         }
4633
4634         return true;
4635 }
4636
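/* Verify that MII_ADVERTISE (and MII_CTRL1000 on gigabit-capable
 * PHYs) still match the requested advertising and flow control,
 * including the forced-master bits required on 5701 A0/B0.  The local
 * advertisement word is returned through *lcladv.
 */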
4637 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4638 {
4639         u32 advmsk, tgtadv, advertising;
4640
4641         advertising = tp->link_config.advertising;
4642         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4643
4644         advmsk = ADVERTISE_ALL;
4645         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4646                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4647                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4648         }
4649
4650         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4651                 return false;
4652
4653         if ((*lcladv & advmsk) != tgtadv)
4654                 return false;
4655
4656         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4657                 u32 tg3_ctrl;
4658
4659                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4660
4661                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4662                         return false;
4663
4664                 if (tgtadv &&
4665                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4666                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4667                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4668                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4669                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4670                 } else {
4671                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4672                 }
4673
4674                 if (tg3_ctrl != tgtadv)
4675                         return false;
4676         }
4677
4678         return true;
4679 }
4680
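/* Fetch the link partner's abilities from MII_STAT1000 and MII_LPA
 * into tp->link_config.rmt_adv (ethtool format); the raw LPA word is
 * returned through *rmtadv.  Returns false on a phy read error.
 */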
4681 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4682 {
4683         u32 lpeth = 0;
4684
4685         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4686                 u32 val;
4687
4688                 if (tg3_readphy(tp, MII_STAT1000, &val))
4689                         return false;
4690
4691                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4692         }
4693
4694         if (tg3_readphy(tp, MII_LPA, rmtadv))
4695                 return false;
4696
4697         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4698         tp->link_config.rmt_adv = lpeth;
4699
4700         return true;
4701 }
4702
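/* Propagate a link transition to the stack: toggle the carrier and
 * log the change.  Returns true only if the state actually differed
 * from tp->link_up.
 */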
4703 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4704 {
4705         if (curr_link_up != tp->link_up) {
4706                 if (curr_link_up) {
4707                         netif_carrier_on(tp->dev);
4708                 } else {
4709                         netif_carrier_off(tp->dev);
4710                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4711                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4712                 }
4713
4714                 tg3_link_report(tp);
4715                 return true;
4716         }
4717
4718         return false;
4719 }
4720
4721 static void tg3_clear_mac_status(struct tg3 *tp)
4722 {
4723         tw32(MAC_EVENT, 0);
4724
4725         tw32_f(MAC_STATUS,
4726                MAC_STATUS_SYNC_CHANGED |
4727                MAC_STATUS_CFG_CHANGED |
4728                MAC_STATUS_MI_COMPLETION |
4729                MAC_STATUS_LNKSTATE_CHANGED);
4730         udelay(40);
4731 }
4732
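/* Program the CPMU EEE mode, LPI and debounce timer registers from
 * tp->eee.  Writing 0 to TG3_CPMU_EEE_MODE when EEE is disabled turns
 * the feature off entirely.
 */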
4733 static void tg3_setup_eee(struct tg3 *tp)
4734 {
4735         u32 val;
4736
4737         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4738               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4739         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4740                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4741
4742         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4743
4744         tw32_f(TG3_CPMU_EEE_CTRL,
4745                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4746
4747         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4748               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4749               TG3_CPMU_EEEMD_LPI_IN_RX |
4750               TG3_CPMU_EEEMD_EEE_ENABLE;
4751
4752         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4753                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4754
4755         if (tg3_flag(tp, ENABLE_APE))
4756                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4757
4758         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4759
4760         tw32_f(TG3_CPMU_EEE_DBTMR1,
4761                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4762                (tp->eee.tx_lpi_timer & 0xffff));
4763
4764         tw32_f(TG3_CPMU_EEE_DBTMR2,
4765                TG3_CPMU_DBTMR2_APE_TX_2047US |
4766                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4767 }
4768
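/* Bring up (or re-validate) the copper link.  Roughly: clear MAC
 * status, apply per-chip PHY workarounds, poll BMSR for link, decode
 * speed/duplex from the aux status register, check that the PHY's
 * autoneg/EEE configuration still matches what we want (restarting
 * negotiation via tg3_phy_copper_begin() if not), and finally program
 * the MAC port mode, duplex and flow control to match.
 */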
4769 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4770 {
4771         bool current_link_up;
4772         u32 bmsr, val;
4773         u32 lcl_adv, rmt_adv;
4774         u16 current_speed;
4775         u8 current_duplex;
4776         int i, err;
4777
4778         tg3_clear_mac_status(tp);
4779
4780         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4781                 tw32_f(MAC_MI_MODE,
4782                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4783                 udelay(80);
4784         }
4785
4786         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4787
4788         /* Some third-party PHYs need to be reset on link going
4789          * down.
4790          */
4791         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4792              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4793              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4794             tp->link_up) {
4795                 tg3_readphy(tp, MII_BMSR, &bmsr);
4796                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4797                     !(bmsr & BMSR_LSTATUS))
4798                         force_reset = true;
4799         }
4800         if (force_reset)
4801                 tg3_phy_reset(tp);
4802
4803         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4804                 tg3_readphy(tp, MII_BMSR, &bmsr);
4805                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4806                     !tg3_flag(tp, INIT_COMPLETE))
4807                         bmsr = 0;
4808
4809                 if (!(bmsr & BMSR_LSTATUS)) {
4810                         err = tg3_init_5401phy_dsp(tp);
4811                         if (err)
4812                                 return err;
4813
4814                         tg3_readphy(tp, MII_BMSR, &bmsr);
4815                         for (i = 0; i < 1000; i++) {
4816                                 udelay(10);
4817                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4818                                     (bmsr & BMSR_LSTATUS)) {
4819                                         udelay(40);
4820                                         break;
4821                                 }
4822                         }
4823
4824                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4825                             TG3_PHY_REV_BCM5401_B0 &&
4826                             !(bmsr & BMSR_LSTATUS) &&
4827                             tp->link_config.active_speed == SPEED_1000) {
4828                                 err = tg3_phy_reset(tp);
4829                                 if (!err)
4830                                         err = tg3_init_5401phy_dsp(tp);
4831                                 if (err)
4832                                         return err;
4833                         }
4834                 }
4835         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4836                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4837                 /* 5701 {A0,B0} CRC bug workaround */
4838                 tg3_writephy(tp, 0x15, 0x0a75);
4839                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4840                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4841                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4842         }
4843
4844         /* Clear pending interrupts... */
4845         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4847
4848         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4849                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4850         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4851                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4852
4853         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4854             tg3_asic_rev(tp) == ASIC_REV_5701) {
4855                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4856                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4857                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4858                 else
4859                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4860         }
4861
4862         current_link_up = false;
4863         current_speed = SPEED_UNKNOWN;
4864         current_duplex = DUPLEX_UNKNOWN;
4865         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4866         tp->link_config.rmt_adv = 0;
4867
4868         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4869                 err = tg3_phy_auxctl_read(tp,
4870                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4871                                           &val);
4872                 if (!err && !(val & (1 << 10))) {
4873                         tg3_phy_auxctl_write(tp,
4874                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4875                                              val | (1 << 10));
4876                         goto relink;
4877                 }
4878         }
4879
4880         bmsr = 0;
4881         for (i = 0; i < 100; i++) {
4882                 tg3_readphy(tp, MII_BMSR, &bmsr);
4883                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4884                     (bmsr & BMSR_LSTATUS))
4885                         break;
4886                 udelay(40);
4887         }
4888
4889         if (bmsr & BMSR_LSTATUS) {
4890                 u32 aux_stat, bmcr;
4891
4892                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4893                 for (i = 0; i < 2000; i++) {
4894                         udelay(10);
4895                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4896                             aux_stat)
4897                                 break;
4898                 }
4899
4900                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4901                                              &current_speed,
4902                                              &current_duplex);
4903
4904                 bmcr = 0;
4905                 for (i = 0; i < 200; i++) {
4906                         tg3_readphy(tp, MII_BMCR, &bmcr);
4907                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4908                                 continue;
4909                         if (bmcr && bmcr != 0x7fff)
4910                                 break;
4911                         udelay(10);
4912                 }
4913
4914                 lcl_adv = 0;
4915                 rmt_adv = 0;
4916
4917                 tp->link_config.active_speed = current_speed;
4918                 tp->link_config.active_duplex = current_duplex;
4919
4920                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4921                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4922
4923                         if ((bmcr & BMCR_ANENABLE) &&
4924                             eee_config_ok &&
4925                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4926                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4927                                 current_link_up = true;
4928
4929                         /* Changes to EEE settings take effect only after a
4930                          * PHY reset.  If we have skipped a reset due to Link
4931                          * Flap Avoidance being enabled, do it now.
4932                          */
4933                         if (!eee_config_ok &&
4934                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4935                             !force_reset) {
4936                                 tg3_setup_eee(tp);
4937                                 tg3_phy_reset(tp);
4938                         }
4939                 } else {
4940                         if (!(bmcr & BMCR_ANENABLE) &&
4941                             tp->link_config.speed == current_speed &&
4942                             tp->link_config.duplex == current_duplex) {
4943                                 current_link_up = true;
4944                         }
4945                 }
4946
4947                 if (current_link_up &&
4948                     tp->link_config.active_duplex == DUPLEX_FULL) {
4949                         u32 reg, bit;
4950
4951                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4952                                 reg = MII_TG3_FET_GEN_STAT;
4953                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4954                         } else {
4955                                 reg = MII_TG3_EXT_STAT;
4956                                 bit = MII_TG3_EXT_STAT_MDIX;
4957                         }
4958
4959                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4960                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4961
4962                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4963                 }
4964         }
4965
4966 relink:
4967         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4968                 tg3_phy_copper_begin(tp);
4969
4970                 if (tg3_flag(tp, ROBOSWITCH)) {
4971                         current_link_up = true;
4972                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4973                         current_speed = SPEED_1000;
4974                         current_duplex = DUPLEX_FULL;
4975                         tp->link_config.active_speed = current_speed;
4976                         tp->link_config.active_duplex = current_duplex;
4977                 }
4978
4979                 tg3_readphy(tp, MII_BMSR, &bmsr);
4980                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4981                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4982                         current_link_up = true;
4983         }
4984
4985         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4986         if (current_link_up) {
4987                 if (tp->link_config.active_speed == SPEED_100 ||
4988                     tp->link_config.active_speed == SPEED_10)
4989                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4990                 else
4991                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4992         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4993                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4994         else
4995                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4996
4997         /* For the 5750 core in the BCM4785 chip to work properly
4998          * in RGMII mode, the LED Control Register must be set up.
4999          */
5000         if (tg3_flag(tp, RGMII_MODE)) {
5001                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5002                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5003
5004                 if (tp->link_config.active_speed == SPEED_10)
5005                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5006                 else if (tp->link_config.active_speed == SPEED_100)
5007                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5008                                      LED_CTRL_100MBPS_ON);
5009                 else if (tp->link_config.active_speed == SPEED_1000)
5010                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011                                      LED_CTRL_1000MBPS_ON);
5012
5013                 tw32(MAC_LED_CTRL, led_ctrl);
5014                 udelay(40);
5015         }
5016
5017         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5018         if (tp->link_config.active_duplex == DUPLEX_HALF)
5019                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5020
5021         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5022                 if (current_link_up &&
5023                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5024                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5025                 else
5026                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5027         }
5028
5029         /* ??? Without this setting Netgear GA302T PHY does not
5030          * ??? send/receive packets...
5031          */
5032         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5033             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5034                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5035                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5036                 udelay(80);
5037         }
5038
5039         tw32_f(MAC_MODE, tp->mac_mode);
5040         udelay(40);
5041
5042         tg3_phy_eee_adjust(tp, current_link_up);
5043
5044         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5045                 /* Polled via timer. */
5046                 tw32_f(MAC_EVENT, 0);
5047         } else {
5048                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5049         }
5050         udelay(40);
5051
5052         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5053             current_link_up &&
5054             tp->link_config.active_speed == SPEED_1000 &&
5055             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5056                 udelay(120);
5057                 tw32_f(MAC_STATUS,
5058                      (MAC_STATUS_SYNC_CHANGED |
5059                       MAC_STATUS_CFG_CHANGED));
5060                 udelay(40);
5061                 tg3_write_mem(tp,
5062                               NIC_SRAM_FIRMWARE_MBOX,
5063                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5064         }
5065
5066         /* Prevent send BD corruption by disabling CLKREQ at 10/100. */
5067         if (tg3_flag(tp, CLKREQ_BUG)) {
5068                 if (tp->link_config.active_speed == SPEED_100 ||
5069                     tp->link_config.active_speed == SPEED_10)
5070                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5071                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5072                 else
5073                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5074                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5075         }
5076
5077         tg3_test_and_report_link_chg(tp, current_link_up);
5078
5079         return 0;
5080 }
5081
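/* Software autoneg arbitration for fiber links, used when the SERDES
 * hardware autoneg block is not.  The states below are essentially
 * the 802.3 clause 37 arbitration states; tg3_fiber_aneg_smachine()
 * advances one step per call and fiber_autoneg() drives it in a
 * polling loop.
 */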
5082 struct tg3_fiber_aneginfo {
5083         int state;
5084 #define ANEG_STATE_UNKNOWN              0
5085 #define ANEG_STATE_AN_ENABLE            1
5086 #define ANEG_STATE_RESTART_INIT         2
5087 #define ANEG_STATE_RESTART              3
5088 #define ANEG_STATE_DISABLE_LINK_OK      4
5089 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5090 #define ANEG_STATE_ABILITY_DETECT       6
5091 #define ANEG_STATE_ACK_DETECT_INIT      7
5092 #define ANEG_STATE_ACK_DETECT           8
5093 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5094 #define ANEG_STATE_COMPLETE_ACK         10
5095 #define ANEG_STATE_IDLE_DETECT_INIT     11
5096 #define ANEG_STATE_IDLE_DETECT          12
5097 #define ANEG_STATE_LINK_OK              13
5098 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5099 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5100
5101         u32 flags;
5102 #define MR_AN_ENABLE            0x00000001
5103 #define MR_RESTART_AN           0x00000002
5104 #define MR_AN_COMPLETE          0x00000004
5105 #define MR_PAGE_RX              0x00000008
5106 #define MR_NP_LOADED            0x00000010
5107 #define MR_TOGGLE_TX            0x00000020
5108 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5109 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5110 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5111 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5112 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5113 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5114 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5115 #define MR_TOGGLE_RX            0x00002000
5116 #define MR_NP_RX                0x00004000
5117
5118 #define MR_LINK_OK              0x80000000
5119
5120         unsigned long link_time, cur_time;
5121
5122         u32 ability_match_cfg;
5123         int ability_match_count;
5124
5125         char ability_match, idle_match, ack_match;
5126
5127         u32 txconfig, rxconfig;
5128 #define ANEG_CFG_NP             0x00000080
5129 #define ANEG_CFG_ACK            0x00000040
5130 #define ANEG_CFG_RF2            0x00000020
5131 #define ANEG_CFG_RF1            0x00000010
5132 #define ANEG_CFG_PS2            0x00000001
5133 #define ANEG_CFG_PS1            0x00008000
5134 #define ANEG_CFG_HD             0x00004000
5135 #define ANEG_CFG_FD             0x00002000
5136 #define ANEG_CFG_INVAL          0x00001f06
5137
5138 };
5139 #define ANEG_OK         0
5140 #define ANEG_DONE       1
5141 #define ANEG_TIMER_ENAB 2
5142 #define ANEG_FAILED     -1
5143
5144 #define ANEG_STATE_SETTLE_TIME  10000
5145
5146 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5147                                    struct tg3_fiber_aneginfo *ap)
5148 {
5149         u16 flowctrl;
5150         unsigned long delta;
5151         u32 rx_cfg_reg;
5152         int ret;
5153
5154         if (ap->state == ANEG_STATE_UNKNOWN) {
5155                 ap->rxconfig = 0;
5156                 ap->link_time = 0;
5157                 ap->cur_time = 0;
5158                 ap->ability_match_cfg = 0;
5159                 ap->ability_match_count = 0;
5160                 ap->ability_match = 0;
5161                 ap->idle_match = 0;
5162                 ap->ack_match = 0;
5163         }
5164         ap->cur_time++;
5165
5166         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5167                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5168
5169                 if (rx_cfg_reg != ap->ability_match_cfg) {
5170                         ap->ability_match_cfg = rx_cfg_reg;
5171                         ap->ability_match = 0;
5172                         ap->ability_match_count = 0;
5173                 } else {
5174                         if (++ap->ability_match_count > 1) {
5175                                 ap->ability_match = 1;
5176                                 ap->ability_match_cfg = rx_cfg_reg;
5177                         }
5178                 }
5179                 if (rx_cfg_reg & ANEG_CFG_ACK)
5180                         ap->ack_match = 1;
5181                 else
5182                         ap->ack_match = 0;
5183
5184                 ap->idle_match = 0;
5185         } else {
5186                 ap->idle_match = 1;
5187                 ap->ability_match_cfg = 0;
5188                 ap->ability_match_count = 0;
5189                 ap->ability_match = 0;
5190                 ap->ack_match = 0;
5191
5192                 rx_cfg_reg = 0;
5193         }
5194
5195         ap->rxconfig = rx_cfg_reg;
5196         ret = ANEG_OK;
5197
5198         switch (ap->state) {
5199         case ANEG_STATE_UNKNOWN:
5200                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5201                         ap->state = ANEG_STATE_AN_ENABLE;
5202
5203                 /* fallthru */
5204         case ANEG_STATE_AN_ENABLE:
5205                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5206                 if (ap->flags & MR_AN_ENABLE) {
5207                         ap->link_time = 0;
5208                         ap->cur_time = 0;
5209                         ap->ability_match_cfg = 0;
5210                         ap->ability_match_count = 0;
5211                         ap->ability_match = 0;
5212                         ap->idle_match = 0;
5213                         ap->ack_match = 0;
5214
5215                         ap->state = ANEG_STATE_RESTART_INIT;
5216                 } else {
5217                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5218                 }
5219                 break;
5220
5221         case ANEG_STATE_RESTART_INIT:
5222                 ap->link_time = ap->cur_time;
5223                 ap->flags &= ~(MR_NP_LOADED);
5224                 ap->txconfig = 0;
5225                 tw32(MAC_TX_AUTO_NEG, 0);
5226                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5227                 tw32_f(MAC_MODE, tp->mac_mode);
5228                 udelay(40);
5229
5230                 ret = ANEG_TIMER_ENAB;
5231                 ap->state = ANEG_STATE_RESTART;
5232
5233                 /* fallthru */
5234         case ANEG_STATE_RESTART:
5235                 delta = ap->cur_time - ap->link_time;
5236                 if (delta > ANEG_STATE_SETTLE_TIME)
5237                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5238                 else
5239                         ret = ANEG_TIMER_ENAB;
5240                 break;
5241
5242         case ANEG_STATE_DISABLE_LINK_OK:
5243                 ret = ANEG_DONE;
5244                 break;
5245
5246         case ANEG_STATE_ABILITY_DETECT_INIT:
5247                 ap->flags &= ~(MR_TOGGLE_TX);
5248                 ap->txconfig = ANEG_CFG_FD;
5249                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5250                 if (flowctrl & ADVERTISE_1000XPAUSE)
5251                         ap->txconfig |= ANEG_CFG_PS1;
5252                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5253                         ap->txconfig |= ANEG_CFG_PS2;
5254                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5255                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5256                 tw32_f(MAC_MODE, tp->mac_mode);
5257                 udelay(40);
5258
5259                 ap->state = ANEG_STATE_ABILITY_DETECT;
5260                 break;
5261
5262         case ANEG_STATE_ABILITY_DETECT:
5263                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5264                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5265                 break;
5266
5267         case ANEG_STATE_ACK_DETECT_INIT:
5268                 ap->txconfig |= ANEG_CFG_ACK;
5269                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5270                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5271                 tw32_f(MAC_MODE, tp->mac_mode);
5272                 udelay(40);
5273
5274                 ap->state = ANEG_STATE_ACK_DETECT;
5275
5276                 /* fallthru */
5277         case ANEG_STATE_ACK_DETECT:
5278                 if (ap->ack_match != 0) {
5279                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5280                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5281                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5282                         } else {
5283                                 ap->state = ANEG_STATE_AN_ENABLE;
5284                         }
5285                 } else if (ap->ability_match != 0 &&
5286                            ap->rxconfig == 0) {
5287                         ap->state = ANEG_STATE_AN_ENABLE;
5288                 }
5289                 break;
5290
5291         case ANEG_STATE_COMPLETE_ACK_INIT:
5292                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5293                         ret = ANEG_FAILED;
5294                         break;
5295                 }
5296                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5297                                MR_LP_ADV_HALF_DUPLEX |
5298                                MR_LP_ADV_SYM_PAUSE |
5299                                MR_LP_ADV_ASYM_PAUSE |
5300                                MR_LP_ADV_REMOTE_FAULT1 |
5301                                MR_LP_ADV_REMOTE_FAULT2 |
5302                                MR_LP_ADV_NEXT_PAGE |
5303                                MR_TOGGLE_RX |
5304                                MR_NP_RX);
5305                 if (ap->rxconfig & ANEG_CFG_FD)
5306                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5307                 if (ap->rxconfig & ANEG_CFG_HD)
5308                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5309                 if (ap->rxconfig & ANEG_CFG_PS1)
5310                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5311                 if (ap->rxconfig & ANEG_CFG_PS2)
5312                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5313                 if (ap->rxconfig & ANEG_CFG_RF1)
5314                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5315                 if (ap->rxconfig & ANEG_CFG_RF2)
5316                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5317                 if (ap->rxconfig & ANEG_CFG_NP)
5318                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5319
5320                 ap->link_time = ap->cur_time;
5321
5322                 ap->flags ^= (MR_TOGGLE_TX);
5323                 if (ap->rxconfig & 0x0008)
5324                         ap->flags |= MR_TOGGLE_RX;
5325                 if (ap->rxconfig & ANEG_CFG_NP)
5326                         ap->flags |= MR_NP_RX;
5327                 ap->flags |= MR_PAGE_RX;
5328
5329                 ap->state = ANEG_STATE_COMPLETE_ACK;
5330                 ret = ANEG_TIMER_ENAB;
5331                 break;
5332
5333         case ANEG_STATE_COMPLETE_ACK:
5334                 if (ap->ability_match != 0 &&
5335                     ap->rxconfig == 0) {
5336                         ap->state = ANEG_STATE_AN_ENABLE;
5337                         break;
5338                 }
5339                 delta = ap->cur_time - ap->link_time;
5340                 if (delta > ANEG_STATE_SETTLE_TIME) {
5341                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5342                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5343                         } else {
5344                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5345                                     !(ap->flags & MR_NP_RX)) {
5346                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5347                                 } else {
5348                                         ret = ANEG_FAILED;
5349                                 }
5350                         }
5351                 }
5352                 break;
5353
5354         case ANEG_STATE_IDLE_DETECT_INIT:
5355                 ap->link_time = ap->cur_time;
5356                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5357                 tw32_f(MAC_MODE, tp->mac_mode);
5358                 udelay(40);
5359
5360                 ap->state = ANEG_STATE_IDLE_DETECT;
5361                 ret = ANEG_TIMER_ENAB;
5362                 break;
5363
5364         case ANEG_STATE_IDLE_DETECT:
5365                 if (ap->ability_match != 0 &&
5366                     ap->rxconfig == 0) {
5367                         ap->state = ANEG_STATE_AN_ENABLE;
5368                         break;
5369                 }
5370                 delta = ap->cur_time - ap->link_time;
5371                 if (delta > ANEG_STATE_SETTLE_TIME) {
5372                         /* XXX another gem from the Broadcom driver :( */
5373                         ap->state = ANEG_STATE_LINK_OK;
5374                 }
5375                 break;
5376
5377         case ANEG_STATE_LINK_OK:
5378                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5379                 ret = ANEG_DONE;
5380                 break;
5381
5382         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5383                 /* ??? unimplemented */
5384                 break;
5385
5386         case ANEG_STATE_NEXT_PAGE_WAIT:
5387                 /* ??? unimplemented */
5388                 break;
5389
5390         default:
5391                 ret = ANEG_FAILED;
5392                 break;
5393         }
5394
5395         return ret;
5396 }
5397
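/* Run the software autoneg state machine to completion: force GMII
 * port mode, transmit config words (MAC_MODE_SEND_CONFIGS), then poll
 * tg3_fiber_aneg_smachine() once per microsecond for up to ~195 ms.
 * Returns nonzero when negotiation reached ANEG_DONE with any of the
 * completion, link-OK or LP full-duplex flags set; the tx/rx config
 * flags are passed back for pause resolution.
 */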
5398 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5399 {
5400         int res = 0;
5401         struct tg3_fiber_aneginfo aninfo;
5402         int status = ANEG_FAILED;
5403         unsigned int tick;
5404         u32 tmp;
5405
5406         tw32_f(MAC_TX_AUTO_NEG, 0);
5407
5408         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5409         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5410         udelay(40);
5411
5412         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5413         udelay(40);
5414
5415         memset(&aninfo, 0, sizeof(aninfo));
5416         aninfo.flags |= MR_AN_ENABLE;
5417         aninfo.state = ANEG_STATE_UNKNOWN;
5418         aninfo.cur_time = 0;
5419         tick = 0;
5420         while (++tick < 195000) {
5421                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5422                 if (status == ANEG_DONE || status == ANEG_FAILED)
5423                         break;
5424
5425                 udelay(1);
5426         }
5427
5428         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5429         tw32_f(MAC_MODE, tp->mac_mode);
5430         udelay(40);
5431
5432         *txflags = aninfo.txconfig;
5433         *rxflags = aninfo.flags;
5434
5435         if (status == ANEG_DONE &&
5436             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5437                              MR_LP_ADV_FULL_DUPLEX)))
5438                 res = 1;
5439
5440         return res;
5441 }
5442
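/* One-time init for the BCM8002 SerDes PHY: software reset, PLL lock
 * range, auto-lock/comdet setup and a POR pulse, all done through raw
 * vendor register writes (the 0x10-0x18 accesses below).
 */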
5443 static void tg3_init_bcm8002(struct tg3 *tp)
5444 {
5445         u32 mac_status = tr32(MAC_STATUS);
5446         int i;
5447
5448         /* Reset when initializing for the first time or when we have a link. */
5449         if (tg3_flag(tp, INIT_COMPLETE) &&
5450             !(mac_status & MAC_STATUS_PCS_SYNCED))
5451                 return;
5452
5453         /* Set PLL lock range. */
5454         tg3_writephy(tp, 0x16, 0x8007);
5455
5456         /* SW reset */
5457         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5458
5459         /* Wait for reset to complete. */
5460         /* XXX schedule_timeout() ... */
5461         for (i = 0; i < 500; i++)
5462                 udelay(10);
5463
5464         /* Config mode; select PMA/Ch 1 regs. */
5465         tg3_writephy(tp, 0x10, 0x8411);
5466
5467         /* Enable auto-lock and comdet, select txclk for tx. */
5468         tg3_writephy(tp, 0x11, 0x0a10);
5469
5470         tg3_writephy(tp, 0x18, 0x00a0);
5471         tg3_writephy(tp, 0x16, 0x41ff);
5472
5473         /* Assert and deassert POR. */
5474         tg3_writephy(tp, 0x13, 0x0400);
5475         udelay(40);
5476         tg3_writephy(tp, 0x13, 0x0000);
5477
5478         tg3_writephy(tp, 0x11, 0x0a50);
5479         udelay(40);
5480         tg3_writephy(tp, 0x11, 0x0a10);
5481
5482         /* Wait for signal to stabilize */
5483         /* XXX schedule_timeout() ... */
5484         for (i = 0; i < 15000; i++)
5485                 udelay(10);
5486
5487         /* Deselect the channel register so we can read the PHYID
5488          * later.
5489          */
5490         tg3_writephy(tp, 0x10, 0x8011);
5491 }
5492
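/* Fiber link setup using the SG_DIG hardware autoneg block.  Applies
 * a serdes config workaround (skipped on 5704 A0/A1), resolves pause
 * from the SG_DIG status bits, and falls back to parallel detection
 * when the partner never completes autoneg.
 */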
5493 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5494 {
5495         u16 flowctrl;
5496         bool current_link_up;
5497         u32 sg_dig_ctrl, sg_dig_status;
5498         u32 serdes_cfg, expected_sg_dig_ctrl;
5499         int workaround, port_a;
5500
5501         serdes_cfg = 0;
5502         expected_sg_dig_ctrl = 0;
5503         workaround = 0;
5504         port_a = 1;
5505         current_link_up = false;
5506
5507         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5508             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5509                 workaround = 1;
5510                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5511                         port_a = 0;
5512
5513                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
5514                  * and bits 20-23 for the voltage regulator. */
5515                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5516         }
5517
5518         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5519
5520         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5521                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5522                         if (workaround) {
5523                                 u32 val = serdes_cfg;
5524
5525                                 if (port_a)
5526                                         val |= 0xc010000;
5527                                 else
5528                                         val |= 0x4010000;
5529                                 tw32_f(MAC_SERDES_CFG, val);
5530                         }
5531
5532                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5533                 }
5534                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5535                         tg3_setup_flow_control(tp, 0, 0);
5536                         current_link_up = true;
5537                 }
5538                 goto out;
5539         }
5540
5541         /* Want auto-negotiation.  */
5542         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5543
5544         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5545         if (flowctrl & ADVERTISE_1000XPAUSE)
5546                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5547         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5548                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5549
5550         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5551                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5552                     tp->serdes_counter &&
5553                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5554                                     MAC_STATUS_RCVD_CFG)) ==
5555                      MAC_STATUS_PCS_SYNCED)) {
5556                         tp->serdes_counter--;
5557                         current_link_up = true;
5558                         goto out;
5559                 }
5560 restart_autoneg:
5561                 if (workaround)
5562                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5563                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5564                 udelay(5);
5565                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5566
5567                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5568                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5569         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5570                                  MAC_STATUS_SIGNAL_DET)) {
5571                 sg_dig_status = tr32(SG_DIG_STATUS);
5572                 mac_status = tr32(MAC_STATUS);
5573
5574                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5575                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5576                         u32 local_adv = 0, remote_adv = 0;
5577
5578                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5579                                 local_adv |= ADVERTISE_1000XPAUSE;
5580                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5581                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5582
5583                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5584                                 remote_adv |= LPA_1000XPAUSE;
5585                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5586                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5587
5588                         tp->link_config.rmt_adv =
5589                                            mii_adv_to_ethtool_adv_x(remote_adv);
5590
5591                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5592                         current_link_up = true;
5593                         tp->serdes_counter = 0;
5594                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5595                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5596                         if (tp->serdes_counter)
5597                                 tp->serdes_counter--;
5598                         else {
5599                                 if (workaround) {
5600                                         u32 val = serdes_cfg;
5601
5602                                         if (port_a)
5603                                                 val |= 0xc010000;
5604                                         else
5605                                                 val |= 0x4010000;
5606
5607                                         tw32_f(MAC_SERDES_CFG, val);
5608                                 }
5609
5610                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5611                                 udelay(40);
5612
5613                                 /* Link parallel detection - link is up
5614                                  * only if we have PCS_SYNC and are not
5615                                  * receiving config code words. */
5616                                 mac_status = tr32(MAC_STATUS);
5617                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5618                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5619                                         tg3_setup_flow_control(tp, 0, 0);
5620                                         current_link_up = true;
5621                                         tp->phy_flags |=
5622                                                 TG3_PHYFLG_PARALLEL_DETECT;
5623                                         tp->serdes_counter =
5624                                                 SERDES_PARALLEL_DET_TIMEOUT;
5625                                 } else
5626                                         goto restart_autoneg;
5627                         }
5628                 }
5629         } else {
5630                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5631                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5632         }
5633
5634 out:
5635         return current_link_up;
5636 }
5637
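/* Fiber link setup without the hardware autoneg block: either run
 * the software state machine via fiber_autoneg() and resolve pause
 * from the exchanged config words, or force a 1000FD link when
 * autoneg is disabled.
 */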
5638 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5639 {
5640         bool current_link_up = false;
5641
5642         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5643                 goto out;
5644
5645         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5646                 u32 txflags, rxflags;
5647                 int i;
5648
5649                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5650                         u32 local_adv = 0, remote_adv = 0;
5651
5652                         if (txflags & ANEG_CFG_PS1)
5653                                 local_adv |= ADVERTISE_1000XPAUSE;
5654                         if (txflags & ANEG_CFG_PS2)
5655                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5656
5657                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5658                                 remote_adv |= LPA_1000XPAUSE;
5659                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5660                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5661
5662                         tp->link_config.rmt_adv =
5663                                            mii_adv_to_ethtool_adv_x(remote_adv);
5664
5665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5666
5667                         current_link_up = true;
5668                 }
5669                 for (i = 0; i < 30; i++) {
5670                         udelay(20);
5671                         tw32_f(MAC_STATUS,
5672                                (MAC_STATUS_SYNC_CHANGED |
5673                                 MAC_STATUS_CFG_CHANGED));
5674                         udelay(40);
5675                         if ((tr32(MAC_STATUS) &
5676                              (MAC_STATUS_SYNC_CHANGED |
5677                               MAC_STATUS_CFG_CHANGED)) == 0)
5678                                 break;
5679                 }
5680
5681                 mac_status = tr32(MAC_STATUS);
5682                 if (!current_link_up &&
5683                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5684                     !(mac_status & MAC_STATUS_RCVD_CFG))
5685                         current_link_up = true;
5686         } else {
5687                 tg3_setup_flow_control(tp, 0, 0);
5688
5689                 /* Forcing 1000FD link up. */
5690                 current_link_up = true;
5691
5692                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5693                 udelay(40);
5694
5695                 tw32_f(MAC_MODE, tp->mac_mode);
5696                 udelay(40);
5697         }
5698
5699 out:
5700         return current_link_up;
5701 }
5702
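/* Top-level link setup for TBI (fiber) ports: put the MAC into TBI
 * port mode, run hardware or software autoneg as configured, let the
 * MAC_STATUS change bits settle, then drive the link LED and report
 * any link or flow-control change.
 */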
5703 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5704 {
5705         u32 orig_pause_cfg;
5706         u16 orig_active_speed;
5707         u8 orig_active_duplex;
5708         u32 mac_status;
5709         bool current_link_up;
5710         int i;
5711
5712         orig_pause_cfg = tp->link_config.active_flowctrl;
5713         orig_active_speed = tp->link_config.active_speed;
5714         orig_active_duplex = tp->link_config.active_duplex;
5715
5716         if (!tg3_flag(tp, HW_AUTONEG) &&
5717             tp->link_up &&
5718             tg3_flag(tp, INIT_COMPLETE)) {
5719                 mac_status = tr32(MAC_STATUS);
5720                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5721                                MAC_STATUS_SIGNAL_DET |
5722                                MAC_STATUS_CFG_CHANGED |
5723                                MAC_STATUS_RCVD_CFG);
5724                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5725                                    MAC_STATUS_SIGNAL_DET)) {
5726                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5727                                             MAC_STATUS_CFG_CHANGED));
5728                         return 0;
5729                 }
5730         }
5731
5732         tw32_f(MAC_TX_AUTO_NEG, 0);
5733
5734         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5735         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5736         tw32_f(MAC_MODE, tp->mac_mode);
5737         udelay(40);
5738
5739         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5740                 tg3_init_bcm8002(tp);
5741
5742         /* Enable link change events even when polling the serdes.  */
5743         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5744         udelay(40);
5745
5746         current_link_up = false;
5747         tp->link_config.rmt_adv = 0;
5748         mac_status = tr32(MAC_STATUS);
5749
5750         if (tg3_flag(tp, HW_AUTONEG))
5751                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5752         else
5753                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5754
5755         tp->napi[0].hw_status->status =
5756                 (SD_STATUS_UPDATED |
5757                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5758
5759         for (i = 0; i < 100; i++) {
5760                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5761                                     MAC_STATUS_CFG_CHANGED));
5762                 udelay(5);
5763                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5764                                          MAC_STATUS_CFG_CHANGED |
5765                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5766                         break;
5767         }
5768
5769         mac_status = tr32(MAC_STATUS);
5770         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5771                 current_link_up = false;
5772                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5773                     tp->serdes_counter == 0) {
5774                         tw32_f(MAC_MODE, (tp->mac_mode |
5775                                           MAC_MODE_SEND_CONFIGS));
5776                         udelay(1);
5777                         tw32_f(MAC_MODE, tp->mac_mode);
5778                 }
5779         }
5780
5781         if (current_link_up) {
5782                 tp->link_config.active_speed = SPEED_1000;
5783                 tp->link_config.active_duplex = DUPLEX_FULL;
5784                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5785                                     LED_CTRL_LNKLED_OVERRIDE |
5786                                     LED_CTRL_1000MBPS_ON));
5787         } else {
5788                 tp->link_config.active_speed = SPEED_UNKNOWN;
5789                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5790                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5791                                     LED_CTRL_LNKLED_OVERRIDE |
5792                                     LED_CTRL_TRAFFIC_OVERRIDE));
5793         }
5794
5795         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5796                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5797                 if (orig_pause_cfg != now_pause_cfg ||
5798                     orig_active_speed != tp->link_config.active_speed ||
5799                     orig_active_duplex != tp->link_config.active_duplex)
5800                         tg3_link_report(tp);
5801         }
5802
5803         return 0;
5804 }
5805
5806 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5807 {
5808         int err = 0;
5809         u32 bmsr, bmcr;
5810         u16 current_speed = SPEED_UNKNOWN;
5811         u8 current_duplex = DUPLEX_UNKNOWN;
5812         bool current_link_up = false;
5813         u32 local_adv, remote_adv, sgsr;
5814
5815         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5816              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5817              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5818              (sgsr & SERDES_TG3_SGMII_MODE)) {
5819
5820                 if (force_reset)
5821                         tg3_phy_reset(tp);
5822
5823                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5824
5825                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5826                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5827                 } else {
5828                         current_link_up = true;
5829                         if (sgsr & SERDES_TG3_SPEED_1000) {
5830                                 current_speed = SPEED_1000;
5831                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5832                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5833                                 current_speed = SPEED_100;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5835                         } else {
5836                                 current_speed = SPEED_10;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         }
5839
5840                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5841                                 current_duplex = DUPLEX_FULL;
5842                         else
5843                                 current_duplex = DUPLEX_HALF;
5844                 }
5845
5846                 tw32_f(MAC_MODE, tp->mac_mode);
5847                 udelay(40);
5848
5849                 tg3_clear_mac_status(tp);
5850
5851                 goto fiber_setup_done;
5852         }
5853
5854         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5855         tw32_f(MAC_MODE, tp->mac_mode);
5856         udelay(40);
5857
5858         tg3_clear_mac_status(tp);
5859
5860         if (force_reset)
5861                 tg3_phy_reset(tp);
5862
5863         tp->link_config.rmt_adv = 0;
5864
5865         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5867         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5868                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5869                         bmsr |= BMSR_LSTATUS;
5870                 else
5871                         bmsr &= ~BMSR_LSTATUS;
5872         }
5873
5874         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5875
5876         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5877             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5878                 /* do nothing, just check for link up at the end */
5879         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5880                 u32 adv, newadv;
5881
5882                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5883                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5884                                  ADVERTISE_1000XPAUSE |
5885                                  ADVERTISE_1000XPSE_ASYM |
5886                                  ADVERTISE_SLCT);
5887
5888                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5889                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5890
5891                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5892                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5893                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5894                         tg3_writephy(tp, MII_BMCR, bmcr);
5895
5896                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5897                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5898                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5899
5900                         return err;
5901                 }
5902         } else {
5903                 u32 new_bmcr;
5904
5905                 bmcr &= ~BMCR_SPEED1000;
5906                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5907
5908                 if (tp->link_config.duplex == DUPLEX_FULL)
5909                         new_bmcr |= BMCR_FULLDPLX;
5910
5911                 if (new_bmcr != bmcr) {
5912                         /* BMCR_SPEED1000 is a reserved bit that needs
5913                          * to be set on write.
5914                          */
5915                         new_bmcr |= BMCR_SPEED1000;
5916
5917                         /* Force a linkdown */
5918                         if (tp->link_up) {
5919                                 u32 adv;
5920
5921                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5922                                 adv &= ~(ADVERTISE_1000XFULL |
5923                                          ADVERTISE_1000XHALF |
5924                                          ADVERTISE_SLCT);
5925                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5926                                 tg3_writephy(tp, MII_BMCR, bmcr |
5927                                                            BMCR_ANRESTART |
5928                                                            BMCR_ANENABLE);
5929                                 udelay(10);
5930                                 tg3_carrier_off(tp);
5931                         }
5932                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5933                         bmcr = new_bmcr;
5934                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5936                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5937                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5938                                         bmsr |= BMSR_LSTATUS;
5939                                 else
5940                                         bmsr &= ~BMSR_LSTATUS;
5941                         }
5942                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5943                 }
5944         }
5945
5946         if (bmsr & BMSR_LSTATUS) {
5947                 current_speed = SPEED_1000;
5948                 current_link_up = true;
5949                 if (bmcr & BMCR_FULLDPLX)
5950                         current_duplex = DUPLEX_FULL;
5951                 else
5952                         current_duplex = DUPLEX_HALF;
5953
5954                 local_adv = 0;
5955                 remote_adv = 0;
5956
5957                 if (bmcr & BMCR_ANENABLE) {
5958                         u32 common;
5959
5960                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5961                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5962                         common = local_adv & remote_adv;
5963                         if (common & (ADVERTISE_1000XHALF |
5964                                       ADVERTISE_1000XFULL)) {
5965                                 if (common & ADVERTISE_1000XFULL)
5966                                         current_duplex = DUPLEX_FULL;
5967                                 else
5968                                         current_duplex = DUPLEX_HALF;
5969
5970                                 tp->link_config.rmt_adv =
5971                                            mii_adv_to_ethtool_adv_x(remote_adv);
5972                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5973                                 /* Link is up via parallel detect */
5974                         } else {
5975                                 current_link_up = false;
5976                         }
5977                 }
5978         }
5979
5980 fiber_setup_done:
5981         if (current_link_up && current_duplex == DUPLEX_FULL)
5982                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5983
5984         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5985         if (tp->link_config.active_duplex == DUPLEX_HALF)
5986                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5987
5988         tw32_f(MAC_MODE, tp->mac_mode);
5989         udelay(40);
5990
5991         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5992
5993         tp->link_config.active_speed = current_speed;
5994         tp->link_config.active_duplex = current_duplex;
5995
5996         tg3_test_and_report_link_chg(tp, current_link_up);
5997         return err;
5998 }
5999
6000 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6001 {
6002         if (tp->serdes_counter) {
6003                 /* Give autoneg time to complete. */
6004                 tp->serdes_counter--;
6005                 return;
6006         }
6007
6008         if (!tp->link_up &&
6009             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6010                 u32 bmcr;
6011
6012                 tg3_readphy(tp, MII_BMCR, &bmcr);
6013                 if (bmcr & BMCR_ANENABLE) {
6014                         u32 phy1, phy2;
6015
6016                         /* Select shadow register 0x1f */
6017                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6018                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6019
6020                         /* Select expansion interrupt status register */
6021                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6022                                          MII_TG3_DSP_EXP1_INT_STAT);
6023                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6025
6026                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6027                                 /* We have signal detect and are not receiving
6028                                  * config code words, so the link is up via
6029                                  * parallel detection.
6030                                  */
6031
6032                                 bmcr &= ~BMCR_ANENABLE;
6033                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6034                                 tg3_writephy(tp, MII_BMCR, bmcr);
6035                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6036                         }
6037                 }
6038         } else if (tp->link_up &&
6039                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6040                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6041                 u32 phy2;
6042
6043                 /* Select expansion interrupt status register */
6044                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6045                                  MII_TG3_DSP_EXP1_INT_STAT);
6046                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6047                 if (phy2 & 0x20) {
6048                         u32 bmcr;
6049
6050                         /* Config code words received, turn on autoneg. */
6051                         tg3_readphy(tp, MII_BMCR, &bmcr);
6052                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6053
6054                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6055
6056                 }
6057         }
6058 }
6059
6060 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6061 {
6062         u32 val;
6063         int err;
6064
6065         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6066                 err = tg3_setup_fiber_phy(tp, force_reset);
6067         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6068                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6069         else
6070                 err = tg3_setup_copper_phy(tp, force_reset);
6071
6072         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6073                 u32 scale;
6074
6075                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6076                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6077                         scale = 65;
6078                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6079                         scale = 6;
6080                 else
6081                         scale = 12;
6082
6083                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6084                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6085                 tw32(GRC_MISC_CFG, val);
6086         }
6087
6088         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6089               (6 << TX_LENGTHS_IPG_SHIFT);
6090         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6091             tg3_asic_rev(tp) == ASIC_REV_5762)
6092                 val |= tr32(MAC_TX_LENGTHS) &
6093                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6094                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6095
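        /* Half-duplex gigabit gets a much larger slot time value (0xff)
         * than the 32 used for every other speed/duplex combination,
         * presumably to account for 802.3 carrier extension in
         * half-duplex 1000BASE operation.
         */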
6096         if (tp->link_config.active_speed == SPEED_1000 &&
6097             tp->link_config.active_duplex == DUPLEX_HALF)
6098                 tw32(MAC_TX_LENGTHS, val |
6099                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6100         else
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6103
6104         if (!tg3_flag(tp, 5705_PLUS)) {
6105                 if (tp->link_up) {
6106                         tw32(HOSTCC_STAT_COAL_TICKS,
6107                              tp->coal.stats_block_coalesce_usecs);
6108                 } else {
6109                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6110                 }
6111         }
6112
6113         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6114                 val = tr32(PCIE_PWR_MGMT_THRESH);
6115                 if (!tp->link_up)
6116                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6117                               tp->pwrmgmt_thresh;
6118                 else
6119                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6120                 tw32(PCIE_PWR_MGMT_THRESH, val);
6121         }
6122
6123         return err;
6124 }
6125
6126 /* tp->lock must be held */
6127 static u64 tg3_refclk_read(struct tg3 *tp)
6128 {
6129         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6130         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6131 }
6132
6133 /* tp->lock must be held */
6134 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6135 {
6136         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6137
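        /* The two 32-bit halves of the reference clock cannot be written
         * atomically, so the counter is stopped first and resumed only
         * after both halves are loaded, avoiding a torn 64-bit update.
         */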
6138         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6139         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6140         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6141         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6142 }
6143
6144 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6145 static inline void tg3_full_unlock(struct tg3 *tp);
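
/* Reports timestamping capabilities to user space; this is what backs
 * "ethtool -T <ifname>". Software timestamping is always advertised,
 * hardware timestamping only when the chip is PTP-capable.
 */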
6146 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6147 {
6148         struct tg3 *tp = netdev_priv(dev);
6149
6150         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6151                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6152                                 SOF_TIMESTAMPING_SOFTWARE;
6153
6154         if (tg3_flag(tp, PTP_CAPABLE)) {
6155                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6156                                         SOF_TIMESTAMPING_RX_HARDWARE |
6157                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6158         }
6159
6160         if (tp->ptp_clock)
6161                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6162         else
6163                 info->phc_index = -1;
6164
6165         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6166
6167         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6168                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6169                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6170                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6171         return 0;
6172 }
6173
6174 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6175 {
6176         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6177         bool neg_adj = false;
6178         u32 correction = 0;
6179
6180         if (ppb < 0) {
6181                 neg_adj = true;
6182                 ppb = -ppb;
6183         }
6184
6185         /* Frequency adjustment is performed in hardware with a 24-bit
6186          * accumulator and a programmable correction value. On each clock
6187          * cycle the correction value is added to the accumulator; when it
6188          * overflows, the time counter is incremented/decremented.
6189          *
6190          * The conversion from ppb to the correction value is therefore
6191          *              ppb * (1 << 24) / 1000000000
6192          */
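        /* For example, a +100 ppm (100000 ppb) adjustment gives
         *   100000 * 16777216 / 1000000000 = 1677 (truncated),
         * comfortably within the 24-bit correction field.
         */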
6193         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6194                      TG3_EAV_REF_CLK_CORRECT_MASK;
6195
6196         tg3_full_lock(tp, 0);
6197
6198         if (correction)
6199                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6200                      TG3_EAV_REF_CLK_CORRECT_EN |
6201                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6202         else
6203                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6204
6205         tg3_full_unlock(tp);
6206
6207         return 0;
6208 }
6209
6210 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6211 {
6212         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6213
6214         tg3_full_lock(tp, 0);
6215         tp->ptp_adjust += delta;
6216         tg3_full_unlock(tp);
6217
6218         return 0;
6219 }
6220
6221 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6222 {
6223         u64 ns;
6224         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6225
6226         tg3_full_lock(tp, 0);
6227         ns = tg3_refclk_read(tp);
6228         ns += tp->ptp_adjust;
6229         tg3_full_unlock(tp);
6230
6231         *ts = ns_to_timespec64(ns);
6232
6233         return 0;
6234 }
6235
6236 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6237                            const struct timespec64 *ts)
6238 {
6239         u64 ns;
6240         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241
6242         ns = timespec64_to_ns(ts);
6243
6244         tg3_full_lock(tp, 0);
6245         tg3_refclk_write(tp, ns);
6246         tp->ptp_adjust = 0;
6247         tg3_full_unlock(tp);
6248
6249         return 0;
6250 }
6251
6252 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6253                           struct ptp_clock_request *rq, int on)
6254 {
6255         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6256         u32 clock_ctl;
6257         int rval = 0;
6258
6259         switch (rq->type) {
6260         case PTP_CLK_REQ_PEROUT:
6261                 if (rq->perout.index != 0)
6262                         return -EINVAL;
6263
6264                 tg3_full_lock(tp, 0);
6265                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6266                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6267
6268                 if (on) {
6269                         u64 nsec;
6270
6271                         nsec = rq->perout.start.sec * 1000000000ULL +
6272                                rq->perout.start.nsec;
6273
6274                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6275                                 netdev_warn(tp->dev,
6276                                             "Device supports only a one-shot timesync output; period must be 0\n");
6277                                 rval = -EINVAL;
6278                                 goto err_out;
6279                         }
6280
6281                         if (nsec & (1ULL << 63)) {
6282                                 netdev_warn(tp->dev,
6283                                             "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6284                                 rval = -EINVAL;
6285                                 goto err_out;
6286                         }
6287
6288                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6289                         tw32(TG3_EAV_WATCHDOG0_MSB,
6290                              TG3_EAV_WATCHDOG0_EN |
6291                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6292
6293                         tw32(TG3_EAV_REF_CLCK_CTL,
6294                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6295                 } else {
6296                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6297                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6298                 }
6299
6300 err_out:
6301                 tg3_full_unlock(tp);
6302                 return rval;
6303
6304         default:
6305                 break;
6306         }
6307
6308         return -EOPNOTSUPP;
6309 }
6310
6311 static const struct ptp_clock_info tg3_ptp_caps = {
6312         .owner          = THIS_MODULE,
6313         .name           = "tg3 clock",
6314         .max_adj        = 250000000,
6315         .n_alarm        = 0,
6316         .n_ext_ts       = 0,
6317         .n_per_out      = 1,
6318         .n_pins         = 0,
6319         .pps            = 0,
6320         .adjfreq        = tg3_ptp_adjfreq,
6321         .adjtime        = tg3_ptp_adjtime,
6322         .gettime64      = tg3_ptp_gettime,
6323         .settime64      = tg3_ptp_settime,
6324         .enable         = tg3_ptp_enable,
6325 };
6326
6327 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6328                                      struct skb_shared_hwtstamps *timestamp)
6329 {
6330         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6331         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6332                                            tp->ptp_adjust);
6333 }
6334
6335 /* tp->lock must be held */
6336 static void tg3_ptp_init(struct tg3 *tp)
6337 {
6338         if (!tg3_flag(tp, PTP_CAPABLE))
6339                 return;
6340
6341         /* Initialize the hardware clock to the system time. */
6342         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6343         tp->ptp_adjust = 0;
6344         tp->ptp_info = tg3_ptp_caps;
6345 }
6346
6347 /* tp->lock must be held */
6348 static void tg3_ptp_resume(struct tg3 *tp)
6349 {
6350         if (!tg3_flag(tp, PTP_CAPABLE))
6351                 return;
6352
6353         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6354         tp->ptp_adjust = 0;
6355 }
6356
6357 static void tg3_ptp_fini(struct tg3 *tp)
6358 {
6359         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6360                 return;
6361
6362         ptp_clock_unregister(tp->ptp_clock);
6363         tp->ptp_clock = NULL;
6364         tp->ptp_adjust = 0;
6365 }
6366
6367 static inline int tg3_irq_sync(struct tg3 *tp)
6368 {
6369         return tp->irq_sync;
6370 }
6371
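/* Snapshot a block of registers into the dump buffer. dst is advanced
 * by the register offset first, so the buffer mirrors the register
 * map: the register at offset X lands at byte offset X of the buffer.
 */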
6372 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6373 {
6374         int i;
6375
6376         dst = (u32 *)((u8 *)dst + off);
6377         for (i = 0; i < len; i += sizeof(u32))
6378                 *dst++ = tr32(off + i);
6379 }
6380
6381 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6382 {
6383         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6384         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6385         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6386         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6387         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6388         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6389         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6390         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6391         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6392         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6393         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6394         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6395         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6396         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6397         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6398         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6399         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6400         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6401         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6402
6403         if (tg3_flag(tp, SUPPORT_MSIX))
6404                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6405
6406         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6407         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6408         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6409         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6410         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6412         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6413         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6414
6415         if (!tg3_flag(tp, 5705_PLUS)) {
6416                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6417                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6418                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6419         }
6420
6421         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6422         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6423         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6424         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6425         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6426
6427         if (tg3_flag(tp, NVRAM))
6428                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6429 }
6430
6431 static void tg3_dump_state(struct tg3 *tp)
6432 {
6433         int i;
6434         u32 *regs;
6435
6436         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6437         if (!regs)
6438                 return;
6439
6440         if (tg3_flag(tp, PCI_EXPRESS)) {
6441                 /* Read up to but not including private PCI registers */
6442                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6443                         regs[i / sizeof(u32)] = tr32(i);
6444         } else
6445                 tg3_dump_legacy_regs(tp, regs);
6446
6447         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6448                 if (!regs[i + 0] && !regs[i + 1] &&
6449                     !regs[i + 2] && !regs[i + 3])
6450                         continue;
6451
6452                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6453                            i * 4,
6454                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6455         }
6456
6457         kfree(regs);
6458
6459         for (i = 0; i < tp->irq_cnt; i++) {
6460                 struct tg3_napi *tnapi = &tp->napi[i];
6461
6462                 /* SW status block */
6463                 netdev_err(tp->dev,
6464                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6465                            i,
6466                            tnapi->hw_status->status,
6467                            tnapi->hw_status->status_tag,
6468                            tnapi->hw_status->rx_jumbo_consumer,
6469                            tnapi->hw_status->rx_consumer,
6470                            tnapi->hw_status->rx_mini_consumer,
6471                            tnapi->hw_status->idx[0].rx_producer,
6472                            tnapi->hw_status->idx[0].tx_consumer);
6473
6474                 netdev_err(tp->dev,
6475                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6476                            i,
6477                            tnapi->last_tag, tnapi->last_irq_tag,
6478                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6479                            tnapi->rx_rcb_ptr,
6480                            tnapi->prodring.rx_std_prod_idx,
6481                            tnapi->prodring.rx_std_cons_idx,
6482                            tnapi->prodring.rx_jmb_prod_idx,
6483                            tnapi->prodring.rx_jmb_cons_idx);
6484         }
6485 }
6486
6487 /* This is called whenever we suspect that the system chipset is re-
6488  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6489  * is bogus tx completions. We try to recover by setting the
6490  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6491  * in the workqueue.
6492  */
6493 static void tg3_tx_recover(struct tg3 *tp)
6494 {
6495         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6496                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6497
6498         netdev_warn(tp->dev,
6499                     "The system may be re-ordering memory-mapped I/O "
6500                     "cycles to the network device, attempting to recover. "
6501                     "Please report the problem to the driver maintainer "
6502                     "and include system chipset information.\n");
6503
6504         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6505 }
6506
6507 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6508 {
6509         /* Tell compiler to fetch tx indices from memory. */
6510         barrier();
6511         return tnapi->tx_pending -
6512                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6513 }
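
/* For example, assuming the usual 512-entry TX ring with tx_prod = 10
 * and tx_cons = 500: (10 - 500) & 511 = 22 descriptors are in flight,
 * leaving tx_pending - 22 free slots. The mask handles the index
 * wraparound.
 */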
6514
6515 /* Tigon3 never reports partial packet sends.  So we do not
6516  * need special logic to handle SKBs that have not had all
6517  * of their frags sent yet, like SunGEM does.
6518  */
6519 static void tg3_tx(struct tg3_napi *tnapi)
6520 {
6521         struct tg3 *tp = tnapi->tp;
6522         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6523         u32 sw_idx = tnapi->tx_cons;
6524         struct netdev_queue *txq;
6525         int index = tnapi - tp->napi;
6526         unsigned int pkts_compl = 0, bytes_compl = 0;
6527
6528         if (tg3_flag(tp, ENABLE_TSS))
6529                 index--;
6530
6531         txq = netdev_get_tx_queue(tp->dev, index);
6532
6533         while (sw_idx != hw_idx) {
6534                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6535                 struct sk_buff *skb = ri->skb;
6536                 int i, tx_bug = 0;
6537
6538                 if (unlikely(skb == NULL)) {
6539                         tg3_tx_recover(tp);
6540                         return;
6541                 }
6542
6543                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6544                         struct skb_shared_hwtstamps timestamp;
6545                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6546                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6547
6548                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6549
6550                         skb_tstamp_tx(skb, &timestamp);
6551                 }
6552
6553                 pci_unmap_single(tp->pdev,
6554                                  dma_unmap_addr(ri, mapping),
6555                                  skb_headlen(skb),
6556                                  PCI_DMA_TODEVICE);
6557
6558                 ri->skb = NULL;
6559
6560                 while (ri->fragmented) {
6561                         ri->fragmented = false;
6562                         sw_idx = NEXT_TX(sw_idx);
6563                         ri = &tnapi->tx_buffers[sw_idx];
6564                 }
6565
6566                 sw_idx = NEXT_TX(sw_idx);
6567
6568                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6569                         ri = &tnapi->tx_buffers[sw_idx];
6570                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6571                                 tx_bug = 1;
6572
6573                         pci_unmap_page(tp->pdev,
6574                                        dma_unmap_addr(ri, mapping),
6575                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6576                                        PCI_DMA_TODEVICE);
6577
6578                         while (ri->fragmented) {
6579                                 ri->fragmented = false;
6580                                 sw_idx = NEXT_TX(sw_idx);
6581                                 ri = &tnapi->tx_buffers[sw_idx];
6582                         }
6583
6584                         sw_idx = NEXT_TX(sw_idx);
6585                 }
6586
6587                 pkts_compl++;
6588                 bytes_compl += skb->len;
6589
6590                 dev_consume_skb_any(skb);
6591
6592                 if (unlikely(tx_bug)) {
6593                         tg3_tx_recover(tp);
6594                         return;
6595                 }
6596         }
6597
6598         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6599
6600         tnapi->tx_cons = sw_idx;
6601
6602         /* Need to make the tx_cons update visible to tg3_start_xmit()
6603          * before checking for netif_queue_stopped().  Without the
6604          * memory barrier, there is a small possibility that tg3_start_xmit()
6605          * will miss it and cause the queue to be stopped forever.
6606          */
6607         smp_mb();
6608
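        /* The unlocked check below is a cheap fast path; the re-check
         * under the tx queue lock closes the race with a concurrent
         * tg3_start_xmit() stopping the queue after we sampled it.
         */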
6609         if (unlikely(netif_tx_queue_stopped(txq) &&
6610                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6611                 __netif_tx_lock(txq, smp_processor_id());
6612                 if (netif_tx_queue_stopped(txq) &&
6613                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6614                         netif_tx_wake_queue(txq);
6615                 __netif_tx_unlock(txq);
6616         }
6617 }
6618
6619 static void tg3_frag_free(bool is_frag, void *data)
6620 {
6621         if (is_frag)
6622                 skb_free_frag(data);
6623         else
6624                 kfree(data);
6625 }
6626
6627 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6628 {
6629         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6630                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6631
6632         if (!ri->data)
6633                 return;
6634
6635         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6636                          map_sz, PCI_DMA_FROMDEVICE);
6637         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6638         ri->data = NULL;
6639 }
6640
6641
6642 /* Returns the size of the skb allocated, or < 0 on error.
6643  *
6644  * We only need to fill in the address because the other members
6645  * of the RX descriptor are invariant; see tg3_init_rings.
6646  *
6647  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6648  * posting buffers we only dirty the first cache line of the RX
6649  * descriptor (containing the address), whereas for the RX status
6650  * buffers the cpu only reads the last cacheline of the RX descriptor
6651  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6652  */
6653 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6654                              u32 opaque_key, u32 dest_idx_unmasked,
6655                              unsigned int *frag_size)
6656 {
6657         struct tg3_rx_buffer_desc *desc;
6658         struct ring_info *map;
6659         u8 *data;
6660         dma_addr_t mapping;
6661         int skb_size, data_size, dest_idx;
6662
6663         switch (opaque_key) {
6664         case RXD_OPAQUE_RING_STD:
6665                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6666                 desc = &tpr->rx_std[dest_idx];
6667                 map = &tpr->rx_std_buffers[dest_idx];
6668                 data_size = tp->rx_pkt_map_sz;
6669                 break;
6670
6671         case RXD_OPAQUE_RING_JUMBO:
6672                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6673                 desc = &tpr->rx_jmb[dest_idx].std;
6674                 map = &tpr->rx_jmb_buffers[dest_idx];
6675                 data_size = TG3_RX_JMB_MAP_SZ;
6676                 break;
6677
6678         default:
6679                 return -EINVAL;
6680         }
6681
6682         /* Do not overwrite any of the map or rp information
6683          * until we are sure we can commit to a new buffer.
6684          *
6685          * Callers depend upon this behavior and assume that
6686          * we leave everything unchanged if we fail.
6687          */
6688         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6689                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
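        /* Standard-MTU buffers normally fit within a page and come from
         * the page-frag allocator; jumbo buffers typically exceed
         * PAGE_SIZE and fall back to kmalloc().
         */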
6690         if (skb_size <= PAGE_SIZE) {
6691                 data = netdev_alloc_frag(skb_size);
6692                 *frag_size = skb_size;
6693         } else {
6694                 data = kmalloc(skb_size, GFP_ATOMIC);
6695                 *frag_size = 0;
6696         }
6697         if (!data)
6698                 return -ENOMEM;
6699
6700         mapping = pci_map_single(tp->pdev,
6701                                  data + TG3_RX_OFFSET(tp),
6702                                  data_size,
6703                                  PCI_DMA_FROMDEVICE);
6704         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6705                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6706                 return -EIO;
6707         }
6708
6709         map->data = data;
6710         dma_unmap_addr_set(map, mapping, mapping);
6711
6712         desc->addr_hi = ((u64)mapping >> 32);
6713         desc->addr_lo = ((u64)mapping & 0xffffffff);
6714
6715         return data_size;
6716 }
6717
6718 /* We only need to move over in the address because the other
6719  * members of the RX descriptor are invariant.  See notes above
6720  * tg3_alloc_rx_data for full details.
6721  */
6722 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6723                            struct tg3_rx_prodring_set *dpr,
6724                            u32 opaque_key, int src_idx,
6725                            u32 dest_idx_unmasked)
6726 {
6727         struct tg3 *tp = tnapi->tp;
6728         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6729         struct ring_info *src_map, *dest_map;
6730         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6731         int dest_idx;
6732
6733         switch (opaque_key) {
6734         case RXD_OPAQUE_RING_STD:
6735                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6736                 dest_desc = &dpr->rx_std[dest_idx];
6737                 dest_map = &dpr->rx_std_buffers[dest_idx];
6738                 src_desc = &spr->rx_std[src_idx];
6739                 src_map = &spr->rx_std_buffers[src_idx];
6740                 break;
6741
6742         case RXD_OPAQUE_RING_JUMBO:
6743                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6744                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6745                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6746                 src_desc = &spr->rx_jmb[src_idx].std;
6747                 src_map = &spr->rx_jmb_buffers[src_idx];
6748                 break;
6749
6750         default:
6751                 return;
6752         }
6753
6754         dest_map->data = src_map->data;
6755         dma_unmap_addr_set(dest_map, mapping,
6756                            dma_unmap_addr(src_map, mapping));
6757         dest_desc->addr_hi = src_desc->addr_hi;
6758         dest_desc->addr_lo = src_desc->addr_lo;
6759
6760         /* Ensure that the update to the data pointer happens after the
6761          * physical addresses have been transferred to the new BD location.
6762          */
6763         smp_wmb();
6764
6765         src_map->data = NULL;
6766 }
6767
6768 /* The RX ring scheme is composed of multiple rings which post fresh
6769  * buffers to the chip, and one special ring the chip uses to report
6770  * status back to the host.
6771  *
6772  * The special ring reports the status of received packets to the
6773  * host.  The chip does not write into the original descriptor the
6774  * RX buffer was obtained from.  The chip simply takes the original
6775  * descriptor as provided by the host, updates the status and length
6776  * field, then writes this into the next status ring entry.
6777  *
6778  * Each ring the host uses to post buffers to the chip is described
6779  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6780  * it is first placed into the on-chip ram.  When the packet's length
6781  * is known, it walks down the TG3_BDINFO entries to select the ring.
6782  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6783  * which is within the range of the new packet's length is chosen.
6784  *
6785  * The "separate ring for rx status" scheme may sound queer, but it makes
6786  * sense from a cache coherency perspective.  If only the host writes
6787  * to the buffer post rings, and only the chip writes to the rx status
6788  * rings, then cache lines never move beyond shared-modified state.
6789  * If both the host and chip were to write into the same ring, cache line
6790  * eviction could occur since both entities want it in an exclusive state.
6791  */
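
/* A rough sketch of the ring selection described above (illustrative
 * only, not driver code): the chip effectively does
 *
 *      for each TG3_BDINFO entry, in order:
 *              if (pkt_len <= bdinfo->maxlen)
 *                      take a buffer from this ring;
 *
 * so standard-sized frames land on the std ring and larger frames on
 * the jumbo ring when it is configured.
 */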
6792 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6793 {
6794         struct tg3 *tp = tnapi->tp;
6795         u32 work_mask, rx_std_posted = 0;
6796         u32 std_prod_idx, jmb_prod_idx;
6797         u32 sw_idx = tnapi->rx_rcb_ptr;
6798         u16 hw_idx;
6799         int received;
6800         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6801
6802         hw_idx = *(tnapi->rx_rcb_prod_idx);
6803         /*
6804          * We need to order the read of hw_idx and the read of
6805          * the opaque cookie.
6806          */
6807         rmb();
6808         work_mask = 0;
6809         received = 0;
6810         std_prod_idx = tpr->rx_std_prod_idx;
6811         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6812         while (sw_idx != hw_idx && budget > 0) {
6813                 struct ring_info *ri;
6814                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6815                 unsigned int len;
6816                 struct sk_buff *skb;
6817                 dma_addr_t dma_addr;
6818                 u32 opaque_key, desc_idx, *post_ptr;
6819                 u8 *data;
6820                 u64 tstamp = 0;
6821
6822                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6823                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6824                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6825                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6826                         dma_addr = dma_unmap_addr(ri, mapping);
6827                         data = ri->data;
6828                         post_ptr = &std_prod_idx;
6829                         rx_std_posted++;
6830                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6831                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6832                         dma_addr = dma_unmap_addr(ri, mapping);
6833                         data = ri->data;
6834                         post_ptr = &jmb_prod_idx;
6835                 } else
6836                         goto next_pkt_nopost;
6837
6838                 work_mask |= opaque_key;
6839
6840                 if (desc->err_vlan & RXD_ERR_MASK) {
6841                 drop_it:
6842                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6843                                        desc_idx, *post_ptr);
6844                 drop_it_no_recycle:
6845                         /* Other statistics are tracked by the card itself. */
6846                         tp->rx_dropped++;
6847                         goto next_pkt;
6848                 }
6849
6850                 prefetch(data + TG3_RX_OFFSET(tp));
6851                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6852                       ETH_FCS_LEN;
6853
6854                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6855                      RXD_FLAG_PTPSTAT_PTPV1 ||
6856                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6857                      RXD_FLAG_PTPSTAT_PTPV2) {
6858                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6859                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6860                 }
6861
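                /* Copy-break: large packets keep their DMA buffer, which
                 * becomes the skb head via build_skb() while a fresh
                 * buffer is posted in its place; small packets are copied
                 * into a new skb and the original buffer is recycled.
                 */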
6862                 if (len > TG3_RX_COPY_THRESH(tp)) {
6863                         int skb_size;
6864                         unsigned int frag_size;
6865
6866                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6867                                                     *post_ptr, &frag_size);
6868                         if (skb_size < 0)
6869                                 goto drop_it;
6870
6871                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6872                                          PCI_DMA_FROMDEVICE);
6873
6874                         /* Ensure that the update to the data happens
6875                          * after the usage of the old DMA mapping.
6876                          */
6877                         smp_wmb();
6878
6879                         ri->data = NULL;
6880
6881                         skb = build_skb(data, frag_size);
6882                         if (!skb) {
6883                                 tg3_frag_free(frag_size != 0, data);
6884                                 goto drop_it_no_recycle;
6885                         }
6886                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6887                 } else {
6888                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6889                                        desc_idx, *post_ptr);
6890
6891                         skb = netdev_alloc_skb(tp->dev,
6892                                                len + TG3_RAW_IP_ALIGN);
6893                         if (skb == NULL)
6894                                 goto drop_it_no_recycle;
6895
6896                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6897                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6898                         memcpy(skb->data,
6899                                data + TG3_RX_OFFSET(tp),
6900                                len);
6901                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6902                 }
6903
6904                 skb_put(skb, len);
6905                 if (tstamp)
6906                         tg3_hwclock_to_timestamp(tp, tstamp,
6907                                                  skb_hwtstamps(skb));
6908
6909                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6910                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6911                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6912                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6913                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6914                 else
6915                         skb_checksum_none_assert(skb);
6916
6917                 skb->protocol = eth_type_trans(skb, tp->dev);
6918
6919                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6920                     skb->protocol != htons(ETH_P_8021Q) &&
6921                     skb->protocol != htons(ETH_P_8021AD)) {
6922                         dev_kfree_skb_any(skb);
6923                         goto drop_it_no_recycle;
6924                 }
6925
6926                 if (desc->type_flags & RXD_FLAG_VLAN &&
6927                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6928                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6929                                                desc->err_vlan & RXD_VLAN_MASK);
6930
6931                 napi_gro_receive(&tnapi->napi, skb);
6932
6933                 received++;
6934                 budget--;
6935
6936 next_pkt:
6937                 (*post_ptr)++;
6938
6939                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6940                         tpr->rx_std_prod_idx = std_prod_idx &
6941                                                tp->rx_std_ring_mask;
6942                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6943                                      tpr->rx_std_prod_idx);
6944                         work_mask &= ~RXD_OPAQUE_RING_STD;
6945                         rx_std_posted = 0;
6946                 }
6947 next_pkt_nopost:
6948                 sw_idx++;
6949                 sw_idx &= tp->rx_ret_ring_mask;
6950
6951                 /* Refresh hw_idx to see if there is new work */
6952                 if (sw_idx == hw_idx) {
6953                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6954                         rmb();
6955                 }
6956         }
6957
6958         /* ACK the status ring. */
6959         tnapi->rx_rcb_ptr = sw_idx;
6960         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6961
6962         /* Refill RX ring(s). */
6963         if (!tg3_flag(tp, ENABLE_RSS)) {
6964                 /* Sync BD data before updating mailbox */
6965                 wmb();
6966
6967                 if (work_mask & RXD_OPAQUE_RING_STD) {
6968                         tpr->rx_std_prod_idx = std_prod_idx &
6969                                                tp->rx_std_ring_mask;
6970                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6971                                      tpr->rx_std_prod_idx);
6972                 }
6973                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6974                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6975                                                tp->rx_jmb_ring_mask;
6976                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6977                                      tpr->rx_jmb_prod_idx);
6978                 }
6979                 mmiowb();
6980         } else if (work_mask) {
6981                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6982                  * updated before the producer indices can be updated.
6983                  */
6984                 smp_wmb();
6985
6986                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6987                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6988
6989                 if (tnapi != &tp->napi[1]) {
6990                         tp->rx_refill = true;
6991                         napi_schedule(&tp->napi[1].napi);
6992                 }
6993         }
6994
6995         return received;
6996 }
6997
6998 static void tg3_poll_link(struct tg3 *tp)
6999 {
7000         /* handle link change and other phy events */
7001         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7002                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7003
7004                 if (sblk->status & SD_STATUS_LINK_CHG) {
7005                         sblk->status = SD_STATUS_UPDATED |
7006                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7007                         spin_lock(&tp->lock);
7008                         if (tg3_flag(tp, USE_PHYLIB)) {
7009                                 tw32_f(MAC_STATUS,
7010                                      (MAC_STATUS_SYNC_CHANGED |
7011                                       MAC_STATUS_CFG_CHANGED |
7012                                       MAC_STATUS_MI_COMPLETION |
7013                                       MAC_STATUS_LNKSTATE_CHANGED));
7014                                 udelay(40);
7015                         } else
7016                                 tg3_setup_phy(tp, false);
7017                         spin_unlock(&tp->lock);
7018                 }
7019         }
7020 }
7021
7022 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7023                                 struct tg3_rx_prodring_set *dpr,
7024                                 struct tg3_rx_prodring_set *spr)
7025 {
7026         u32 si, di, cpycnt, src_prod_idx;
7027         int i, err = 0;
7028
7029         while (1) {
7030                 src_prod_idx = spr->rx_std_prod_idx;
7031
7032                 /* Make sure updates to the rx_std_buffers[] entries and the
7033                  * standard producer index are seen in the correct order.
7034                  */
7035                 smp_rmb();
7036
7037                 if (spr->rx_std_cons_idx == src_prod_idx)
7038                         break;
7039
7040                 if (spr->rx_std_cons_idx < src_prod_idx)
7041                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7042                 else
7043                         cpycnt = tp->rx_std_ring_mask + 1 -
7044                                  spr->rx_std_cons_idx;
7045
7046                 cpycnt = min(cpycnt,
7047                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
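                /* For example, with a 512-entry ring, cons_idx = 500 and
                 * prod_idx = 10: the first pass copies the 12 entries up
                 * to the wrap point, and the next loop iteration moves
                 * the remaining 10 (less any clamping by the space left
                 * in the destination ring).
                 */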
7048
7049                 si = spr->rx_std_cons_idx;
7050                 di = dpr->rx_std_prod_idx;
7051
7052                 for (i = di; i < di + cpycnt; i++) {
7053                         if (dpr->rx_std_buffers[i].data) {
7054                                 cpycnt = i - di;
7055                                 err = -ENOSPC;
7056                                 break;
7057                         }
7058                 }
7059
7060                 if (!cpycnt)
7061                         break;
7062
7063                 /* Ensure that updates to the rx_std_buffers ring and the
7064                  * shadowed hardware producer ring from tg3_recycle_skb() are
7065                  * ordered correctly WRT the skb check above.
7066                  */
7067                 smp_rmb();
7068
7069                 memcpy(&dpr->rx_std_buffers[di],
7070                        &spr->rx_std_buffers[si],
7071                        cpycnt * sizeof(struct ring_info));
7072
7073                 for (i = 0; i < cpycnt; i++, di++, si++) {
7074                         struct tg3_rx_buffer_desc *sbd, *dbd;
7075                         sbd = &spr->rx_std[si];
7076                         dbd = &dpr->rx_std[di];
7077                         dbd->addr_hi = sbd->addr_hi;
7078                         dbd->addr_lo = sbd->addr_lo;
7079                 }
7080
7081                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7082                                        tp->rx_std_ring_mask;
7083                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7084                                        tp->rx_std_ring_mask;
7085         }
7086
7087         while (1) {
7088                 src_prod_idx = spr->rx_jmb_prod_idx;
7089
7090                 /* Make sure updates to the rx_jmb_buffers[] entries and
7091                  * the jumbo producer index are seen in the correct order.
7092                  */
7093                 smp_rmb();
7094
7095                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7096                         break;
7097
7098                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7099                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7100                 else
7101                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7102                                  spr->rx_jmb_cons_idx;
7103
7104                 cpycnt = min(cpycnt,
7105                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7106
7107                 si = spr->rx_jmb_cons_idx;
7108                 di = dpr->rx_jmb_prod_idx;
7109
7110                 for (i = di; i < di + cpycnt; i++) {
7111                         if (dpr->rx_jmb_buffers[i].data) {
7112                                 cpycnt = i - di;
7113                                 err = -ENOSPC;
7114                                 break;
7115                         }
7116                 }
7117
7118                 if (!cpycnt)
7119                         break;
7120
7121                 /* Ensure that updates to the rx_jmb_buffers ring and the
7122                  * shadowed hardware producer ring from tg3_recycle_skb() are
7123                  * ordered correctly WRT the skb check above.
7124                  */
7125                 smp_rmb();
7126
7127                 memcpy(&dpr->rx_jmb_buffers[di],
7128                        &spr->rx_jmb_buffers[si],
7129                        cpycnt * sizeof(struct ring_info));
7130
7131                 for (i = 0; i < cpycnt; i++, di++, si++) {
7132                         struct tg3_rx_buffer_desc *sbd, *dbd;
7133                         sbd = &spr->rx_jmb[si].std;
7134                         dbd = &dpr->rx_jmb[di].std;
7135                         dbd->addr_hi = sbd->addr_hi;
7136                         dbd->addr_lo = sbd->addr_lo;
7137                 }
7138
7139                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7140                                        tp->rx_jmb_ring_mask;
7141                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7142                                        tp->rx_jmb_ring_mask;
7143         }
7144
7145         return err;
7146 }
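
/* Worked example of the wraparound copy arithmetic above (illustrative,
 * not from the original source): with a 512-entry standard ring
 * (tp->rx_std_ring_mask == 0x1ff), a source consumer index of 500 and a
 * source producer index of 10, the producer has wrapped, so the first
 * pass copies 512 - 500 = 12 entries up to the end of the ring and the
 * next loop iteration copies the remaining 10 entries from index 0.
 */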
7147
7148 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7149 {
7150         struct tg3 *tp = tnapi->tp;
7151
7152         /* run TX completion thread */
7153         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7154                 tg3_tx(tnapi);
7155                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7156                         return work_done;
7157         }
7158
7159         if (!tnapi->rx_rcb_prod_idx)
7160                 return work_done;
7161
7162         /* run RX thread, within the bounds set by NAPI.
7163          * All RX "locking" is done by ensuring outside
7164          * code synchronizes with tg3->napi.poll()
7165          */
7166         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7167                 work_done += tg3_rx(tnapi, budget - work_done);
7168
7169         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7170                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7171                 int i, err = 0;
7172                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7173                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7174
7175                 tp->rx_refill = false;
7176                 for (i = 1; i <= tp->rxq_cnt; i++)
7177                         err |= tg3_rx_prodring_xfer(tp, dpr,
7178                                                     &tp->napi[i].prodring);
7179
7180                 wmb();
7181
7182                 if (std_prod_idx != dpr->rx_std_prod_idx)
7183                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7184                                      dpr->rx_std_prod_idx);
7185
7186                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7187                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7188                                      dpr->rx_jmb_prod_idx);
7189
7190                 mmiowb();
7191
7192                 if (err)
7193                         tw32_f(HOSTCC_MODE, tp->coal_now);
7194         }
7195
7196         return work_done;
7197 }
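
/* Illustrative note: if tg3_rx_prodring_xfer() hits an occupied
 * destination slot it returns -ENOSPC, so err above ends up non-zero
 * and HOSTCC_MODE is written with tp->coal_now.  The reading here
 * (an inference from the code, not a documented guarantee) is that
 * this forces an immediate coalescing event so the blocked transfer
 * is retried on a subsequent poll.
 */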
7198
7199 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7200 {
7201         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7202                 schedule_work(&tp->reset_task);
7203 }
7204
7205 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7206 {
7207         cancel_work_sync(&tp->reset_task);
7208         tg3_flag_clear(tp, RESET_TASK_PENDING);
7209         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7210 }
7211
7212 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7213 {
7214         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7215         struct tg3 *tp = tnapi->tp;
7216         int work_done = 0;
7217         struct tg3_hw_status *sblk = tnapi->hw_status;
7218
7219         while (1) {
7220                 work_done = tg3_poll_work(tnapi, work_done, budget);
7221
7222                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7223                         goto tx_recovery;
7224
7225                 if (unlikely(work_done >= budget))
7226                         break;
7227
7228                 /* tnapi->last_tag is used in the mailbox write below
7229                  * to tell the hw how much work has been processed,
7230                  * so we must read it before checking for more work.
7231                  */
7232                 tnapi->last_tag = sblk->status_tag;
7233                 tnapi->last_irq_tag = tnapi->last_tag;
7234                 rmb();
7235
7236                 /* check for RX/TX work to do */
7237                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7238                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7239
7240                         /* This test here is not race free, but will reduce
7241                          * the number of interrupts by looping again.
7242                          */
7243                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7244                                 continue;
7245
7246                         napi_complete_done(napi, work_done);
7247                         /* Reenable interrupts. */
7248                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7249
7250                         /* This test here is synchronized by napi_schedule()
7251                          * and napi_complete() to close the race condition.
7252                          */
7253                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7254                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7255                                                   HOSTCC_MODE_ENABLE |
7256                                                   tnapi->coal_now);
7257                         }
7258                         mmiowb();
7259                         break;
7260                 }
7261         }
7262
7263         return work_done;
7264
7265 tx_recovery:
7266         /* work_done is guaranteed to be less than budget. */
7267         napi_complete(napi);
7268         tg3_reset_task_schedule(tp);
7269         return work_done;
7270 }
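
/* Illustrative note on the tag acknowledgement above: in tagged-status
 * mode the interrupt mailbox write of last_tag << 24 both re-enables
 * the vector and tells the hardware which status tag the driver has
 * consumed; if the hardware has produced a newer tag in the meantime
 * it re-asserts the interrupt rather than waiting for new work.
 */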
7271
7272 static void tg3_process_error(struct tg3 *tp)
7273 {
7274         u32 val;
7275         bool real_error = false;
7276
7277         if (tg3_flag(tp, ERROR_PROCESSED))
7278                 return;
7279
7280         /* Check Flow Attention register */
7281         val = tr32(HOSTCC_FLOW_ATTN);
7282         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7283                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7284                 real_error = true;
7285         }
7286
7287         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7288                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7289                 real_error = true;
7290         }
7291
7292         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7293                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7294                 real_error = true;
7295         }
7296
7297         if (!real_error)
7298                 return;
7299
7300         tg3_dump_state(tp);
7301
7302         tg3_flag_set(tp, ERROR_PROCESSED);
7303         tg3_reset_task_schedule(tp);
7304 }
7305
7306 static int tg3_poll(struct napi_struct *napi, int budget)
7307 {
7308         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7309         struct tg3 *tp = tnapi->tp;
7310         int work_done = 0;
7311         struct tg3_hw_status *sblk = tnapi->hw_status;
7312
7313         while (1) {
7314                 if (sblk->status & SD_STATUS_ERROR)
7315                         tg3_process_error(tp);
7316
7317                 tg3_poll_link(tp);
7318
7319                 work_done = tg3_poll_work(tnapi, work_done, budget);
7320
7321                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7322                         goto tx_recovery;
7323
7324                 if (unlikely(work_done >= budget))
7325                         break;
7326
7327                 if (tg3_flag(tp, TAGGED_STATUS)) {
7328                         /* tnapi->last_tag is used in tg3_int_reenable() below
7329                          * to tell the hw how much work has been processed,
7330                          * so we must read it before checking for more work.
7331                          */
7332                         tnapi->last_tag = sblk->status_tag;
7333                         tnapi->last_irq_tag = tnapi->last_tag;
7334                         rmb();
7335                 } else
7336                         sblk->status &= ~SD_STATUS_UPDATED;
7337
7338                 if (likely(!tg3_has_work(tnapi))) {
7339                         napi_complete_done(napi, work_done);
7340                         tg3_int_reenable(tnapi);
7341                         break;
7342                 }
7343         }
7344
7345         return work_done;
7346
7347 tx_recovery:
7348         /* work_done is guaranteed to be less than budget. */
7349         napi_complete(napi);
7350         tg3_reset_task_schedule(tp);
7351         return work_done;
7352 }
7353
7354 static void tg3_napi_disable(struct tg3 *tp)
7355 {
7356         int i;
7357
7358         for (i = tp->irq_cnt - 1; i >= 0; i--)
7359                 napi_disable(&tp->napi[i].napi);
7360 }
7361
7362 static void tg3_napi_enable(struct tg3 *tp)
7363 {
7364         int i;
7365
7366         for (i = 0; i < tp->irq_cnt; i++)
7367                 napi_enable(&tp->napi[i].napi);
7368 }
7369
7370 static void tg3_napi_init(struct tg3 *tp)
7371 {
7372         int i;
7373
7374         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7375         for (i = 1; i < tp->irq_cnt; i++)
7376                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7377 }
7378
7379 static void tg3_napi_fini(struct tg3 *tp)
7380 {
7381         int i;
7382
7383         for (i = 0; i < tp->irq_cnt; i++)
7384                 netif_napi_del(&tp->napi[i].napi);
7385 }
7386
7387 static inline void tg3_netif_stop(struct tg3 *tp)
7388 {
7389         netif_trans_update(tp->dev);    /* prevent tx timeout */
7390         tg3_napi_disable(tp);
7391         netif_carrier_off(tp->dev);
7392         netif_tx_disable(tp->dev);
7393 }
7394
7395 /* tp->lock must be held */
7396 static inline void tg3_netif_start(struct tg3 *tp)
7397 {
7398         tg3_ptp_resume(tp);
7399
7400         /* NOTE: unconditional netif_tx_wake_all_queues is only
7401          * appropriate so long as all callers are assured to
7402          * have free tx slots (such as after tg3_init_hw)
7403          */
7404         netif_tx_wake_all_queues(tp->dev);
7405
7406         if (tp->link_up)
7407                 netif_carrier_on(tp->dev);
7408
7409         tg3_napi_enable(tp);
7410         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7411         tg3_enable_ints(tp);
7412 }
7413
7414 static void tg3_irq_quiesce(struct tg3 *tp)
7415         __releases(tp->lock)
7416         __acquires(tp->lock)
7417 {
7418         int i;
7419
7420         BUG_ON(tp->irq_sync);
7421
7422         tp->irq_sync = 1;
7423         smp_mb();
7424
7425         spin_unlock_bh(&tp->lock);
7426
7427         for (i = 0; i < tp->irq_cnt; i++)
7428                 synchronize_irq(tp->napi[i].irq_vec);
7429
7430         spin_lock_bh(&tp->lock);
7431 }
7432
7433 /* Fully shut down all tg3 driver activity elsewhere in the system.
7434  * If irq_sync is non-zero, in-flight IRQ handlers are synchronized
7435  * against as well.  Most of the time this is only necessary when
7436  * shutting down the device.
7437  */
7438 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7439 {
7440         spin_lock_bh(&tp->lock);
7441         if (irq_sync)
7442                 tg3_irq_quiesce(tp);
7443 }
7444
7445 static inline void tg3_full_unlock(struct tg3 *tp)
7446 {
7447         spin_unlock_bh(&tp->lock);
7448 }
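
/* Usage sketch, modeled on the driver's shutdown path (illustrative):
 * configuration paths take the full lock with irq_sync set so that
 * in-flight interrupt handlers are drained before touching hardware:
 *
 *	tg3_full_lock(tp, 1);
 *	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
 *	tg3_full_unlock(tp);
 */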
7449
7450 /* One-shot MSI handler - Chip automatically disables interrupt
7451  * after sending MSI so driver doesn't have to do it.
7452  */
7453 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7454 {
7455         struct tg3_napi *tnapi = dev_id;
7456         struct tg3 *tp = tnapi->tp;
7457
7458         prefetch(tnapi->hw_status);
7459         if (tnapi->rx_rcb)
7460                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7461
7462         if (likely(!tg3_irq_sync(tp)))
7463                 napi_schedule(&tnapi->napi);
7464
7465         return IRQ_HANDLED;
7466 }
7467
7468 /* MSI ISR - No need to check for interrupt sharing and no need to
7469  * flush status block and interrupt mailbox. PCI ordering rules
7470  * guarantee that MSI will arrive after the status block.
7471  */
7472 static irqreturn_t tg3_msi(int irq, void *dev_id)
7473 {
7474         struct tg3_napi *tnapi = dev_id;
7475         struct tg3 *tp = tnapi->tp;
7476
7477         prefetch(tnapi->hw_status);
7478         if (tnapi->rx_rcb)
7479                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7480         /*
7481          * Writing any value to intr-mbox-0 clears PCI INTA# and
7482          * chip-internal interrupt pending events.
7483          * Writing non-zero to intr-mbox-0 additionally tells the
7484          * NIC to stop sending us irqs, engaging "in-intr-handler"
7485          * event coalescing.
7486          */
7487         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7488         if (likely(!tg3_irq_sync(tp)))
7489                 napi_schedule(&tnapi->napi);
7490
7491         return IRQ_RETVAL(1);
7492 }
7493
7494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7495 {
7496         struct tg3_napi *tnapi = dev_id;
7497         struct tg3 *tp = tnapi->tp;
7498         struct tg3_hw_status *sblk = tnapi->hw_status;
7499         unsigned int handled = 1;
7500
7501         /* In INTx mode, it is possible for the interrupt to arrive at
7502          * the CPU before the status block that was posted ahead of it.
7503          * Reading the PCI State register will confirm whether the
7504          * interrupt is ours and will flush the status block.
7505          */
7506         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7507                 if (tg3_flag(tp, CHIP_RESETTING) ||
7508                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7509                         handled = 0;
7510                         goto out;
7511                 }
7512         }
7513
7514         /*
7515          * Writing any value to intr-mbox-0 clears PCI INTA# and
7516          * chip-internal interrupt pending events.
7517          * Writing non-zero to intr-mbox-0 additionally tells the
7518          * NIC to stop sending us irqs, engaging "in-intr-handler"
7519          * event coalescing.
7520          *
7521          * Flush the mailbox to de-assert the IRQ immediately to prevent
7522          * spurious interrupts.  The flush impacts performance but
7523          * excessive spurious interrupts can be worse in some cases.
7524          */
7525         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7526         if (tg3_irq_sync(tp))
7527                 goto out;
7528         sblk->status &= ~SD_STATUS_UPDATED;
7529         if (likely(tg3_has_work(tnapi))) {
7530                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7531                 napi_schedule(&tnapi->napi);
7532         } else {
7533                 /* No work, shared interrupt perhaps?  re-enable
7534                  * interrupts, and flush that PCI write
7535                  */
7536                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7537                                0x00000000);
7538         }
7539 out:
7540         return IRQ_RETVAL(handled);
7541 }
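
/* Illustrative summary of the INTx mailbox protocol used above: writing
 * 0x00000001 to MAILBOX_INTERRUPT_0 de-asserts INTA# and keeps further
 * interrupts off while the handler runs, while writing 0x00000000 (the
 * no-work path) re-enables them.  The _f variants flush the posted PCI
 * write so the de-assertion takes effect immediately.
 */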
7542
7543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7544 {
7545         struct tg3_napi *tnapi = dev_id;
7546         struct tg3 *tp = tnapi->tp;
7547         struct tg3_hw_status *sblk = tnapi->hw_status;
7548         unsigned int handled = 1;
7549
7550         /* In INTx mode, it is possible for the interrupt to arrive at
7551          * the CPU before the status block that was posted ahead of it.
7552          * Reading the PCI State register will confirm whether the
7553          * interrupt is ours and will flush the status block.
7554          */
7555         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7556                 if (tg3_flag(tp, CHIP_RESETTING) ||
7557                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7558                         handled = 0;
7559                         goto out;
7560                 }
7561         }
7562
7563         /*
7564          * Writing any value to intr-mbox-0 clears PCI INTA# and
7565          * chip-internal interrupt pending events.
7566          * Writing non-zero to intr-mbox-0 additionally tells the
7567          * NIC to stop sending us irqs, engaging "in-intr-handler"
7568          * event coalescing.
7569          *
7570          * Flush the mailbox to de-assert the IRQ immediately to prevent
7571          * spurious interrupts.  The flush impacts performance but
7572          * excessive spurious interrupts can be worse in some cases.
7573          */
7574         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7575
7576         /*
7577          * In a shared interrupt configuration, sometimes other devices'
7578          * interrupts will scream.  We record the current status tag here
7579          * so that the above check can report that the screaming interrupts
7580          * are unhandled.  Eventually they will be silenced.
7581          */
7582         tnapi->last_irq_tag = sblk->status_tag;
7583
7584         if (tg3_irq_sync(tp))
7585                 goto out;
7586
7587         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7588
7589         napi_schedule(&tnapi->napi);
7590
7591 out:
7592         return IRQ_RETVAL(handled);
7593 }
7594
7595 /* ISR for interrupt test */
7596 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7597 {
7598         struct tg3_napi *tnapi = dev_id;
7599         struct tg3 *tp = tnapi->tp;
7600         struct tg3_hw_status *sblk = tnapi->hw_status;
7601
7602         if ((sblk->status & SD_STATUS_UPDATED) ||
7603             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7604                 tg3_disable_ints(tp);
7605                 return IRQ_RETVAL(1);
7606         }
7607         return IRQ_RETVAL(0);
7608 }
7609
7610 #ifdef CONFIG_NET_POLL_CONTROLLER
7611 static void tg3_poll_controller(struct net_device *dev)
7612 {
7613         int i;
7614         struct tg3 *tp = netdev_priv(dev);
7615
7616         if (tg3_irq_sync(tp))
7617                 return;
7618
7619         for (i = 0; i < tp->irq_cnt; i++)
7620                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7621 }
7622 #endif
7623
7624 static void tg3_tx_timeout(struct net_device *dev)
7625 {
7626         struct tg3 *tp = netdev_priv(dev);
7627
7628         if (netif_msg_tx_err(tp)) {
7629                 netdev_err(dev, "transmit timed out, resetting\n");
7630                 tg3_dump_state(tp);
7631         }
7632
7633         tg3_reset_task_schedule(tp);
7634 }
7635
7636 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
7637 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7638 {
7639         u32 base = (u32) mapping & 0xffffffff;
7640
7641         return base + len + 8 < base;
7642 }
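
/* Worked example (illustrative): with mapping == 0xfffffff0 and
 * len == 32, base + len + 8 == 0x100000018, which truncates to 0x18
 * in 32-bit arithmetic; 0x18 < base, so the test reports a 4GB
 * boundary crossing.  The 8 bytes of slack presumably tie in with the
 * chip's short-DMA limitation (an inference, not documented here).
 */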
7643
7644 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7645  * of any 4GB boundaries: 4G, 8G, etc.
7646  */
7647 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7648                                            u32 len, u32 mss)
7649 {
7650         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7651                 u32 base = (u32) mapping & 0xffffffff;
7652
7653                 return ((base + len + (mss & 0x3fff)) < base);
7654         }
7655         return 0;
7656 }
7657
7658 /* Test for DMA addresses > 40-bit */
7659 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660                                           int len)
7661 {
7662 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7663         if (tg3_flag(tp, 40BIT_DMA_BUG))
7664                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7665         return 0;
7666 #else
7667         return 0;
7668 #endif
7669 }
7670
7671 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7672                                  dma_addr_t mapping, u32 len, u32 flags,
7673                                  u32 mss, u32 vlan)
7674 {
7675         txbd->addr_hi = ((u64) mapping >> 32);
7676         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7677         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7678         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7679 }
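
/* Illustrative note: the 64-bit DMA address is split across two 32-bit
 * descriptor words, the flags are masked into the low 16 bits of
 * len_flags and the length is shifted into the high half (assuming
 * TXD_LEN_SHIFT == 16, as the 0x0000ffff flag mask implies).  A final
 * 1500-byte fragment with no offloads would thus yield
 * len_flags == (1500 << TXD_LEN_SHIFT) | TXD_FLAG_END.
 */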
7680
7681 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7682                             dma_addr_t map, u32 len, u32 flags,
7683                             u32 mss, u32 vlan)
7684 {
7685         struct tg3 *tp = tnapi->tp;
7686         bool hwbug = false;
7687
7688         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7689                 hwbug = true;
7690
7691         if (tg3_4g_overflow_test(map, len))
7692                 hwbug = true;
7693
7694         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7695                 hwbug = true;
7696
7697         if (tg3_40bit_overflow_test(tp, map, len))
7698                 hwbug = true;
7699
7700         if (tp->dma_limit) {
7701                 u32 prvidx = *entry;
7702                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7703                 while (len > tp->dma_limit && *budget) {
7704                         u32 frag_len = tp->dma_limit;
7705                         len -= tp->dma_limit;
7706
7707                         /* Avoid the 8-byte DMA problem */
7708                         if (len <= 8) {
7709                                 len += tp->dma_limit / 2;
7710                                 frag_len = tp->dma_limit / 2;
7711                         }
7712
7713                         tnapi->tx_buffers[*entry].fragmented = true;
7714
7715                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7716                                       frag_len, tmp_flag, mss, vlan);
7717                         *budget -= 1;
7718                         prvidx = *entry;
7719                         *entry = NEXT_TX(*entry);
7720
7721                         map += frag_len;
7722                 }
7723
7724                 if (len) {
7725                         if (*budget) {
7726                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7727                                               len, flags, mss, vlan);
7728                                 *budget -= 1;
7729                                 *entry = NEXT_TX(*entry);
7730                         } else {
7731                                 hwbug = true;
7732                                 tnapi->tx_buffers[prvidx].fragmented = false;
7733                         }
7734                 }
7735         } else {
7736                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737                               len, flags, mss, vlan);
7738                 *entry = NEXT_TX(*entry);
7739         }
7740
7741         return hwbug;
7742 }
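
/* Worked example of the dma_limit splitting above (illustrative): with
 * tp->dma_limit == 4096 and len == 4100, emitting a full 4096-byte BD
 * would leave a 4-byte remainder and trip the 8-byte DMA bug, so the
 * loop instead emits a 2048-byte BD (dma_limit / 2) and leaves
 * 2052 bytes for the final descriptor written after the loop.
 */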
7743
7744 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7745 {
7746         int i;
7747         struct sk_buff *skb;
7748         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7749
7750         skb = txb->skb;
7751         txb->skb = NULL;
7752
7753         pci_unmap_single(tnapi->tp->pdev,
7754                          dma_unmap_addr(txb, mapping),
7755                          skb_headlen(skb),
7756                          PCI_DMA_TODEVICE);
7757
7758         while (txb->fragmented) {
7759                 txb->fragmented = false;
7760                 entry = NEXT_TX(entry);
7761                 txb = &tnapi->tx_buffers[entry];
7762         }
7763
7764         for (i = 0; i <= last; i++) {
7765                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7766
7767                 entry = NEXT_TX(entry);
7768                 txb = &tnapi->tx_buffers[entry];
7769
7770                 pci_unmap_page(tnapi->tp->pdev,
7771                                dma_unmap_addr(txb, mapping),
7772                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7773
7774                 while (txb->fragmented) {
7775                         txb->fragmented = false;
7776                         entry = NEXT_TX(entry);
7777                         txb = &tnapi->tx_buffers[entry];
7778                 }
7779         }
7780 }
7781
7782 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7783 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7784                                        struct sk_buff **pskb,
7785                                        u32 *entry, u32 *budget,
7786                                        u32 base_flags, u32 mss, u32 vlan)
7787 {
7788         struct tg3 *tp = tnapi->tp;
7789         struct sk_buff *new_skb, *skb = *pskb;
7790         dma_addr_t new_addr = 0;
7791         int ret = 0;
7792
7793         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7794                 new_skb = skb_copy(skb, GFP_ATOMIC);
7795         else {
7796                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7797
7798                 new_skb = skb_copy_expand(skb,
7799                                           skb_headroom(skb) + more_headroom,
7800                                           skb_tailroom(skb), GFP_ATOMIC);
7801         }
7802
7803         if (!new_skb) {
7804                 ret = -1;
7805         } else {
7806                 /* New SKB is guaranteed to be linear. */
7807                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7808                                           PCI_DMA_TODEVICE);
7809                 /* Make sure the mapping succeeded */
7810                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7811                         dev_kfree_skb_any(new_skb);
7812                         ret = -1;
7813                 } else {
7814                         u32 save_entry = *entry;
7815
7816                         base_flags |= TXD_FLAG_END;
7817
7818                         tnapi->tx_buffers[*entry].skb = new_skb;
7819                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7820                                            mapping, new_addr);
7821
7822                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7823                                             new_skb->len, base_flags,
7824                                             mss, vlan)) {
7825                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7826                                 dev_kfree_skb_any(new_skb);
7827                                 ret = -1;
7828                         }
7829                 }
7830         }
7831
7832         dev_consume_skb_any(skb);
7833         *pskb = new_skb;
7834         return ret;
7835 }
7836
7837 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7838 {
7839         /* Returns false when we could never have enough descriptors,
7840          * i.e. when gso_segs exceeds a third of the current ring size.
7841          */
7842         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7843 }
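
/* Illustrative example: with tnapi->tx_pending == 512 the GSO fallback
 * is only attempted when gso_segs < 512 / 3 == 170, matching the
 * worst-case estimate of roughly three descriptors per segment used by
 * tg3_tso_bug() below.
 */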
7844
7845 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7846
7847 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7848  * indicated in tg3_tx_frag_set()
7849  */
7850 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7851                        struct netdev_queue *txq, struct sk_buff *skb)
7852 {
7853         struct sk_buff *segs, *nskb;
7854         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7855
7856         /* Estimate the number of fragments in the worst case */
7857         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7858                 netif_tx_stop_queue(txq);
7859
7860                 /* netif_tx_stop_queue() must be done before checking
7861                  * tx index in tg3_tx_avail() below, because in
7862                  * tg3_tx(), we update tx index before checking for
7863                  * netif_tx_queue_stopped().
7864                  */
7865                 smp_mb();
7866                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7867                         return NETDEV_TX_BUSY;
7868
7869                 netif_tx_wake_queue(txq);
7870         }
7871
7872         segs = skb_gso_segment(skb, tp->dev->features &
7873                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7874         if (IS_ERR(segs) || !segs)
7875                 goto tg3_tso_bug_end;
7876
7877         do {
7878                 nskb = segs;
7879                 segs = segs->next;
7880                 nskb->next = NULL;
7881                 tg3_start_xmit(nskb, tp->dev);
7882         } while (segs);
7883
7884 tg3_tso_bug_end:
7885         dev_consume_skb_any(skb);
7886
7887         return NETDEV_TX_OK;
7888 }
7889
7890 /* hard_start_xmit for all devices */
7891 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7892 {
7893         struct tg3 *tp = netdev_priv(dev);
7894         u32 len, entry, base_flags, mss, vlan = 0;
7895         u32 budget;
7896         int i = -1, would_hit_hwbug;
7897         dma_addr_t mapping;
7898         struct tg3_napi *tnapi;
7899         struct netdev_queue *txq;
7900         unsigned int last;
7901         struct iphdr *iph = NULL;
7902         struct tcphdr *tcph = NULL;
7903         __sum16 tcp_csum = 0, ip_csum = 0;
7904         __be16 ip_tot_len = 0;
7905
7906         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7907         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7908         if (tg3_flag(tp, ENABLE_TSS))
7909                 tnapi++;
7910
7911         budget = tg3_tx_avail(tnapi);
7912
7913         /* We are running in BH disabled context with netif_tx_lock
7914          * and TX reclaim runs via tp->napi.poll inside of a software
7915          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7916          * no IRQ context deadlocks to worry about either.  Rejoice!
7917          */
7918         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7919                 if (!netif_tx_queue_stopped(txq)) {
7920                         netif_tx_stop_queue(txq);
7921
7922                         /* This is a hard error, log it. */
7923                         netdev_err(dev,
7924                                    "BUG! Tx Ring full when queue awake!\n");
7925                 }
7926                 return NETDEV_TX_BUSY;
7927         }
7928
7929         entry = tnapi->tx_prod;
7930         base_flags = 0;
7931
7932         mss = skb_shinfo(skb)->gso_size;
7933         if (mss) {
7934                 u32 tcp_opt_len, hdr_len;
7935
7936                 if (skb_cow_head(skb, 0))
7937                         goto drop;
7938
7939                 iph = ip_hdr(skb);
7940                 tcp_opt_len = tcp_optlen(skb);
7941
7942                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7943
7944                 /* HW/FW cannot correctly segment packets that have been
7945                  * VLAN encapsulated.
7946                  */
7947                 if (skb->protocol == htons(ETH_P_8021Q) ||
7948                     skb->protocol == htons(ETH_P_8021AD)) {
7949                         if (tg3_tso_bug_gso_check(tnapi, skb))
7950                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7951                         goto drop;
7952                 }
7953
7954                 if (!skb_is_gso_v6(skb)) {
7955                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7956                             tg3_flag(tp, TSO_BUG)) {
7957                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7958                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7959                                 goto drop;
7960                         }
7961                         ip_csum = iph->check;
7962                         ip_tot_len = iph->tot_len;
7963                         iph->check = 0;
7964                         iph->tot_len = htons(mss + hdr_len);
7965                 }
7966
7967                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7968                                TXD_FLAG_CPU_POST_DMA);
7969
7970                 tcph = tcp_hdr(skb);
7971                 tcp_csum = tcph->check;
7972
7973                 if (tg3_flag(tp, HW_TSO_1) ||
7974                     tg3_flag(tp, HW_TSO_2) ||
7975                     tg3_flag(tp, HW_TSO_3)) {
7976                         tcph->check = 0;
7977                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7978                 } else {
7979                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7980                                                          0, IPPROTO_TCP, 0);
7981                 }
7982
7983                 if (tg3_flag(tp, HW_TSO_3)) {
7984                         mss |= (hdr_len & 0xc) << 12;
7985                         if (hdr_len & 0x10)
7986                                 base_flags |= 0x00000010;
7987                         base_flags |= (hdr_len & 0x3e0) << 5;
7988                 } else if (tg3_flag(tp, HW_TSO_2))
7989                         mss |= hdr_len << 9;
7990                 else if (tg3_flag(tp, HW_TSO_1) ||
7991                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7992                         if (tcp_opt_len || iph->ihl > 5) {
7993                                 int tsflags;
7994
7995                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7996                                 mss |= (tsflags << 11);
7997                         }
7998                 } else {
7999                         if (tcp_opt_len || iph->ihl > 5) {
8000                                 int tsflags;
8001
8002                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8003                                 base_flags |= tsflags << 12;
8004                         }
8005                 }
8006         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8007                 /* HW/FW cannot correctly checksum packets that have been
8008                  * VLAN encapsulated.
8009                  */
8010                 if (skb->protocol == htons(ETH_P_8021Q) ||
8011                     skb->protocol == htons(ETH_P_8021AD)) {
8012                         if (skb_checksum_help(skb))
8013                                 goto drop;
8014                 } else  {
8015                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8016                 }
8017         }
8018
8019         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8020             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8021                 base_flags |= TXD_FLAG_JMB_PKT;
8022
8023         if (skb_vlan_tag_present(skb)) {
8024                 base_flags |= TXD_FLAG_VLAN;
8025                 vlan = skb_vlan_tag_get(skb);
8026         }
8027
8028         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8029             tg3_flag(tp, TX_TSTAMP_EN)) {
8030                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8031                 base_flags |= TXD_FLAG_HWTSTAMP;
8032         }
8033
8034         len = skb_headlen(skb);
8035
8036         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8037         if (pci_dma_mapping_error(tp->pdev, mapping))
8038                 goto drop;
8039
8040
8041         tnapi->tx_buffers[entry].skb = skb;
8042         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8043
8044         would_hit_hwbug = 0;
8045
8046         if (tg3_flag(tp, 5701_DMA_BUG))
8047                 would_hit_hwbug = 1;
8048
8049         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8050                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8051                             mss, vlan)) {
8052                 would_hit_hwbug = 1;
8053         } else if (skb_shinfo(skb)->nr_frags > 0) {
8054                 u32 tmp_mss = mss;
8055
8056                 if (!tg3_flag(tp, HW_TSO_1) &&
8057                     !tg3_flag(tp, HW_TSO_2) &&
8058                     !tg3_flag(tp, HW_TSO_3))
8059                         tmp_mss = 0;
8060
8061                 /* Now loop through additional data
8062                  * fragments, and queue them.
8063                  */
8064                 last = skb_shinfo(skb)->nr_frags - 1;
8065                 for (i = 0; i <= last; i++) {
8066                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8067
8068                         len = skb_frag_size(frag);
8069                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8070                                                    len, DMA_TO_DEVICE);
8071
8072                         tnapi->tx_buffers[entry].skb = NULL;
8073                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8074                                            mapping);
8075                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8076                                 goto dma_error;
8077
8078                         if (!budget ||
8079                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8080                                             len, base_flags |
8081                                             ((i == last) ? TXD_FLAG_END : 0),
8082                                             tmp_mss, vlan)) {
8083                                 would_hit_hwbug = 1;
8084                                 break;
8085                         }
8086                 }
8087         }
8088
8089         if (would_hit_hwbug) {
8090                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8091
8092                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8093                         /* If it's a TSO packet, do GSO instead of
8094                          * allocating and copying to a large linear SKB
8095                          */
8096                         if (ip_tot_len) {
8097                                 iph->check = ip_csum;
8098                                 iph->tot_len = ip_tot_len;
8099                         }
8100                         tcph->check = tcp_csum;
8101                         return tg3_tso_bug(tp, tnapi, txq, skb);
8102                 }
8103
8104                 /* If the workaround fails due to memory/mapping
8105                  * failure, silently drop this packet.
8106                  */
8107                 entry = tnapi->tx_prod;
8108                 budget = tg3_tx_avail(tnapi);
8109                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8110                                                 base_flags, mss, vlan))
8111                         goto drop_nofree;
8112         }
8113
8114         skb_tx_timestamp(skb);
8115         netdev_tx_sent_queue(txq, skb->len);
8116
8117         /* Sync BD data before updating mailbox */
8118         wmb();
8119
8120         tnapi->tx_prod = entry;
8121         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8122                 netif_tx_stop_queue(txq);
8123
8124                 /* netif_tx_stop_queue() must be done before checking
8125                  * tx index in tg3_tx_avail() below, because in
8126                  * tg3_tx(), we update tx index before checking for
8127                  * netif_tx_queue_stopped().
8128                  */
8129                 smp_mb();
8130                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8131                         netif_tx_wake_queue(txq);
8132         }
8133
8134         if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8135                 /* Packets are ready, update Tx producer idx on card. */
8136                 tw32_tx_mbox(tnapi->prodmbox, entry);
8137                 mmiowb();
8138         }
8139
8140         return NETDEV_TX_OK;
8141
8142 dma_error:
8143         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8144         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8145 drop:
8146         dev_kfree_skb_any(skb);
8147 drop_nofree:
8148         tp->tx_dropped++;
8149         return NETDEV_TX_OK;
8150 }
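
/* Illustrative note on the TSO checksum preparation above: for chips
 * without full hardware TSO, tg3_start_xmit() seeds tcph->check with
 * the inverted pseudo-header checksum computed over saddr/daddr with a
 * zero length (~csum_tcpudp_magic(..., 0, IPPROTO_TCP, 0)), so the
 * hardware can finish the per-segment checksum as it splits the packet.
 */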
8151
8152 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8153 {
8154         if (enable) {
8155                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8156                                   MAC_MODE_PORT_MODE_MASK);
8157
8158                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8159
8160                 if (!tg3_flag(tp, 5705_PLUS))
8161                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8162
8163                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8164                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8165                 else
8166                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8167         } else {
8168                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8169
8170                 if (tg3_flag(tp, 5705_PLUS) ||
8171                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8172                     tg3_asic_rev(tp) == ASIC_REV_5700)
8173                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8174         }
8175
8176         tw32(MAC_MODE, tp->mac_mode);
8177         udelay(40);
8178 }
8179
8180 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8181 {
8182         u32 val, bmcr, mac_mode, ptest = 0;
8183
8184         tg3_phy_toggle_apd(tp, false);
8185         tg3_phy_toggle_automdix(tp, false);
8186
8187         if (extlpbk && tg3_phy_set_extloopbk(tp))
8188                 return -EIO;
8189
8190         bmcr = BMCR_FULLDPLX;
8191         switch (speed) {
8192         case SPEED_10:
8193                 break;
8194         case SPEED_100:
8195                 bmcr |= BMCR_SPEED100;
8196                 break;
8197         case SPEED_1000:
8198         default:
8199                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8200                         speed = SPEED_100;
8201                         bmcr |= BMCR_SPEED100;
8202                 } else {
8203                         speed = SPEED_1000;
8204                         bmcr |= BMCR_SPEED1000;
8205                 }
8206         }
8207
8208         if (extlpbk) {
8209                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8210                         tg3_readphy(tp, MII_CTRL1000, &val);
8211                         val |= CTL1000_AS_MASTER |
8212                                CTL1000_ENABLE_MASTER;
8213                         tg3_writephy(tp, MII_CTRL1000, val);
8214                 } else {
8215                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8216                                 MII_TG3_FET_PTEST_TRIM_2;
8217                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8218                 }
8219         } else
8220                 bmcr |= BMCR_LOOPBACK;
8221
8222         tg3_writephy(tp, MII_BMCR, bmcr);
8223
8224         /* The write needs to be flushed for the FETs */
8225         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8226                 tg3_readphy(tp, MII_BMCR, &bmcr);
8227
8228         udelay(40);
8229
8230         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8231             tg3_asic_rev(tp) == ASIC_REV_5785) {
8232                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8233                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8234                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8235
8236                 /* The write needs to be flushed for the AC131 */
8237                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8238         }
8239
8240         /* Reset to prevent losing the 1st rx packet intermittently */
8241         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8242             tg3_flag(tp, 5780_CLASS)) {
8243                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8244                 udelay(10);
8245                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8246         }
8247
8248         mac_mode = tp->mac_mode &
8249                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8250         if (speed == SPEED_1000)
8251                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8252         else
8253                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8254
8255         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8256                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8257
8258                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8259                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8260                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8261                         mac_mode |= MAC_MODE_LINK_POLARITY;
8262
8263                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8264                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8265         }
8266
8267         tw32(MAC_MODE, mac_mode);
8268         udelay(40);
8269
8270         return 0;
8271 }
8272
8273 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8274 {
8275         struct tg3 *tp = netdev_priv(dev);
8276
8277         if (features & NETIF_F_LOOPBACK) {
8278                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8279                         return;
8280
8281                 spin_lock_bh(&tp->lock);
8282                 tg3_mac_loopback(tp, true);
8283                 netif_carrier_on(tp->dev);
8284                 spin_unlock_bh(&tp->lock);
8285                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8286         } else {
8287                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8288                         return;
8289
8290                 spin_lock_bh(&tp->lock);
8291                 tg3_mac_loopback(tp, false);
8292                 /* Force link status check */
8293                 tg3_setup_phy(tp, true);
8294                 spin_unlock_bh(&tp->lock);
8295                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8296         }
8297 }
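
/* Usage sketch (assuming "loopback" is the standard ethtool feature
 * string for NETIF_F_LOOPBACK):
 *
 *	ethtool -K eth0 loopback on	# enable internal MAC loopback
 *	ethtool -K eth0 loopback off	# restore normal operation
 */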
8298
8299 static netdev_features_t tg3_fix_features(struct net_device *dev,
8300         netdev_features_t features)
8301 {
8302         struct tg3 *tp = netdev_priv(dev);
8303
8304         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8305                 features &= ~NETIF_F_ALL_TSO;
8306
8307         return features;
8308 }
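
/* Illustrative consequence of the fixup above: on 5780-class parts,
 * raising the MTU past ETH_DATA_LEN (e.g. "ip link set eth0 mtu 9000")
 * clears all TSO feature bits, since these chips cannot use TSO and
 * jumbo frames at the same time (an inference from the flag check
 * above).
 */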
8309
8310 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8311 {
8312         netdev_features_t changed = dev->features ^ features;
8313
8314         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8315                 tg3_set_loopback(dev, features);
8316
8317         return 0;
8318 }
8319
8320 static void tg3_rx_prodring_free(struct tg3 *tp,
8321                                  struct tg3_rx_prodring_set *tpr)
8322 {
8323         int i;
8324
8325         if (tpr != &tp->napi[0].prodring) {
8326                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8327                      i = (i + 1) & tp->rx_std_ring_mask)
8328                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8329                                         tp->rx_pkt_map_sz);
8330
8331                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8332                         for (i = tpr->rx_jmb_cons_idx;
8333                              i != tpr->rx_jmb_prod_idx;
8334                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8335                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8336                                                 TG3_RX_JMB_MAP_SZ);
8337                         }
8338                 }
8339
8340                 return;
8341         }
8342
8343         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8344                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8345                                 tp->rx_pkt_map_sz);
8346
8347         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8348                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8349                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8350                                         TG3_RX_JMB_MAP_SZ);
8351         }
8352 }
8353
8354 /* Initialize rx rings for packet processing.
8355  *
8356  * The chip has been shut down and the driver detached from
8357  * the network stack, so no interrupts or new tx packets will
8358  * end up in the driver.  tp->{tx,}lock are held and thus
8359  * we may not sleep.
8360  */
8361 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8362                                  struct tg3_rx_prodring_set *tpr)
8363 {
8364         u32 i, rx_pkt_dma_sz;
8365
8366         tpr->rx_std_cons_idx = 0;
8367         tpr->rx_std_prod_idx = 0;
8368         tpr->rx_jmb_cons_idx = 0;
8369         tpr->rx_jmb_prod_idx = 0;
8370
8371         if (tpr != &tp->napi[0].prodring) {
8372                 memset(&tpr->rx_std_buffers[0], 0,
8373                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8374                 if (tpr->rx_jmb_buffers)
8375                         memset(&tpr->rx_jmb_buffers[0], 0,
8376                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8377                 goto done;
8378         }
8379
8380         /* Zero out all descriptors. */
8381         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8382
8383         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8384         if (tg3_flag(tp, 5780_CLASS) &&
8385             tp->dev->mtu > ETH_DATA_LEN)
8386                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8387         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8388
8389         /* Initialize invariants of the rings; we only set this
8390          * stuff once.  This works because the card does not
8391          * write into the rx buffer posting rings.
8392          */
8393         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8394                 struct tg3_rx_buffer_desc *rxd;
8395
8396                 rxd = &tpr->rx_std[i];
8397                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8398                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8399                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8400                                (i << RXD_OPAQUE_INDEX_SHIFT));
8401         }
8402
8403         /* Now allocate fresh SKBs for each rx ring. */
8404         for (i = 0; i < tp->rx_pending; i++) {
8405                 unsigned int frag_size;
8406
8407                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8408                                       &frag_size) < 0) {
8409                         netdev_warn(tp->dev,
8410                                     "Using a smaller RX standard ring. Only "
8411                                     "%d out of %d buffers were allocated "
8412                                     "successfully\n", i, tp->rx_pending);
8413                         if (i == 0)
8414                                 goto initfail;
8415                         tp->rx_pending = i;
8416                         break;
8417                 }
8418         }
8419
8420         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8421                 goto done;
8422
8423         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8424
8425         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8426                 goto done;
8427
8428         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8429                 struct tg3_rx_buffer_desc *rxd;
8430
8431                 rxd = &tpr->rx_jmb[i].std;
8432                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8433                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8434                                   RXD_FLAG_JUMBO;
8435                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8436                        (i << RXD_OPAQUE_INDEX_SHIFT));
8437         }
8438
8439         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8440                 unsigned int frag_size;
8441
8442                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8443                                       &frag_size) < 0) {
8444                         netdev_warn(tp->dev,
8445                                     "Using a smaller RX jumbo ring. Only %d "
8446                                     "out of %d buffers were allocated "
8447                                     "successfully\n", i, tp->rx_jumbo_pending);
8448                         if (i == 0)
8449                                 goto initfail;
8450                         tp->rx_jumbo_pending = i;
8451                         break;
8452                 }
8453         }
8454
8455 done:
8456         return 0;
8457
8458 initfail:
8459         tg3_rx_prodring_free(tp, tpr);
8460         return -ENOMEM;
8461 }
8462
8463 static void tg3_rx_prodring_fini(struct tg3 *tp,
8464                                  struct tg3_rx_prodring_set *tpr)
8465 {
8466         kfree(tpr->rx_std_buffers);
8467         tpr->rx_std_buffers = NULL;
8468         kfree(tpr->rx_jmb_buffers);
8469         tpr->rx_jmb_buffers = NULL;
8470         if (tpr->rx_std) {
8471                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8472                                   tpr->rx_std, tpr->rx_std_mapping);
8473                 tpr->rx_std = NULL;
8474         }
8475         if (tpr->rx_jmb) {
8476                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8477                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8478                 tpr->rx_jmb = NULL;
8479         }
8480 }
8481
8482 static int tg3_rx_prodring_init(struct tg3 *tp,
8483                                 struct tg3_rx_prodring_set *tpr)
8484 {
8485         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8486                                       GFP_KERNEL);
8487         if (!tpr->rx_std_buffers)
8488                 return -ENOMEM;
8489
8490         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8491                                          TG3_RX_STD_RING_BYTES(tp),
8492                                          &tpr->rx_std_mapping,
8493                                          GFP_KERNEL);
8494         if (!tpr->rx_std)
8495                 goto err_out;
8496
8497         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8498                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8499                                               GFP_KERNEL);
8500                 if (!tpr->rx_jmb_buffers)
8501                         goto err_out;
8502
8503                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8504                                                  TG3_RX_JMB_RING_BYTES(tp),
8505                                                  &tpr->rx_jmb_mapping,
8506                                                  GFP_KERNEL);
8507                 if (!tpr->rx_jmb)
8508                         goto err_out;
8509         }
8510
8511         return 0;
8512
8513 err_out:
8514         tg3_rx_prodring_fini(tp, tpr);
8515         return -ENOMEM;
8516 }
8517
8518 /* Free up pending packets in all rx/tx rings.
8519  *
8520  * The chip has been shut down and the driver detached from
8521  * the network stack, so no interrupts or new tx packets will
8522  * end up in the driver.  tp->{tx,}lock is not held and we are not
8523  * in an interrupt context and thus may sleep.
8524  */
8525 static void tg3_free_rings(struct tg3 *tp)
8526 {
8527         int i, j;
8528
8529         for (j = 0; j < tp->irq_cnt; j++) {
8530                 struct tg3_napi *tnapi = &tp->napi[j];
8531
8532                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8533
8534                 if (!tnapi->tx_buffers)
8535                         continue;
8536
8537                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8538                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8539
8540                         if (!skb)
8541                                 continue;
8542
8543                         tg3_tx_skb_unmap(tnapi, i,
8544                                          skb_shinfo(skb)->nr_frags - 1);
8545
8546                         dev_consume_skb_any(skb);
8547                 }
8548                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8549         }
8550 }
8551
8552 /* Initialize tx/rx rings for packet processing.
8553  *
8554  * The chip has been shut down and the driver detached from
8555  * the networking stack, so no interrupts or new tx packets will
8556  * end up in the driver.  tp->{tx,}lock are held and thus
8557  * we may not sleep.
8558  */
8559 static int tg3_init_rings(struct tg3 *tp)
8560 {
8561         int i;
8562
8563         /* Free up all the SKBs. */
8564         tg3_free_rings(tp);
8565
8566         for (i = 0; i < tp->irq_cnt; i++) {
8567                 struct tg3_napi *tnapi = &tp->napi[i];
8568
8569                 tnapi->last_tag = 0;
8570                 tnapi->last_irq_tag = 0;
8571                 tnapi->hw_status->status = 0;
8572                 tnapi->hw_status->status_tag = 0;
8573                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8574
8575                 tnapi->tx_prod = 0;
8576                 tnapi->tx_cons = 0;
8577                 if (tnapi->tx_ring)
8578                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8579
8580                 tnapi->rx_rcb_ptr = 0;
8581                 if (tnapi->rx_rcb)
8582                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8583
8584                 if (tnapi->prodring.rx_std &&
8585                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8586                         tg3_free_rings(tp);
8587                         return -ENOMEM;
8588                 }
8589         }
8590
8591         return 0;
8592 }
8593
8594 static void tg3_mem_tx_release(struct tg3 *tp)
8595 {
8596         int i;
8597
8598         for (i = 0; i < tp->irq_max; i++) {
8599                 struct tg3_napi *tnapi = &tp->napi[i];
8600
8601                 if (tnapi->tx_ring) {
8602                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8603                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8604                         tnapi->tx_ring = NULL;
8605                 }
8606
8607                 kfree(tnapi->tx_buffers);
8608                 tnapi->tx_buffers = NULL;
8609         }
8610 }
8611
8612 static int tg3_mem_tx_acquire(struct tg3 *tp)
8613 {
8614         int i;
8615         struct tg3_napi *tnapi = &tp->napi[0];
8616
8617         /* If multivector TSS is enabled, vector 0 does not handle
8618          * tx interrupts.  Don't allocate any resources for it.
8619          */
8620         if (tg3_flag(tp, ENABLE_TSS))
8621                 tnapi++;
8622
8623         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8624                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8625                                             sizeof(struct tg3_tx_ring_info), GFP_KERNEL);
8626                 if (!tnapi->tx_buffers)
8627                         goto err_out;
8628
8629                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8630                                                     TG3_TX_RING_BYTES,
8631                                                     &tnapi->tx_desc_mapping,
8632                                                     GFP_KERNEL);
8633                 if (!tnapi->tx_ring)
8634                         goto err_out;
8635         }
8636
8637         return 0;
8638
8639 err_out:
8640         tg3_mem_tx_release(tp);
8641         return -ENOMEM;
8642 }
8643
8644 static void tg3_mem_rx_release(struct tg3 *tp)
8645 {
8646         int i;
8647
8648         for (i = 0; i < tp->irq_max; i++) {
8649                 struct tg3_napi *tnapi = &tp->napi[i];
8650
8651                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8652
8653                 if (!tnapi->rx_rcb)
8654                         continue;
8655
8656                 dma_free_coherent(&tp->pdev->dev,
8657                                   TG3_RX_RCB_RING_BYTES(tp),
8658                                   tnapi->rx_rcb,
8659                                   tnapi->rx_rcb_mapping);
8660                 tnapi->rx_rcb = NULL;
8661         }
8662 }
8663
8664 static int tg3_mem_rx_acquire(struct tg3 *tp)
8665 {
8666         unsigned int i, limit;
8667
8668         limit = tp->rxq_cnt;
8669
8670         /* If RSS is enabled, we need a (dummy) producer ring
8671          * set on vector zero.  This is the true hw prodring.
8672          */
8673         if (tg3_flag(tp, ENABLE_RSS))
8674                 limit++;
8675
8676         for (i = 0; i < limit; i++) {
8677                 struct tg3_napi *tnapi = &tp->napi[i];
8678
8679                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8680                         goto err_out;
8681
8682                 /* If multivector RSS is enabled, vector 0
8683                  * does not handle rx or tx interrupts.
8684                  * Don't allocate any resources for it.
8685                  */
8686                 if (!i && tg3_flag(tp, ENABLE_RSS))
8687                         continue;
8688
8689                 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8690                                                     TG3_RX_RCB_RING_BYTES(tp),
8691                                                     &tnapi->rx_rcb_mapping,
8692                                                     GFP_KERNEL);
8693                 if (!tnapi->rx_rcb)
8694                         goto err_out;
8695         }
8696
8697         return 0;
8698
8699 err_out:
8700         tg3_mem_rx_release(tp);
8701         return -ENOMEM;
8702 }
8703
8704 /*
8705  * Must be invoked with interrupt sources disabled and
8706  * the hardware shut down.
8707  */
8708 static void tg3_free_consistent(struct tg3 *tp)
8709 {
8710         int i;
8711
8712         for (i = 0; i < tp->irq_cnt; i++) {
8713                 struct tg3_napi *tnapi = &tp->napi[i];
8714
8715                 if (tnapi->hw_status) {
8716                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8717                                           tnapi->hw_status,
8718                                           tnapi->status_mapping);
8719                         tnapi->hw_status = NULL;
8720                 }
8721         }
8722
8723         tg3_mem_rx_release(tp);
8724         tg3_mem_tx_release(tp);
8725
8726         /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
8727         tg3_full_lock(tp, 0);
8728         if (tp->hw_stats) {
8729                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8730                                   tp->hw_stats, tp->stats_mapping);
8731                 tp->hw_stats = NULL;
8732         }
8733         tg3_full_unlock(tp);
8734 }
8735
8736 /*
8737  * Must be invoked with interrupt sources disabled and
8738  * the hardware shut down.  Can sleep.
8739  */
8740 static int tg3_alloc_consistent(struct tg3 *tp)
8741 {
8742         int i;
8743
8744         tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8745                                            sizeof(struct tg3_hw_stats),
8746                                            &tp->stats_mapping, GFP_KERNEL);
8747         if (!tp->hw_stats)
8748                 goto err_out;
8749
8750         for (i = 0; i < tp->irq_cnt; i++) {
8751                 struct tg3_napi *tnapi = &tp->napi[i];
8752                 struct tg3_hw_status *sblk;
8753
8754                 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8755                                                        TG3_HW_STATUS_SIZE,
8756                                                        &tnapi->status_mapping,
8757                                                        GFP_KERNEL);
8758                 if (!tnapi->hw_status)
8759                         goto err_out;
8760
8761                 sblk = tnapi->hw_status;
8762
8763                 if (tg3_flag(tp, ENABLE_RSS)) {
8764                         u16 *prodptr = NULL;
8765
8766                         /*
8767                          * When RSS is enabled, the status block format changes
8768                          * slightly.  The "rx_jumbo_consumer", "reserved",
8769                          * and "rx_mini_consumer" members get mapped to the
8770                          * other three rx return ring producer indexes.
8771                          */
8772                         switch (i) {
8773                         case 1:
8774                                 prodptr = &sblk->idx[0].rx_producer;
8775                                 break;
8776                         case 2:
8777                                 prodptr = &sblk->rx_jumbo_consumer;
8778                                 break;
8779                         case 3:
8780                                 prodptr = &sblk->reserved;
8781                                 break;
8782                         case 4:
8783                                 prodptr = &sblk->rx_mini_consumer;
8784                                 break;
8785                         }
8786                         tnapi->rx_rcb_prod_idx = prodptr;
8787                 } else {
8788                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8789                 }
8790         }
8791
8792         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8793                 goto err_out;
8794
8795         return 0;
8796
8797 err_out:
8798         tg3_free_consistent(tp);
8799         return -ENOMEM;
8800 }
8801
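/* Editorial note (sketch, not driver code): the error handling above
 * works because every release helper in this file NULLs its pointers
 * after freeing them -- see tg3_rx_prodring_fini(), tg3_mem_tx_release(),
 * tg3_mem_rx_release() and tg3_free_consistent().  Each helper is
 * therefore idempotent and safe on partially initialized state, which
 * is what lets tg3_alloc_consistent() funnel every failure through a
 * single tg3_free_consistent() call without tracking how far it got.
 */
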
8802 #define MAX_WAIT_CNT 1000
8803
8804 /* To stop a block, clear the enable bit and poll until it
8805  * clears (up to MAX_WAIT_CNT * 100 usec = 100 msec).  tp->lock is held.
8806  */
8807 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8808 {
8809         unsigned int i;
8810         u32 val;
8811
8812         if (tg3_flag(tp, 5705_PLUS)) {
8813                 switch (ofs) {
8814                 case RCVLSC_MODE:
8815                 case DMAC_MODE:
8816                 case MBFREE_MODE:
8817                 case BUFMGR_MODE:
8818                 case MEMARB_MODE:
8819                         /* We can't enable/disable these bits on the
8820                          * 5705/5750, so just report success.
8821                          */
8822                         return 0;
8823
8824                 default:
8825                         break;
8826                 }
8827         }
8828
8829         val = tr32(ofs);
8830         val &= ~enable_bit;
8831         tw32_f(ofs, val);
8832
8833         for (i = 0; i < MAX_WAIT_CNT; i++) {
8834                 if (pci_channel_offline(tp->pdev)) {
8835                         dev_err(&tp->pdev->dev,
8836                                 "tg3_stop_block device offline, "
8837                                 "ofs=%lx enable_bit=%x\n",
8838                                 ofs, enable_bit);
8839                         return -ENODEV;
8840                 }
8841
8842                 udelay(100);
8843                 val = tr32(ofs);
8844                 if ((val & enable_bit) == 0)
8845                         break;
8846         }
8847
8848         if (i == MAX_WAIT_CNT && !silent) {
8849                 dev_err(&tp->pdev->dev,
8850                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8851                         ofs, enable_bit);
8852                 return -ENODEV;
8853         }
8854
8855         return 0;
8856 }
8857
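/* Editorial note: tg3_stop_block() returns only 0 or -ENODEV, so the
 * bitwise OR-accumulation into err in tg3_abort_hw() below
 * (err |= ...) still yields exactly 0 or -ENODEV; distinct error
 * codes never get mixed together.
 */
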
8858 /* tp->lock is held. */
8859 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8860 {
8861         int i, err;
8862
8863         tg3_disable_ints(tp);
8864
8865         if (pci_channel_offline(tp->pdev)) {
8866                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8867                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8868                 err = -ENODEV;
8869                 goto err_no_dev;
8870         }
8871
8872         tp->rx_mode &= ~RX_MODE_ENABLE;
8873         tw32_f(MAC_RX_MODE, tp->rx_mode);
8874         udelay(10);
8875
8876         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8877         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8878         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8879         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8880         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8881         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8882
8883         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8884         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8890
8891         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8892         tw32_f(MAC_MODE, tp->mac_mode);
8893         udelay(40);
8894
8895         tp->tx_mode &= ~TX_MODE_ENABLE;
8896         tw32_f(MAC_TX_MODE, tp->tx_mode);
8897
8898         for (i = 0; i < MAX_WAIT_CNT; i++) {
8899                 udelay(100);
8900                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8901                         break;
8902         }
8903         if (i >= MAX_WAIT_CNT) {
8904                 dev_err(&tp->pdev->dev,
8905                         "%s timed out, TX_MODE_ENABLE will not clear "
8906                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8907                 err |= -ENODEV;
8908         }
8909
8910         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8911         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8912         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8913
8914         tw32(FTQ_RESET, 0xffffffff);
8915         tw32(FTQ_RESET, 0x00000000);
8916
8917         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8918         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8919
8920 err_no_dev:
8921         for (i = 0; i < tp->irq_cnt; i++) {
8922                 struct tg3_napi *tnapi = &tp->napi[i];
8923                 if (tnapi->hw_status)
8924                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8925         }
8926
8927         return err;
8928 }
8929
8930 /* Save PCI command register before chip reset */
8931 static void tg3_save_pci_state(struct tg3 *tp)
8932 {
8933         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8934 }
8935
8936 /* Restore PCI state after chip reset */
8937 static void tg3_restore_pci_state(struct tg3 *tp)
8938 {
8939         u32 val;
8940
8941         /* Re-enable indirect register accesses. */
8942         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8943                                tp->misc_host_ctrl);
8944
8945         /* Set MAX PCI retry to zero. */
8946         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8947         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8948             tg3_flag(tp, PCIX_MODE))
8949                 val |= PCISTATE_RETRY_SAME_DMA;
8950         /* Allow reads and writes to the APE register and memory space. */
8951         if (tg3_flag(tp, ENABLE_APE))
8952                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8953                        PCISTATE_ALLOW_APE_SHMEM_WR |
8954                        PCISTATE_ALLOW_APE_PSPACE_WR;
8955         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8956
8957         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8958
8959         if (!tg3_flag(tp, PCI_EXPRESS)) {
8960                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8961                                       tp->pci_cacheline_sz);
8962                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8963                                       tp->pci_lat_timer);
8964         }
8965
8966         /* Make sure PCI-X relaxed ordering bit is clear. */
8967         if (tg3_flag(tp, PCIX_MODE)) {
8968                 u16 pcix_cmd;
8969
8970                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8971                                      &pcix_cmd);
8972                 pcix_cmd &= ~PCI_X_CMD_ERO;
8973                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8974                                       pcix_cmd);
8975         }
8976
8977         if (tg3_flag(tp, 5780_CLASS)) {
8978
8979                 /* Chip reset on 5780 will reset the MSI enable bit,
8980                  * so we need to restore it.
8981                  */
8982                 if (tg3_flag(tp, USING_MSI)) {
8983                         u16 ctrl;
8984
8985                         pci_read_config_word(tp->pdev,
8986                                              tp->msi_cap + PCI_MSI_FLAGS,
8987                                              &ctrl);
8988                         pci_write_config_word(tp->pdev,
8989                                               tp->msi_cap + PCI_MSI_FLAGS,
8990                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8991                         val = tr32(MSGINT_MODE);
8992                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8993                 }
8994         }
8995 }
8996
8997 static void tg3_override_clk(struct tg3 *tp)
8998 {
8999         u32 val;
9000
9001         switch (tg3_asic_rev(tp)) {
9002         case ASIC_REV_5717:
9003                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9004                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9005                      TG3_CPMU_MAC_ORIDE_ENABLE);
9006                 break;
9007
9008         case ASIC_REV_5719:
9009         case ASIC_REV_5720:
9010                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9011                 break;
9012
9013         default:
9014                 return;
9015         }
9016 }
9017
9018 static void tg3_restore_clk(struct tg3 *tp)
9019 {
9020         u32 val;
9021
9022         switch (tg3_asic_rev(tp)) {
9023         case ASIC_REV_5717:
9024                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9025                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9026                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9027                 break;
9028
9029         case ASIC_REV_5719:
9030         case ASIC_REV_5720:
9031                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9032                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9033                 break;
9034
9035         default:
9036                 return;
9037         }
9038 }
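
/* Editorial note: tg3_override_clk() and tg3_restore_clk() bracket the
 * chip reset in tg3_chip_reset() below -- the override is applied just
 * before the GRC_MISC_CFG core-clock reset so the bootcode runs at
 * full speed, and the restore happens only after tg3_poll_fw() has
 * confirmed the bootcode finished.
 */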
9039
9040 /* tp->lock is held. */
9041 static int tg3_chip_reset(struct tg3 *tp)
9042         __releases(tp->lock)
9043         __acquires(tp->lock)
9044 {
9045         u32 val;
9046         void (*write_op)(struct tg3 *, u32, u32);
9047         int i, err;
9048
9049         if (!pci_device_is_present(tp->pdev))
9050                 return -ENODEV;
9051
9052         tg3_nvram_lock(tp);
9053
9054         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9055
9056         /* No matching tg3_nvram_unlock() after this because
9057          * chip reset below will undo the nvram lock.
9058          */
9059         tp->nvram_lock_cnt = 0;
9060
9061         /* GRC_MISC_CFG core clock reset will clear the memory
9062          * enable bit in PCI register 4 and the MSI enable bit
9063          * on some chips, so we save relevant registers here.
9064          */
9065         tg3_save_pci_state(tp);
9066
9067         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9068             tg3_flag(tp, 5755_PLUS))
9069                 tw32(GRC_FASTBOOT_PC, 0);
9070
9071         /*
9072          * We must avoid the readl() that normally takes place.
9073          * It locks up machines, causes machine checks, and does
9074          * other fun things.  So, temporarily disable the 5701
9075          * hardware workaround, while we do the reset.
9076          */
9077         write_op = tp->write32;
9078         if (write_op == tg3_write_flush_reg32)
9079                 tp->write32 = tg3_write32;
9080
9081         /* Prevent the irq handler from reading or writing PCI registers
9082          * during chip reset when the memory enable bit in the PCI command
9083          * register may be cleared.  The chip does not generate interrupts
9084          * at this time, but the irq handler may still be called due to irq
9085          * sharing or irqpoll.
9086          */
9087         tg3_flag_set(tp, CHIP_RESETTING);
9088         for (i = 0; i < tp->irq_cnt; i++) {
9089                 struct tg3_napi *tnapi = &tp->napi[i];
9090                 if (tnapi->hw_status) {
9091                         tnapi->hw_status->status = 0;
9092                         tnapi->hw_status->status_tag = 0;
9093                 }
9094                 tnapi->last_tag = 0;
9095                 tnapi->last_irq_tag = 0;
9096         }
9097         smp_mb();
9098
9099         tg3_full_unlock(tp);
9100
9101         for (i = 0; i < tp->irq_cnt; i++)
9102                 synchronize_irq(tp->napi[i].irq_vec);
9103
9104         tg3_full_lock(tp, 0);
9105
9106         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9107                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9108                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9109         }
9110
9111         /* do the reset */
9112         val = GRC_MISC_CFG_CORECLK_RESET;
9113
9114         if (tg3_flag(tp, PCI_EXPRESS)) {
9115                 /* Force PCIe 1.0a mode */
9116                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9117                     !tg3_flag(tp, 57765_PLUS) &&
9118                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9119                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9120                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9121
9122                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9123                         tw32(GRC_MISC_CFG, (1 << 29));
9124                         val |= (1 << 29);
9125                 }
9126         }
9127
9128         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9129                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9130                 tw32(GRC_VCPU_EXT_CTRL,
9131                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9132         }
9133
9134         /* Set the clock to the highest frequency to avoid timeouts. With
9135          * link-aware mode, the clock speed could be slow and the bootcode
9136          * may not complete within the expected time. Override the clock to allow the
9137          * bootcode to finish sooner and then restore it.
9138          */
9139         tg3_override_clk(tp);
9140
9141         /* Manage GPHY power for all CPMU-absent PCIe devices. */
9142         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9143                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9144
9145         tw32(GRC_MISC_CFG, val);
9146
9147         /* restore 5701 hardware bug workaround write method */
9148         tp->write32 = write_op;
9149
9150         /* Unfortunately, we have to delay before the PCI read back.
9151          * Some 575X chips even will not respond to a PCI cfg access
9152          * when the reset command is given to the chip.
9153          *
9154          * How do these hardware designers expect things to work
9155          * properly if the PCI write is posted for a long period
9156          * of time?  Some method is always needed to read a register
9157          * back, pushing out the posted write that performs the
9158          * reset.
9159          *
9160          * For most tg3 variants the trick below works.
9161          * Ho hum...
9162          */
9163         udelay(120);
9164
9165         /* Flush PCI posted writes.  The normal MMIO registers
9166          * are inaccessible at this time so this is the only
9167          * way to do this reliably (actually, this is no longer
9168          * the case, see above).  I tried to use indirect
9169          * register read/write but this upset some 5701 variants.
9170          */
9171         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9172
9173         udelay(120);
9174
9175         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9176                 u16 val16;
9177
9178                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9179                         int j;
9180                         u32 cfg_val;
9181
9182                         /* Wait for link training to complete.  */
9183                         for (j = 0; j < 5000; j++)
9184                                 udelay(100);
9185
9186                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9187                         pci_write_config_dword(tp->pdev, 0xc4,
9188                                                cfg_val | (1 << 15));
9189                 }
9190
9191                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9192                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9193                 /*
9194                  * Older PCIe devices only support the 128 byte
9195                  * MPS setting.  Enforce the restriction.
9196                  */
9197                 if (!tg3_flag(tp, CPMU_PRESENT))
9198                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9199                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9200
9201                 /* Clear error status */
9202                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9203                                       PCI_EXP_DEVSTA_CED |
9204                                       PCI_EXP_DEVSTA_NFED |
9205                                       PCI_EXP_DEVSTA_FED |
9206                                       PCI_EXP_DEVSTA_URD);
9207         }
9208
9209         tg3_restore_pci_state(tp);
9210
9211         tg3_flag_clear(tp, CHIP_RESETTING);
9212         tg3_flag_clear(tp, ERROR_PROCESSED);
9213
9214         val = 0;
9215         if (tg3_flag(tp, 5780_CLASS))
9216                 val = tr32(MEMARB_MODE);
9217         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9218
9219         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9220                 tg3_stop_fw(tp);
9221                 tw32(0x5000, 0x400);
9222         }
9223
9224         if (tg3_flag(tp, IS_SSB_CORE)) {
9225                 /*
9226                  * BCM4785: To avoid repercussions from using the
9227                  * potentially defective internal ROM, stop the Rx RISC CPU,
9228                  * which is not required for operation.
9229                  */
9230                 tg3_stop_fw(tp);
9231                 tg3_halt_cpu(tp, RX_CPU_BASE);
9232         }
9233
9234         err = tg3_poll_fw(tp);
9235         if (err)
9236                 return err;
9237
9238         tw32(GRC_MODE, tp->grc_mode);
9239
9240         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9241                 val = tr32(0xc4);
9242
9243                 tw32(0xc4, val | (1 << 15));
9244         }
9245
9246         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9247             tg3_asic_rev(tp) == ASIC_REV_5705) {
9248                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9249                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9250                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9251                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9252         }
9253
9254         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9255                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9256                 val = tp->mac_mode;
9257         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9258                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9259                 val = tp->mac_mode;
9260         } else
9261                 val = 0;
9262
9263         tw32_f(MAC_MODE, val);
9264         udelay(40);
9265
9266         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9267
9268         tg3_mdio_start(tp);
9269
9270         if (tg3_flag(tp, PCI_EXPRESS) &&
9271             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9272             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9273             !tg3_flag(tp, 57765_PLUS)) {
9274                 val = tr32(0x7c00);
9275
9276                 tw32(0x7c00, val | (1 << 25));
9277         }
9278
9279         tg3_restore_clk(tp);
9280
9281         /* Reprobe ASF enable state.  */
9282         tg3_flag_clear(tp, ENABLE_ASF);
9283         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9284                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9285
9286         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9287         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9288         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9289                 u32 nic_cfg;
9290
9291                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9292                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9293                         tg3_flag_set(tp, ENABLE_ASF);
9294                         tp->last_event_jiffies = jiffies;
9295                         if (tg3_flag(tp, 5750_PLUS))
9296                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9297
9298                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9299                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9300                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9301                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9302                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9303                 }
9304         }
9305
9306         return 0;
9307 }
9308
9309 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9310 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9311 static void __tg3_set_rx_mode(struct net_device *);
9312
9313 /* tp->lock is held. */
9314 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9315 {
9316         int err;
9317
9318         tg3_stop_fw(tp);
9319
9320         tg3_write_sig_pre_reset(tp, kind);
9321
9322         tg3_abort_hw(tp, silent);
9323         err = tg3_chip_reset(tp);
9324
9325         __tg3_set_mac_addr(tp, false);
9326
9327         tg3_write_sig_legacy(tp, kind);
9328         tg3_write_sig_post_reset(tp, kind);
9329
9330         if (tp->hw_stats) {
9331                 /* Save the stats across chip resets... */
9332                 tg3_get_nstats(tp, &tp->net_stats_prev);
9333                 tg3_get_estats(tp, &tp->estats_prev);
9334
9335                 /* And make sure the next sample is new data */
9336                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9337         }
9338
9339         return err;
9340 }
9341
9342 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9343 {
9344         struct tg3 *tp = netdev_priv(dev);
9345         struct sockaddr *addr = p;
9346         int err = 0;
9347         bool skip_mac_1 = false;
9348
9349         if (!is_valid_ether_addr(addr->sa_data))
9350                 return -EADDRNOTAVAIL;
9351
9352         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9353
9354         if (!netif_running(dev))
9355                 return 0;
9356
9357         if (tg3_flag(tp, ENABLE_ASF)) {
9358                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9359
9360                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9361                 addr0_low = tr32(MAC_ADDR_0_LOW);
9362                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9363                 addr1_low = tr32(MAC_ADDR_1_LOW);
9364
9365                 /* Skip MAC addr 1 if ASF is using it. */
9366                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9367                     !(addr1_high == 0 && addr1_low == 0))
9368                         skip_mac_1 = true;
9369         }
9370         spin_lock_bh(&tp->lock);
9371         __tg3_set_mac_addr(tp, skip_mac_1);
9372         __tg3_set_rx_mode(dev);
9373         spin_unlock_bh(&tp->lock);
9374
9375         return err;
9376 }
9377
9378 /* tp->lock is held. */
9379 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9380                            dma_addr_t mapping, u32 maxlen_flags,
9381                            u32 nic_addr)
9382 {
9383         tg3_write_mem(tp,
9384                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9385                       ((u64) mapping >> 32));
9386         tg3_write_mem(tp,
9387                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9388                       ((u64) mapping & 0xffffffff));
9389         tg3_write_mem(tp,
9390                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9391                        maxlen_flags);
9392
9393         if (!tg3_flag(tp, 5705_PLUS))
9394                 tg3_write_mem(tp,
9395                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9396                               nic_addr);
9397 }
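
/* Editorial sketch: a BDINFO block in NIC SRAM is four consecutive
 * 32-bit words, which is exactly what the writes above populate:
 *
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH : host DMA address, upper half
 *   TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW  : host DMA address, lower half
 *   TG3_BDINFO_MAXLEN_FLAGS                   : ring length and flag bits
 *   TG3_BDINFO_NIC_ADDR                       : NIC-local ring address
 *                                               (pre-5705 chips only)
 *
 * tg3_tx_rcbs_init() and tg3_rx_ret_rcbs_init() below use this helper
 * to point each send/return ring at its host buffer.
 */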
9398
9399
9400 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9401 {
9402         int i = 0;
9403
9404         if (!tg3_flag(tp, ENABLE_TSS)) {
9405                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9406                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9407                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9408         } else {
9409                 tw32(HOSTCC_TXCOL_TICKS, 0);
9410                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9411                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9412
9413                 for (; i < tp->txq_cnt; i++) {
9414                         u32 reg;
9415
9416                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9417                         tw32(reg, ec->tx_coalesce_usecs);
9418                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9419                         tw32(reg, ec->tx_max_coalesced_frames);
9420                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9421                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9422                 }
9423         }
9424
9425         for (; i < tp->irq_max - 1; i++) {
9426                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9427                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9428                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9429         }
9430 }
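
/* Editorial note: the per-vector coalescing registers sit at a fixed
 * 0x18-byte stride, which is what the "+ i * 0x18" above indexes.  As
 * a worked example, loop index i = 2 addresses
 * HOSTCC_TXCOL_TICKS_VEC1 + 0x30 (= 2 * 0x18).
 */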
9431
9432 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9433 {
9434         int i = 0;
9435         u32 limit = tp->rxq_cnt;
9436
9437         if (!tg3_flag(tp, ENABLE_RSS)) {
9438                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9439                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9440                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9441                 limit--;
9442         } else {
9443                 tw32(HOSTCC_RXCOL_TICKS, 0);
9444                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9445                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9446         }
9447
9448         for (; i < limit; i++) {
9449                 u32 reg;
9450
9451                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9452                 tw32(reg, ec->rx_coalesce_usecs);
9453                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9454                 tw32(reg, ec->rx_max_coalesced_frames);
9455                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9456                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9457         }
9458
9459         for (; i < tp->irq_max - 1; i++) {
9460                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9461                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9462                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9463         }
9464 }
9465
9466 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9467 {
9468         tg3_coal_tx_init(tp, ec);
9469         tg3_coal_rx_init(tp, ec);
9470
9471         if (!tg3_flag(tp, 5705_PLUS)) {
9472                 u32 val = ec->stats_block_coalesce_usecs;
9473
9474                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9475                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9476
9477                 if (!tp->link_up)
9478                         val = 0;
9479
9480                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9481         }
9482 }
9483
9484 /* tp->lock is held. */
9485 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9486 {
9487         u32 txrcb, limit;
9488
9489         /* Disable all transmit rings but the first. */
9490         if (!tg3_flag(tp, 5705_PLUS))
9491                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9492         else if (tg3_flag(tp, 5717_PLUS))
9493                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9494         else if (tg3_flag(tp, 57765_CLASS) ||
9495                  tg3_asic_rev(tp) == ASIC_REV_5762)
9496                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9497         else
9498                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9499
9500         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9501              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9502                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9503                               BDINFO_FLAGS_DISABLED);
9504 }
9505
9506 /* tp->lock is held. */
9507 static void tg3_tx_rcbs_init(struct tg3 *tp)
9508 {
9509         int i = 0;
9510         u32 txrcb = NIC_SRAM_SEND_RCB;
9511
9512         if (tg3_flag(tp, ENABLE_TSS))
9513                 i++;
9514
9515         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9516                 struct tg3_napi *tnapi = &tp->napi[i];
9517
9518                 if (!tnapi->tx_ring)
9519                         continue;
9520
9521                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9522                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9523                                NIC_SRAM_TX_BUFFER_DESC);
9524         }
9525 }
9526
9527 /* tp->lock is held. */
9528 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9529 {
9530         u32 rxrcb, limit;
9531
9532         /* Disable all receive return rings but the first. */
9533         if (tg3_flag(tp, 5717_PLUS))
9534                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9535         else if (!tg3_flag(tp, 5705_PLUS))
9536                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9537         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9538                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9539                  tg3_flag(tp, 57765_CLASS))
9540                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9541         else
9542                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9543
9544         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9545              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9546                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9547                               BDINFO_FLAGS_DISABLED);
9548 }
9549
9550 /* tp->lock is held. */
9551 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9552 {
9553         int i = 0;
9554         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9555
9556         if (tg3_flag(tp, ENABLE_RSS))
9557                 i++;
9558
9559         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9560                 struct tg3_napi *tnapi = &tp->napi[i];
9561
9562                 if (!tnapi->rx_rcb)
9563                         continue;
9564
9565                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9566                                (tp->rx_ret_ring_mask + 1) <<
9567                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9568         }
9569 }
9570
9571 /* tp->lock is held. */
9572 static void tg3_rings_reset(struct tg3 *tp)
9573 {
9574         int i;
9575         u32 stblk;
9576         struct tg3_napi *tnapi = &tp->napi[0];
9577
9578         tg3_tx_rcbs_disable(tp);
9579
9580         tg3_rx_ret_rcbs_disable(tp);
9581
9582         /* Disable interrupts */
9583         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9584         tp->napi[0].chk_msi_cnt = 0;
9585         tp->napi[0].last_rx_cons = 0;
9586         tp->napi[0].last_tx_cons = 0;
9587
9588         /* Zero mailbox registers. */
9589         if (tg3_flag(tp, SUPPORT_MSIX)) {
9590                 for (i = 1; i < tp->irq_max; i++) {
9591                         tp->napi[i].tx_prod = 0;
9592                         tp->napi[i].tx_cons = 0;
9593                         if (tg3_flag(tp, ENABLE_TSS))
9594                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9595                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9596                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9597                         tp->napi[i].chk_msi_cnt = 0;
9598                         tp->napi[i].last_rx_cons = 0;
9599                         tp->napi[i].last_tx_cons = 0;
9600                 }
9601                 if (!tg3_flag(tp, ENABLE_TSS))
9602                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9603         } else {
9604                 tp->napi[0].tx_prod = 0;
9605                 tp->napi[0].tx_cons = 0;
9606                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9607                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9608         }
9609
9610         /* Make sure the NIC-based send BD rings are disabled. */
9611         if (!tg3_flag(tp, 5705_PLUS)) {
9612                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9613                 for (i = 0; i < 16; i++)
9614                         tw32_tx_mbox(mbox + i * 8, 0);
9615         }
9616
9617         /* Clear status block in ram. */
9618         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9619
9620         /* Set status block DMA address */
9621         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9622              ((u64) tnapi->status_mapping >> 32));
9623         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9624              ((u64) tnapi->status_mapping & 0xffffffff));
9625
9626         stblk = HOSTCC_STATBLCK_RING1;
9627
9628         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9629                 u64 mapping = (u64)tnapi->status_mapping;
9630                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9631                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9632                 stblk += 8;
9633
9634                 /* Clear status block in ram. */
9635                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9636         }
9637
9638         tg3_tx_rcbs_init(tp);
9639         tg3_rx_ret_rcbs_init(tp);
9640 }
9641
9642 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9643 {
9644         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9645
9646         if (!tg3_flag(tp, 5750_PLUS) ||
9647             tg3_flag(tp, 5780_CLASS) ||
9648             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9649             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9650             tg3_flag(tp, 57765_PLUS))
9651                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9652         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9653                  tg3_asic_rev(tp) == ASIC_REV_5787)
9654                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9655         else
9656                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9657
9658         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9659         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9660
9661         val = min(nic_rep_thresh, host_rep_thresh);
9662         tw32(RCVBDI_STD_THRESH, val);
9663
9664         if (tg3_flag(tp, 57765_PLUS))
9665                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9666
9667         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9668                 return;
9669
9670         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9671
9672         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9673
9674         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9675         tw32(RCVBDI_JUMBO_THRESH, val);
9676
9677         if (tg3_flag(tp, 57765_PLUS))
9678                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9679 }
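
/* Editorial sketch with made-up numbers: with, say, tp->rx_pending = 200
 * and tp->rx_std_max_post = 32, the code above computes
 * host_rep_thresh = max(200 / 8, 1) = 25 and
 * nic_rep_thresh = min(bdcache_maxcnt / 2, 32), then programs
 * RCVBDI_STD_THRESH with the smaller of the two -- roughly, how far
 * the BD supply may drain before replenishment is triggered.
 */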
9680
9681 static inline u32 calc_crc(unsigned char *buf, int len)
9682 {
9683         u32 reg;
9684         u32 tmp;
9685         int j, k;
9686
9687         reg = 0xffffffff;
9688
9689         for (j = 0; j < len; j++) {
9690                 reg ^= buf[j];
9691
9692                 for (k = 0; k < 8; k++) {
9693                         tmp = reg & 0x01;
9694
9695                         reg >>= 1;
9696
9697                         if (tmp)
9698                                 reg ^= 0xedb88320;
9699                 }
9700         }
9701
9702         return ~reg;
9703 }
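
/* Editorial note (assumes <linux/crc32.h>): calc_crc() above is a
 * plain bit-reflected CRC-32 -- polynomial 0xedb88320, initial value
 * 0xffffffff, final inversion -- i.e. the same CRC the 802.3 FCS uses.
 * An equivalent expression using the kernel's CRC library would be:
 *
 *      u32 crc = ~crc32_le(~0, buf, len);
 *
 * (crc32_le() takes the seed but applies no final inversion, hence the
 * outer complement.)
 */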
9704
9705 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9706 {
9707         /* accept or reject all multicast frames */
9708         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9709         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9710         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9711         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9712 }
9713
9714 static void __tg3_set_rx_mode(struct net_device *dev)
9715 {
9716         struct tg3 *tp = netdev_priv(dev);
9717         u32 rx_mode;
9718
9719         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9720                                   RX_MODE_KEEP_VLAN_TAG);
9721
9722 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9723         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9724          * flag clear.
9725          */
9726         if (!tg3_flag(tp, ENABLE_ASF))
9727                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9728 #endif
9729
9730         if (dev->flags & IFF_PROMISC) {
9731                 /* Promiscuous mode. */
9732                 rx_mode |= RX_MODE_PROMISC;
9733         } else if (dev->flags & IFF_ALLMULTI) {
9734                 /* Accept all multicast. */
9735                 tg3_set_multi(tp, 1);
9736         } else if (netdev_mc_empty(dev)) {
9737                 /* Reject all multicast. */
9738                 tg3_set_multi(tp, 0);
9739         } else {
9740                 /* Accept one or more multicast(s). */
9741                 struct netdev_hw_addr *ha;
9742                 u32 mc_filter[4] = { 0, };
9743                 u32 regidx;
9744                 u32 bit;
9745                 u32 crc;
9746
9747                 netdev_for_each_mc_addr(ha, dev) {
9748                         crc = calc_crc(ha->addr, ETH_ALEN);
9749                         bit = ~crc & 0x7f;
9750                         regidx = (bit & 0x60) >> 5;
9751                         bit &= 0x1f;
9752                         mc_filter[regidx] |= (1 << bit);
9753                 }
9754
9755                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9756                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9757                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9758                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9759         }
9760
9761         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9762                 rx_mode |= RX_MODE_PROMISC;
9763         } else if (!(dev->flags & IFF_PROMISC)) {
9764                 /* Add all entries to the MAC address filter list */
9765                 int i = 0;
9766                 struct netdev_hw_addr *ha;
9767
9768                 netdev_for_each_uc_addr(ha, dev) {
9769                         __tg3_set_one_mac_addr(tp, ha->addr,
9770                                                i + TG3_UCAST_ADDR_IDX(tp));
9771                         i++;
9772                 }
9773         }
9774
9775         if (rx_mode != tp->rx_mode) {
9776                 tp->rx_mode = rx_mode;
9777                 tw32_f(MAC_RX_MODE, rx_mode);
9778                 udelay(10);
9779         }
9780 }
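
/* Editorial worked example (hypothetical CRC value): for a multicast
 * address whose calc_crc() result ends in seven zero bits
 * (crc & 0x7f == 0), the filter math above yields
 * bit = ~crc & 0x7f = 0x7f, regidx = (0x7f & 0x60) >> 5 = 3, and
 * bit &= 0x1f leaves 31 -- i.e. the address hashes to bit 31 of
 * MAC_HASH_REG_3.  The hash is just the complemented low 7 bits of the
 * CRC, split 2/5 into a register index and a bit position.
 */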
9781
9782 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9783 {
9784         int i;
9785
9786         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9787                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9788 }
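
/* Editorial note: ethtool_rxfh_indir_default(i, qcnt) is simply
 * i % qcnt, so the default indirection table round-robins flows evenly
 * across the qcnt rx queues.
 */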
9789
9790 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9791 {
9792         int i;
9793
9794         if (!tg3_flag(tp, SUPPORT_MSIX))
9795                 return;
9796
9797         if (tp->rxq_cnt == 1) {
9798                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9799                 return;
9800         }
9801
9802         /* Validate table against the current rx queue count */
9803         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9804                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9805                         break;
9806         }
9807
9808         if (i != TG3_RSS_INDIR_TBL_SIZE)
9809                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9810 }
9811
9812 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9813 {
9814         int i = 0;
9815         u32 reg = MAC_RSS_INDIR_TBL_0;
9816
9817         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9818                 u32 val = tp->rss_ind_tbl[i];
9819                 i++;
9820                 for (; i % 8; i++) {
9821                         val <<= 4;
9822                         val |= tp->rss_ind_tbl[i];
9823                 }
9824                 tw32(reg, val);
9825                 reg += 4;
9826         }
9827 }
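
/* Editorial worked example: the loop above packs eight 4-bit table
 * entries into each 32-bit register, first entry in the most
 * significant nibble.  With hypothetical entries { 0, 1, 2, 3, 0, 1,
 * 2, 3 } the first register written would hold 0x01230123.
 */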
9828
9829 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9830 {
9831         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9832                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9833         else
9834                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9835 }
9836
9837 /* tp->lock is held. */
9838 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9839 {
9840         u32 val, rdmac_mode;
9841         int i, err, limit;
9842         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9843
9844         tg3_disable_ints(tp);
9845
9846         tg3_stop_fw(tp);
9847
9848         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9849
9850         if (tg3_flag(tp, INIT_COMPLETE))
9851                 tg3_abort_hw(tp, 1);
9852
9853         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9854             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9855                 tg3_phy_pull_config(tp);
9856                 tg3_eee_pull_config(tp, NULL);
9857                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9858         }
9859
9860         /* Enable MAC control of LPI */
9861         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9862                 tg3_setup_eee(tp);
9863
9864         if (reset_phy)
9865                 tg3_phy_reset(tp);
9866
9867         err = tg3_chip_reset(tp);
9868         if (err)
9869                 return err;
9870
9871         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9872
9873         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9874                 val = tr32(TG3_CPMU_CTRL);
9875                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9876                 tw32(TG3_CPMU_CTRL, val);
9877
9878                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9879                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9880                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9881                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9882
9883                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9884                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9885                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9886                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9887
9888                 val = tr32(TG3_CPMU_HST_ACC);
9889                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9890                 val |= CPMU_HST_ACC_MACCLK_6_25;
9891                 tw32(TG3_CPMU_HST_ACC, val);
9892         }
9893
9894         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9895                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9896                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9897                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9898                 tw32(PCIE_PWR_MGMT_THRESH, val);
9899
9900                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9901                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9902
9903                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9904
9905                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9906                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9907         }
9908
9909         if (tg3_flag(tp, L1PLLPD_EN)) {
9910                 u32 grc_mode = tr32(GRC_MODE);
9911
9912                 /* Access the lower 1K of PL PCIE block registers. */
9913                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9914                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9915
9916                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9917                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9918                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9919
9920                 tw32(GRC_MODE, grc_mode);
9921         }
9922
9923         if (tg3_flag(tp, 57765_CLASS)) {
9924                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9925                         u32 grc_mode = tr32(GRC_MODE);
9926
9927                         /* Access the lower 1K of PL PCIE block registers. */
9928                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9929                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9930
9931                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9932                                    TG3_PCIE_PL_LO_PHYCTL5);
9933                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9934                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9935
9936                         tw32(GRC_MODE, grc_mode);
9937                 }
9938
9939                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9940                         u32 grc_mode;
9941
9942                         /* Fix transmit hangs */
9943                         val = tr32(TG3_CPMU_PADRNG_CTL);
9944                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9945                         tw32(TG3_CPMU_PADRNG_CTL, val);
9946
9947                         grc_mode = tr32(GRC_MODE);
9948
9949                         /* Access the lower 1K of DL PCIE block registers. */
9950                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9951                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9952
9953                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9954                                    TG3_PCIE_DL_LO_FTSMAX);
9955                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9956                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9957                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9958
9959                         tw32(GRC_MODE, grc_mode);
9960                 }
9961
9962                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9963                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9964                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9965                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9966         }
9967
9968         /* This works around an issue with Athlon chipsets on
9969          * B3 tigon3 silicon.  This bit has no effect on any
9970          * other revision.  But do not set this on PCI Express
9971          * chips and don't even touch the clocks if the CPMU is present.
9972          */
9973         if (!tg3_flag(tp, CPMU_PRESENT)) {
9974                 if (!tg3_flag(tp, PCI_EXPRESS))
9975                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9976                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9977         }
9978
9979         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9980             tg3_flag(tp, PCIX_MODE)) {
9981                 val = tr32(TG3PCI_PCISTATE);
9982                 val |= PCISTATE_RETRY_SAME_DMA;
9983                 tw32(TG3PCI_PCISTATE, val);
9984         }
9985
9986         if (tg3_flag(tp, ENABLE_APE)) {
9987                 /* Allow reads and writes to the
9988                  * APE register and memory space.
9989                  */
9990                 val = tr32(TG3PCI_PCISTATE);
9991                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9992                        PCISTATE_ALLOW_APE_SHMEM_WR |
9993                        PCISTATE_ALLOW_APE_PSPACE_WR;
9994                 tw32(TG3PCI_PCISTATE, val);
9995         }
9996
9997         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9998                 /* Enable some hw fixes.  */
9999                 val = tr32(TG3PCI_MSI_DATA);
10000                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10001                 tw32(TG3PCI_MSI_DATA, val);
10002         }
10003
10004         /* Descriptor ring init may make accesses to the
10005          * NIC SRAM area to set up the TX descriptors, so we
10006          * can only do this after the hardware has been
10007          * successfully reset.
10008          */
10009         err = tg3_init_rings(tp);
10010         if (err)
10011                 return err;
10012
10013         if (tg3_flag(tp, 57765_PLUS)) {
10014                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10015                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10016                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10017                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10018                 if (!tg3_flag(tp, 57765_CLASS) &&
10019                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10020                     tg3_asic_rev(tp) != ASIC_REV_5762)
10021                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10022                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10023         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10024                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10025                 /* This value is determined during the probe-time DMA
10026                  * engine test, tg3_test_dma().
10027                  */
10028                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10029         }
10030
10031         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10032                           GRC_MODE_4X_NIC_SEND_RINGS |
10033                           GRC_MODE_NO_TX_PHDR_CSUM |
10034                           GRC_MODE_NO_RX_PHDR_CSUM);
10035         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10036
10037         /* Pseudo-header checksum is done by hardware logic and not
10038          * the offload processors, so make the chip do the pseudo-
10039          * header checksums on receive.  For transmit it is more
10040          * convenient to do the pseudo-header checksum in software
10041          * as Linux does that on transmit for us in all cases.
10042          */
10043         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10044
10045         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10046         if (tp->rxptpctl)
10047                 tw32(TG3_RX_PTP_CTL,
10048                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10049
10050         if (tg3_flag(tp, PTP_CAPABLE))
10051                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10052
10053         tw32(GRC_MODE, tp->grc_mode | val);
10054
10055         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
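        /* A prescaler field value of N divides the clock by N + 1, so the 65
         * written below presumably derives a 1 MHz timer tick from the
         * 66 MHz core clock.
         */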
10056         val = tr32(GRC_MISC_CFG);
10057         val &= ~0xff;
10058         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10059         tw32(GRC_MISC_CFG, val);
10060
10061         /* Initialize MBUF/DESC pool. */
10062         if (tg3_flag(tp, 5750_PLUS)) {
10063                 /* Do nothing.  */
10064         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10065                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10066                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10067                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10068                 else
10069                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10070                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10071                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10072         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10073                 int fw_len;
10074
10075                 fw_len = tp->fw_len;
10076                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10077                 tw32(BUFMGR_MB_POOL_ADDR,
10078                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10079                 tw32(BUFMGR_MB_POOL_SIZE,
10080                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10081         }
10082
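        /* Select standard or jumbo MBUF watermarks to match the current MTU;
         * the jumbo variants use thresholds sized for larger frames.
         */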
10083         if (tp->dev->mtu <= ETH_DATA_LEN) {
10084                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10085                      tp->bufmgr_config.mbuf_read_dma_low_water);
10086                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10087                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10088                 tw32(BUFMGR_MB_HIGH_WATER,
10089                      tp->bufmgr_config.mbuf_high_water);
10090         } else {
10091                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10092                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10093                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10094                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10095                 tw32(BUFMGR_MB_HIGH_WATER,
10096                      tp->bufmgr_config.mbuf_high_water_jumbo);
10097         }
10098         tw32(BUFMGR_DMA_LOW_WATER,
10099              tp->bufmgr_config.dma_low_water);
10100         tw32(BUFMGR_DMA_HIGH_WATER,
10101              tp->bufmgr_config.dma_high_water);
10102
10103         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10104         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10105                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10106         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10107             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10108             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10109             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10110                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10111         tw32(BUFMGR_MODE, val);
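        /* Wait for the enable bit to latch: up to 2000 * 10 us = 20 ms. */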
10112         for (i = 0; i < 2000; i++) {
10113                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10114                         break;
10115                 udelay(10);
10116         }
10117         if (i >= 2000) {
10118                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10119                 return -ENODEV;
10120         }
10121
10122         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10123                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10124
10125         tg3_setup_rxbd_thresholds(tp);
10126
10127         /* Initialize TG3_BDINFOs at:
10128          *  RCVDBDI_STD_BD:     standard eth size rx ring
10129          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10130          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10131          *
10132          * like so:
10133          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10134          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10135          *                              ring attribute flags
10136          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10137          *
10138          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10139          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10140          *
10141          * The size of each ring is fixed in the firmware, but the location is
10142          * configurable.
10143          */
10144         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10145              ((u64) tpr->rx_std_mapping >> 32));
10146         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10147              ((u64) tpr->rx_std_mapping & 0xffffffff));
10148         if (!tg3_flag(tp, 5717_PLUS))
10149                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10150                      NIC_SRAM_RX_BUFFER_DESC);
10151
10152         /* Disable the mini ring */
10153         if (!tg3_flag(tp, 5705_PLUS))
10154                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10155                      BDINFO_FLAGS_DISABLED);
10156
10157         /* Program the jumbo buffer descriptor ring control
10158          * blocks on those devices that have them.
10159          */
10160         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10161             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10162
10163                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10164                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10165                              ((u64) tpr->rx_jmb_mapping >> 32));
10166                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10167                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10168                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10169                               BDINFO_FLAGS_MAXLEN_SHIFT;
10170                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10171                              val | BDINFO_FLAGS_USE_EXT_RECV);
10172                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10173                             tg3_flag(tp, 57765_CLASS) ||
10174                             tg3_asic_rev(tp) == ASIC_REV_5762)
10175                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10176                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10177                 } else {
10178                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10179                              BDINFO_FLAGS_DISABLED);
10180                 }
10181
10182                 if (tg3_flag(tp, 57765_PLUS)) {
10183                         val = TG3_RX_STD_RING_SIZE(tp);
10184                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10185                         val |= (TG3_RX_STD_DMA_SZ << 2);
10186                 } else
10187                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10188         } else
10189                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10190
10191         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10192
10193         tpr->rx_std_prod_idx = tp->rx_pending;
10194         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10195
10196         tpr->rx_jmb_prod_idx =
10197                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10198         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10199
10200         tg3_rings_reset(tp);
10201
10202         /* Initialize MAC address and backoff seed. */
10203         __tg3_set_mac_addr(tp, false);
10204
10205         /* MTU + ethernet header + FCS + optional VLAN tag */
10206         tw32(MAC_RX_MTU_SIZE,
10207              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
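        /* e.g. with the default 1500-byte MTU this programs
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
         */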
10208
10209         /* The slot time is changed by tg3_setup_phy if we
10210          * run at gigabit with half duplex.
10211          */
10212         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10213               (6 << TX_LENGTHS_IPG_SHIFT) |
10214               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10215
10216         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10217             tg3_asic_rev(tp) == ASIC_REV_5762)
10218                 val |= tr32(MAC_TX_LENGTHS) &
10219                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10220                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10221
10222         tw32(MAC_TX_LENGTHS, val);
10223
10224         /* Receive rules. */
10225         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10226         tw32(RCVLPC_CONFIG, 0x0181);
10227
10228         /* Calculate the RDMAC_MODE setting early; we need it to determine
10229          * the RCVLPC_STATE_ENABLE mask.
10230          */
10231         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10232                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10233                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10234                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10235                       RDMAC_MODE_LNGREAD_ENAB);
10236
10237         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10238                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10239
10240         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10241             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10242             tg3_asic_rev(tp) == ASIC_REV_57780)
10243                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10244                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10245                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10246
10247         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10248             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10249                 if (tg3_flag(tp, TSO_CAPABLE) &&
10250                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10251                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10252                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10253                            !tg3_flag(tp, IS_5788)) {
10254                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10255                 }
10256         }
10257
10258         if (tg3_flag(tp, PCI_EXPRESS))
10259                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10260
10261         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10262                 tp->dma_limit = 0;
10263                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10264                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10265                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10266                 }
10267         }
10268
10269         if (tg3_flag(tp, HW_TSO_1) ||
10270             tg3_flag(tp, HW_TSO_2) ||
10271             tg3_flag(tp, HW_TSO_3))
10272                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10273
10274         if (tg3_flag(tp, 57765_PLUS) ||
10275             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10276             tg3_asic_rev(tp) == ASIC_REV_57780)
10277                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10278
10279         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10280             tg3_asic_rev(tp) == ASIC_REV_5762)
10281                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10284             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10285             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10286             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10287             tg3_flag(tp, 57765_PLUS)) {
10288                 u32 tgtreg;
10289
10290                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10291                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10292                 else
10293                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10294
10295                 val = tr32(tgtreg);
10296                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10297                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10298                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10299                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10300                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10301                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10302                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10303                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10304                 }
10305                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10306         }
10307
10308         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10309             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10310             tg3_asic_rev(tp) == ASIC_REV_5762) {
10311                 u32 tgtreg;
10312
10313                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10314                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10315                 else
10316                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10317
10318                 val = tr32(tgtreg);
10319                 tw32(tgtreg, val |
10320                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10321                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10322         }
10323
10324         /* Receive/send statistics. */
10325         if (tg3_flag(tp, 5750_PLUS)) {
10326                 val = tr32(RCVLPC_STATS_ENABLE);
10327                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10328                 tw32(RCVLPC_STATS_ENABLE, val);
10329         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10330                    tg3_flag(tp, TSO_CAPABLE)) {
10331                 val = tr32(RCVLPC_STATS_ENABLE);
10332                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10333                 tw32(RCVLPC_STATS_ENABLE, val);
10334         } else {
10335                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10336         }
10337         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10338         tw32(SNDDATAI_STATSENAB, 0xffffff);
10339         tw32(SNDDATAI_STATSCTRL,
10340              (SNDDATAI_SCTRL_ENABLE |
10341               SNDDATAI_SCTRL_FASTUPD));
10342
10343         /* Set up the host coalescing engine. */
10344         tw32(HOSTCC_MODE, 0);
10345         for (i = 0; i < 2000; i++) {
10346                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10347                         break;
10348                 udelay(10);
10349         }
10350
10351         __tg3_set_coalesce(tp, &tp->coal);
10352
10353         if (!tg3_flag(tp, 5705_PLUS)) {
10354                 /* Status/statistics block address.  See tg3_timer,
10355                  * the tg3_periodic_fetch_stats call there, and
10356                  * tg3_get_stats to see how this works for 5705/5750 chips.
10357                  */
10358                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10359                      ((u64) tp->stats_mapping >> 32));
10360                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10361                      ((u64) tp->stats_mapping & 0xffffffff));
10362                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10363
10364                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10365
10366                 /* Clear statistics and status block memory areas */
10367                 for (i = NIC_SRAM_STATS_BLK;
10368                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10369                      i += sizeof(u32)) {
10370                         tg3_write_mem(tp, i, 0);
10371                         udelay(40);
10372                 }
10373         }
10374
10375         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10376
10377         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10378         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10379         if (!tg3_flag(tp, 5705_PLUS))
10380                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10381
10382         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10383                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10384                 /* Reset to prevent intermittently losing the first rx packet. */
10385                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10386                 udelay(10);
10387         }
10388
10389         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10390                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10391                         MAC_MODE_FHDE_ENABLE;
10392         if (tg3_flag(tp, ENABLE_APE))
10393                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10394         if (!tg3_flag(tp, 5705_PLUS) &&
10395             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10396             tg3_asic_rev(tp) != ASIC_REV_5700)
10397                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10398         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10399         udelay(40);
10400
10401         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10402          * If TG3_FLAG_IS_NIC is zero, we should read the
10403          * register to preserve the GPIO settings for LOMs. The GPIOs,
10404          * whether used as inputs or outputs, are set by boot code after
10405          * reset.
10406          */
10407         if (!tg3_flag(tp, IS_NIC)) {
10408                 u32 gpio_mask;
10409
10410                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10411                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10412                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10413
10414                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10415                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10416                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10417
10418                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10419                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10420
10421                 tp->grc_local_ctrl &= ~gpio_mask;
10422                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10423
10424                 /* GPIO1 must be driven high for eeprom write protect */
10425                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10426                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10427                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10428         }
10429         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10430         udelay(100);
10431
10432         if (tg3_flag(tp, USING_MSIX)) {
10433                 val = tr32(MSGINT_MODE);
10434                 val |= MSGINT_MODE_ENABLE;
10435                 if (tp->irq_cnt > 1)
10436                         val |= MSGINT_MODE_MULTIVEC_EN;
10437                 if (!tg3_flag(tp, 1SHOT_MSI))
10438                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10439                 tw32(MSGINT_MODE, val);
10440         }
10441
10442         if (!tg3_flag(tp, 5705_PLUS)) {
10443                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10444                 udelay(40);
10445         }
10446
10447         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10448                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10449                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10450                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10451                WDMAC_MODE_LNGREAD_ENAB);
10452
10453         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10454             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10455                 if (tg3_flag(tp, TSO_CAPABLE) &&
10456                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10457                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10458                         /* nothing */
10459                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10460                            !tg3_flag(tp, IS_5788)) {
10461                         val |= WDMAC_MODE_RX_ACCEL;
10462                 }
10463         }
10464
10465         /* Enable host coalescing bug fix */
10466         if (tg3_flag(tp, 5755_PLUS))
10467                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10468
10469         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10470                 val |= WDMAC_MODE_BURST_ALL_DATA;
10471
10472         tw32_f(WDMAC_MODE, val);
10473         udelay(40);
10474
10475         if (tg3_flag(tp, PCIX_MODE)) {
10476                 u16 pcix_cmd;
10477
10478                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10479                                      &pcix_cmd);
10480                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10481                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10482                         pcix_cmd |= PCI_X_CMD_READ_2K;
10483                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10484                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10485                         pcix_cmd |= PCI_X_CMD_READ_2K;
10486                 }
10487                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10488                                       pcix_cmd);
10489         }
10490
10491         tw32_f(RDMAC_MODE, rdmac_mode);
10492         udelay(40);
10493
10494         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10495             tg3_asic_rev(tp) == ASIC_REV_5720) {
10496                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10497                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10498                                 break;
10499                 }
10500                 if (i < TG3_NUM_RDMA_CHANNELS) {
10501                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10502                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10503                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10504                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10505                 }
10506         }
10507
10508         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10509         if (!tg3_flag(tp, 5705_PLUS))
10510                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10511
10512         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10513                 tw32(SNDDATAC_MODE,
10514                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10515         else
10516                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10517
10518         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10519         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10520         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10521         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10522                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10523         tw32(RCVDBDI_MODE, val);
10524         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10525         if (tg3_flag(tp, HW_TSO_1) ||
10526             tg3_flag(tp, HW_TSO_2) ||
10527             tg3_flag(tp, HW_TSO_3))
10528                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10529         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10530         if (tg3_flag(tp, ENABLE_TSS))
10531                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10532         tw32(SNDBDI_MODE, val);
10533         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10534
10535         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10536                 err = tg3_load_5701_a0_firmware_fix(tp);
10537                 if (err)
10538                         return err;
10539         }
10540
10541         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10542                 /* Ignore any errors from the firmware download.  If the
10543                  * download fails, the device will operate with EEE disabled.
10544                  */
10545                 tg3_load_57766_firmware(tp);
10546         }
10547
10548         if (tg3_flag(tp, TSO_CAPABLE)) {
10549                 err = tg3_load_tso_firmware(tp);
10550                 if (err)
10551                         return err;
10552         }
10553
10554         tp->tx_mode = TX_MODE_ENABLE;
10555
10556         if (tg3_flag(tp, 5755_PLUS) ||
10557             tg3_asic_rev(tp) == ASIC_REV_5906)
10558                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10559
10560         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10561             tg3_asic_rev(tp) == ASIC_REV_5762) {
10562                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10563                 tp->tx_mode &= ~val;
10564                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10565         }
10566
10567         tw32_f(MAC_TX_MODE, tp->tx_mode);
10568         udelay(100);
10569
10570         if (tg3_flag(tp, ENABLE_RSS)) {
10571                 u32 rss_key[10];
10572
10573                 tg3_rss_write_indir_tbl(tp);
10574
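                /* Fill a 40-byte (10 x u32) hash key from the kernel's
                 * system-wide random RSS key before programming it below.
                 */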
10575                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10576
10577                 for (i = 0; i < 10; i++)
10578                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10579         }
10580
10581         tp->rx_mode = RX_MODE_ENABLE;
10582         if (tg3_flag(tp, 5755_PLUS))
10583                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10584
10585         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10586                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10587
10588         if (tg3_flag(tp, ENABLE_RSS))
10589                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10590                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10591                                RX_MODE_RSS_IPV6_HASH_EN |
10592                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10593                                RX_MODE_RSS_IPV4_HASH_EN |
10594                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10595
10596         tw32_f(MAC_RX_MODE, tp->rx_mode);
10597         udelay(10);
10598
10599         tw32(MAC_LED_CTRL, tp->led_ctrl);
10600
10601         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10602         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10603                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10604                 udelay(10);
10605         }
10606         tw32_f(MAC_RX_MODE, tp->rx_mode);
10607         udelay(10);
10608
10609         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10610                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10611                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10612                         /* Set drive transmission level to 1.2V, but only
10613                          * if the signal pre-emphasis bit is not set.  */
10614                         val = tr32(MAC_SERDES_CFG);
10615                         val &= 0xfffff000;
10616                         val |= 0x880;
10617                         tw32(MAC_SERDES_CFG, val);
10618                 }
10619                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10620                         tw32(MAC_SERDES_CFG, 0x616000);
10621         }
10622
10623         /* Prevent the chip from dropping frames when flow control
10624          * is enabled.
10625          */
10626         if (tg3_flag(tp, 57765_CLASS))
10627                 val = 1;
10628         else
10629                 val = 2;
10630         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10631
10632         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10633             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10634                 /* Use hardware link auto-negotiation */
10635                 tg3_flag_set(tp, HW_AUTONEG);
10636         }
10637
10638         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10639             tg3_asic_rev(tp) == ASIC_REV_5714) {
10640                 u32 tmp;
10641
10642                 tmp = tr32(SERDES_RX_CTRL);
10643                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10644                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10645                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10646                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10647         }
10648
10649         if (!tg3_flag(tp, USE_PHYLIB)) {
10650                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10651                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10652
10653                 err = tg3_setup_phy(tp, false);
10654                 if (err)
10655                         return err;
10656
10657                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10658                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10659                         u32 tmp;
10660
10661                         /* Clear CRC stats. */
10662                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10663                                 tg3_writephy(tp, MII_TG3_TEST1,
10664                                              tmp | MII_TG3_TEST1_CRC_EN);
10665                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10666                         }
10667                 }
10668         }
10669
10670         __tg3_set_rx_mode(tp->dev);
10671
10672         /* Initialize receive rules. */
10673         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10674         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10675         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10676         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10677
10678         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10679                 limit = 8;
10680         else
10681                 limit = 16;
10682         if (tg3_flag(tp, ENABLE_ASF))
10683                 limit -= 4;
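        /* Each case below deliberately falls through to the next, clearing
         * every receive rule from limit - 1 down to rule 4.  Rules 0 and 1
         * were programmed above; rules 2 and 3 are intentionally left alone.
         */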
10684         switch (limit) {
10685         case 16:
10686                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10687         case 15:
10688                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10689         case 14:
10690                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10691         case 13:
10692                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10693         case 12:
10694                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10695         case 11:
10696                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10697         case 10:
10698                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10699         case 9:
10700                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10701         case 8:
10702                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10703         case 7:
10704                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10705         case 6:
10706                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10707         case 5:
10708                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10709         case 4:
10710                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10711         case 3:
10712                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10713         case 2:
10714         case 1:
10715
10716         default:
10717                 break;
10718         }
10719
10720         if (tg3_flag(tp, ENABLE_APE))
10721                 /* Write our heartbeat update interval to APE. */
10722                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10723                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10724
10725         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10726
10727         return 0;
10728 }
10729
10730 /* Called at device open time to get the chip ready for
10731  * packet processing.  Invoked with tp->lock held.
10732  */
10733 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10734 {
10735         /* The chip may have just been powered on. If so, the boot code may still
10736          * be running initialization. Wait for it to finish to avoid races in
10737          * accessing the hardware.
10738          */
10739         tg3_enable_register_access(tp);
10740         tg3_poll_fw(tp);
10741
10742         tg3_switch_clocks(tp);
10743
10744         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10745
10746         return tg3_reset_hw(tp, reset_phy);
10747 }
10748
10749 #ifdef CONFIG_TIGON3_HWMON
10750 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10751 {
10752         int i;
10753
10754         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10755                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10756
10757                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10758                 off += len;
10759
10760                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10761                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10762                         memset(ocir, 0, TG3_OCIR_LEN);
10763         }
10764 }
10765
10766 /* sysfs attributes for hwmon */
10767 static ssize_t tg3_show_temp(struct device *dev,
10768                              struct device_attribute *devattr, char *buf)
10769 {
10770         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10771         struct tg3 *tp = dev_get_drvdata(dev);
10772         u32 temperature;
10773
10774         spin_lock_bh(&tp->lock);
10775         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10776                                 sizeof(temperature));
10777         spin_unlock_bh(&tp->lock);
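        /* The raw reading is in degrees Celsius; scale it to the millidegrees
         * that the hwmon sysfs ABI expects.
         */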
10778         return sprintf(buf, "%u\n", temperature * 1000);
10779 }
10780
10781
10782 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10783                           TG3_TEMP_SENSOR_OFFSET);
10784 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10785                           TG3_TEMP_CAUTION_OFFSET);
10786 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10787                           TG3_TEMP_MAX_OFFSET);
10788
10789 static struct attribute *tg3_attrs[] = {
10790         &sensor_dev_attr_temp1_input.dev_attr.attr,
10791         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10792         &sensor_dev_attr_temp1_max.dev_attr.attr,
10793         NULL
10794 };
10795 ATTRIBUTE_GROUPS(tg3);
10796
10797 static void tg3_hwmon_close(struct tg3 *tp)
10798 {
10799         if (tp->hwmon_dev) {
10800                 hwmon_device_unregister(tp->hwmon_dev);
10801                 tp->hwmon_dev = NULL;
10802         }
10803 }
10804
10805 static void tg3_hwmon_open(struct tg3 *tp)
10806 {
10807         int i;
10808         u32 size = 0;
10809         struct pci_dev *pdev = tp->pdev;
10810         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10811
10812         tg3_sd_scan_scratchpad(tp, ocirs);
10813
10814         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10815                 if (!ocirs[i].src_data_length)
10816                         continue;
10817
10818                 size += ocirs[i].src_hdr_length;
10819                 size += ocirs[i].src_data_length;
10820         }
10821
10822         if (!size)
10823                 return;
10824
10825         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10826                                                           tp, tg3_groups);
10827         if (IS_ERR(tp->hwmon_dev)) {
10828                 tp->hwmon_dev = NULL;
10829                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10830         }
10831 }
10832 #else
10833 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10834 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10835 #endif /* CONFIG_TIGON3_HWMON */
10836
10837
10838 #define TG3_STAT_ADD32(PSTAT, REG) \
10839 do {    u32 __val = tr32(REG); \
10840         (PSTAT)->low += __val; \
10841         if ((PSTAT)->low < __val) \
10842                 (PSTAT)->high += 1; \
10843 } while (0)
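/* Worked example: if (PSTAT)->low holds 0xffffff80 and the register reads
 * 0x100, low wraps to 0x80 (now < __val), so high gains the carry --
 * accumulating the chip's 32-bit counters into 64-bit software totals.
 */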
10844
10845 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10846 {
10847         struct tg3_hw_stats *sp = tp->hw_stats;
10848
10849         if (!tp->link_up)
10850                 return;
10851
10852         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10853         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10854         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10855         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10856         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10857         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10858         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10859         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10860         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10861         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10862         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10863         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10864         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10865         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10866                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10867                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10868                 u32 val;
10869
10870                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10871                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10872                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10873                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10874         }
10875
10876         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10877         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10878         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10879         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10880         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10881         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10882         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10883         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10884         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10885         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10886         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10887         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10888         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10889         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10890
10891         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10892         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10893             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10894             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10895             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10896                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10897         } else {
10898                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10899                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10900                 if (val) {
10901                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10902                         sp->rx_discards.low += val;
10903                         if (sp->rx_discards.low < val)
10904                                 sp->rx_discards.high += 1;
10905                 }
10906                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10907         }
10908         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10909 }
10910
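/* Work around occasionally lost MSIs: if a vector reports pending work but
 * neither its rx nor tx consumer index has advanced since the last timer
 * tick, fire the handler by hand (after one tick of grace via chk_msi_cnt).
 */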
10911 static void tg3_chk_missed_msi(struct tg3 *tp)
10912 {
10913         u32 i;
10914
10915         for (i = 0; i < tp->irq_cnt; i++) {
10916                 struct tg3_napi *tnapi = &tp->napi[i];
10917
10918                 if (tg3_has_work(tnapi)) {
10919                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10920                             tnapi->last_tx_cons == tnapi->tx_cons) {
10921                                 if (tnapi->chk_msi_cnt < 1) {
10922                                         tnapi->chk_msi_cnt++;
10923                                         return;
10924                                 }
10925                                 tg3_msi(0, tnapi);
10926                         }
10927                 }
10928                 tnapi->chk_msi_cnt = 0;
10929                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10930                 tnapi->last_tx_cons = tnapi->tx_cons;
10931         }
10932 }
10933
10934 static void tg3_timer(struct timer_list *t)
10935 {
10936         struct tg3 *tp = from_timer(tp, t, timer);
10937
10938         spin_lock(&tp->lock);
10939
10940         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10941                 spin_unlock(&tp->lock);
10942                 goto restart_timer;
10943         }
10944
10945         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10946             tg3_flag(tp, 57765_CLASS))
10947                 tg3_chk_missed_msi(tp);
10948
10949         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10950                 /* BCM4785: Flush posted writes from GbE to host memory. */
10951                 tr32(HOSTCC_MODE);
10952         }
10953
10954         if (!tg3_flag(tp, TAGGED_STATUS)) {
10955                 /* All of this garbage is because, when using non-tagged
10956                  * IRQ status, the mailbox/status_block protocol the chip
10957                  * uses with the CPU is race prone.
10958                  */
10959                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10960                         tw32(GRC_LOCAL_CTRL,
10961                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10962                 } else {
10963                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10964                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10965                 }
10966
10967                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10968                         spin_unlock(&tp->lock);
10969                         tg3_reset_task_schedule(tp);
10970                         goto restart_timer;
10971                 }
10972         }
10973
10974         /* This part only runs once per second. */
10975         if (!--tp->timer_counter) {
10976                 if (tg3_flag(tp, 5705_PLUS))
10977                         tg3_periodic_fetch_stats(tp);
10978
10979                 if (tp->setlpicnt && !--tp->setlpicnt)
10980                         tg3_phy_eee_enable(tp);
10981
10982                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10983                         u32 mac_stat;
10984                         int phy_event;
10985
10986                         mac_stat = tr32(MAC_STATUS);
10987
10988                         phy_event = 0;
10989                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10990                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10991                                         phy_event = 1;
10992                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10993                                 phy_event = 1;
10994
10995                         if (phy_event)
10996                                 tg3_setup_phy(tp, false);
10997                 } else if (tg3_flag(tp, POLL_SERDES)) {
10998                         u32 mac_stat = tr32(MAC_STATUS);
10999                         int need_setup = 0;
11000
11001                         if (tp->link_up &&
11002                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11003                                 need_setup = 1;
11004                         }
11005                         if (!tp->link_up &&
11006                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11007                                          MAC_STATUS_SIGNAL_DET))) {
11008                                 need_setup = 1;
11009                         }
11010                         if (need_setup) {
11011                                 if (!tp->serdes_counter) {
11012                                         tw32_f(MAC_MODE,
11013                                              (tp->mac_mode &
11014                                               ~MAC_MODE_PORT_MODE_MASK));
11015                                         udelay(40);
11016                                         tw32_f(MAC_MODE, tp->mac_mode);
11017                                         udelay(40);
11018                                 }
11019                                 tg3_setup_phy(tp, false);
11020                         }
11021                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11022                            tg3_flag(tp, 5780_CLASS)) {
11023                         tg3_serdes_parallel_detect(tp);
11024                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11025                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11026                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11027                                          TG3_CPMU_STATUS_LINK_MASK);
11028
11029                         if (link_up != tp->link_up)
11030                                 tg3_setup_phy(tp, false);
11031                 }
11032
11033                 tp->timer_counter = tp->timer_multiplier;
11034         }
11035
11036         /* Heartbeat is only sent once every 2 seconds.
11037          *
11038          * The heartbeat is to tell the ASF firmware that the host
11039          * driver is still alive.  In the event that the OS crashes,
11040          * ASF needs to reset the hardware to free up the FIFO space
11041          * that may be filled with rx packets destined for the host.
11042          * If the FIFO is full, ASF will no longer function properly.
11043          *
11044          * Unintended resets have been reported on real-time kernels,
11045          * where the timer doesn't run on time.  Netpoll will also have
11046          * the same problem.
11047          *
11048          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11049          * to check the ring condition when the heartbeat is expiring
11050          * before doing the reset.  This will prevent most unintended
11051          * resets.
11052          */
11053         if (!--tp->asf_counter) {
11054                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11055                         tg3_wait_for_event_ack(tp);
11056
11057                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11058                                       FWCMD_NICDRV_ALIVE3);
11059                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11060                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11061                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11062
11063                         tg3_generate_fw_event(tp);
11064                 }
11065                 tp->asf_counter = tp->asf_multiplier;
11066         }
11067
11068         spin_unlock(&tp->lock);
11069
11070 restart_timer:
11071         tp->timer.expires = jiffies + tp->timer_offset;
11072         add_timer(&tp->timer);
11073 }
11074
11075 static void tg3_timer_init(struct tg3 *tp)
11076 {
11077         if (tg3_flag(tp, TAGGED_STATUS) &&
11078             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11079             !tg3_flag(tp, 57765_CLASS))
11080                 tp->timer_offset = HZ;
11081         else
11082                 tp->timer_offset = HZ / 10;
11083
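        /* Example: with HZ = 1000, tagged-status chips get a 1 s timer
         * (timer_multiplier = 1) while the rest poll every 100 ms
         * (timer_multiplier = 10), so the once-per-second work in
         * tg3_timer() still runs on schedule either way.
         */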
11084         BUG_ON(tp->timer_offset > HZ);
11085
11086         tp->timer_multiplier = (HZ / tp->timer_offset);
11087         tp->asf_multiplier = (HZ / tp->timer_offset) *
11088                              TG3_FW_UPDATE_FREQ_SEC;
11089
11090         timer_setup(&tp->timer, tg3_timer, 0);
11091 }
11092
11093 static void tg3_timer_start(struct tg3 *tp)
11094 {
11095         tp->asf_counter   = tp->asf_multiplier;
11096         tp->timer_counter = tp->timer_multiplier;
11097
11098         tp->timer.expires = jiffies + tp->timer_offset;
11099         add_timer(&tp->timer);
11100 }
11101
11102 static void tg3_timer_stop(struct tg3 *tp)
11103 {
11104         del_timer_sync(&tp->timer);
11105 }
11106
11107 /* Restart hardware after configuration changes, self-test, etc.
11108  * Invoked with tp->lock held.
11109  */
11110 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11111         __releases(tp->lock)
11112         __acquires(tp->lock)
11113 {
11114         int err;
11115
11116         err = tg3_init_hw(tp, reset_phy);
11117         if (err) {
11118                 netdev_err(tp->dev,
11119                            "Failed to re-initialize device, aborting\n");
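                /* Tear the device down.  dev_close() must run without
                 * tp->lock held, hence the unlock/relock (and the sparse
                 * annotations on this function).
                 */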
11120                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11121                 tg3_full_unlock(tp);
11122                 tg3_timer_stop(tp);
11123                 tp->irq_sync = 0;
11124                 tg3_napi_enable(tp);
11125                 dev_close(tp->dev);
11126                 tg3_full_lock(tp, 0);
11127         }
11128         return err;
11129 }
11130
11131 static void tg3_reset_task(struct work_struct *work)
11132 {
11133         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11134         int err;
11135
11136         rtnl_lock();
11137         tg3_full_lock(tp, 0);
11138
11139         if (!netif_running(tp->dev)) {
11140                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11141                 tg3_full_unlock(tp);
11142                 rtnl_unlock();
11143                 return;
11144         }
11145
11146         tg3_full_unlock(tp);
11147
11148         tg3_phy_stop(tp);
11149
11150         tg3_netif_stop(tp);
11151
11152         tg3_full_lock(tp, 1);
11153
11154         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11155                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11156                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11157                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11158                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11159         }
11160
11161         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11162         err = tg3_init_hw(tp, true);
11163         if (err)
11164                 goto out;
11165
11166         tg3_netif_start(tp);
11167
11168 out:
11169         tg3_full_unlock(tp);
11170
11171         if (!err)
11172                 tg3_phy_start(tp);
11173
11174         tg3_flag_clear(tp, RESET_TASK_PENDING);
11175         rtnl_unlock();
11176 }
11177
11178 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11179 {
11180         irq_handler_t fn;
11181         unsigned long flags;
11182         char *name;
11183         struct tg3_napi *tnapi = &tp->napi[irq_num];
11184
11185         if (tp->irq_cnt == 1)
11186                 name = tp->dev->name;
11187         else {
11188                 name = &tnapi->irq_lbl[0];
11189                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11190                         snprintf(name, IFNAMSIZ,
11191                                  "%s-txrx-%d", tp->dev->name, irq_num);
11192                 else if (tnapi->tx_buffers)
11193                         snprintf(name, IFNAMSIZ,
11194                                  "%s-tx-%d", tp->dev->name, irq_num);
11195                 else if (tnapi->rx_rcb)
11196                         snprintf(name, IFNAMSIZ,
11197                                  "%s-rx-%d", tp->dev->name, irq_num);
11198                 else
11199                         snprintf(name, IFNAMSIZ,
11200                                  "%s-%d", tp->dev->name, irq_num);
11201                 name[IFNAMSIZ-1] = 0;
11202         }
11203
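        /* MSI/MSI-X vectors are exclusive to this device, so no IRQF_SHARED;
         * a legacy INTx line may be shared and must keep the flag.
         */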
11204         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11205                 fn = tg3_msi;
11206                 if (tg3_flag(tp, 1SHOT_MSI))
11207                         fn = tg3_msi_1shot;
11208                 flags = 0;
11209         } else {
11210                 fn = tg3_interrupt;
11211                 if (tg3_flag(tp, TAGGED_STATUS))
11212                         fn = tg3_interrupt_tagged;
11213                 flags = IRQF_SHARED;
11214         }
11215
11216         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11217 }
11218
11219 static int tg3_test_interrupt(struct tg3 *tp)
11220 {
11221         struct tg3_napi *tnapi = &tp->napi[0];
11222         struct net_device *dev = tp->dev;
11223         int err, i, intr_ok = 0;
11224         u32 val;
11225
11226         if (!netif_running(dev))
11227                 return -ENODEV;
11228
11229         tg3_disable_ints(tp);
11230
11231         free_irq(tnapi->irq_vec, tnapi);
11232
11233         /*
11234          * Turn off MSI one-shot mode.  Otherwise this test has no way
11235          * to observe whether the interrupt was delivered.
11236          */
11237         if (tg3_flag(tp, 57765_PLUS)) {
11238                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11239                 tw32(MSGINT_MODE, val);
11240         }
11241
11242         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11243                           IRQF_SHARED, dev->name, tnapi);
11244         if (err)
11245                 return err;
11246
11247         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11248         tg3_enable_ints(tp);
11249
11250         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11251                tnapi->coal_now);
11252
11253         for (i = 0; i < 5; i++) {
11254                 u32 int_mbox, misc_host_ctrl;
11255
11256                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11257                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11258
11259                 if ((int_mbox != 0) ||
11260                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11261                         intr_ok = 1;
11262                         break;
11263                 }
11264
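                      /* On 57765+ chips, acknowledge the last seen status
                       * tag so that a pending test interrupt can still
                       * fire.
                       */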
11265                 if (tg3_flag(tp, 57765_PLUS) &&
11266                     tnapi->hw_status->status_tag != tnapi->last_tag)
11267                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11268
11269                 msleep(10);
11270         }
11271
11272         tg3_disable_ints(tp);
11273
11274         free_irq(tnapi->irq_vec, tnapi);
11275
11276         err = tg3_request_irq(tp, 0);
11277
11278         if (err)
11279                 return err;
11280
11281         if (intr_ok) {
11282                 /* Reenable MSI one shot mode. */
11283                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11284                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11285                         tw32(MSGINT_MODE, val);
11286                 }
11287                 return 0;
11288         }
11289
11290         return -EIO;
11291 }
11292
11293 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11294  * INTx mode is successfully restored.
11295  */
11296 static int tg3_test_msi(struct tg3 *tp)
11297 {
11298         int err;
11299         u16 pci_cmd;
11300
11301         if (!tg3_flag(tp, USING_MSI))
11302                 return 0;
11303
11304         /* Turn off SERR reporting in case MSI terminates with Master
11305          * Abort.
11306          */
11307         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11308         pci_write_config_word(tp->pdev, PCI_COMMAND,
11309                               pci_cmd & ~PCI_COMMAND_SERR);
11310
11311         err = tg3_test_interrupt(tp);
11312
11313         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11314
11315         if (!err)
11316                 return 0;
11317
11318         /* other failures */
11319         if (err != -EIO)
11320                 return err;
11321
11322         /* MSI test failed, go back to INTx mode */
11323         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11324                     "to INTx mode. Please report this failure to the PCI "
11325                     "maintainer and include system chipset information\n");
11326
11327         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11328
11329         pci_disable_msi(tp->pdev);
11330
11331         tg3_flag_clear(tp, USING_MSI);
11332         tp->napi[0].irq_vec = tp->pdev->irq;
11333
11334         err = tg3_request_irq(tp, 0);
11335         if (err)
11336                 return err;
11337
11338         /* Need to reset the chip because the MSI cycle may have terminated
11339          * with Master Abort.
11340          */
11341         tg3_full_lock(tp, 1);
11342
11343         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11344         err = tg3_init_hw(tp, true);
11345
11346         tg3_full_unlock(tp);
11347
11348         if (err)
11349                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11350
11351         return err;
11352 }
11353
11354 static int tg3_request_firmware(struct tg3 *tp)
11355 {
11356         const struct tg3_firmware_hdr *fw_hdr;
11357
11358         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11359                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11360                            tp->fw_needed);
11361                 return -ENOENT;
11362         }
11363
11364         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11365
11366         /* Firmware blob starts with version numbers, followed by
11367          * start address and the _full_ length including BSS sections
11368          * (which must be no shorter than the actual data, of course).
11369          */
11370
11371         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11372         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11373                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11374                            tp->fw_len, tp->fw_needed);
11375                 release_firmware(tp->fw);
11376                 tp->fw = NULL;
11377                 return -EINVAL;
11378         }
11379
11380         /* We no longer need firmware; we have it. */
11381         tp->fw_needed = NULL;
11382         return 0;
11383 }
11384
11385 static u32 tg3_irq_count(struct tg3 *tp)
11386 {
11387         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11388
11389         if (irq_cnt > 1) {
11390                 /* We want as many rx rings enabled as there are cpus.
11391                  * In multiqueue MSI-X mode, the first MSI-X vector
11392                  * only deals with link interrupts, etc, so we add
11393                  * one to the number of vectors we are requesting.
11394                  */
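                      /* Example: rxq_cnt = 4 and txq_cnt = 1 request
                       * min(4 + 1, irq_max) vectors: one per RX ring plus
                       * the link/event vector.
                       */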
11395                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11396         }
11397
11398         return irq_cnt;
11399 }
11400
11401 static bool tg3_enable_msix(struct tg3 *tp)
11402 {
11403         int i, rc;
11404         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11405
11406         tp->txq_cnt = tp->txq_req;
11407         tp->rxq_cnt = tp->rxq_req;
11408         if (!tp->rxq_cnt)
11409                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11410         if (tp->rxq_cnt > tp->rxq_max)
11411                 tp->rxq_cnt = tp->rxq_max;
11412
11413         /* Disable multiple TX rings by default.  Simple round-robin hardware
11414          * scheduling of the TX rings can cause starvation of rings with
11415          * small packets when other rings have TSO or jumbo packets.
11416          */
11417         if (!tp->txq_req)
11418                 tp->txq_cnt = 1;
11419
11420         tp->irq_cnt = tg3_irq_count(tp);
11421
11422         for (i = 0; i < tp->irq_max; i++) {
11423                 msix_ent[i].entry  = i;
11424                 msix_ent[i].vector = 0;
11425         }
11426
11427         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11428         if (rc < 0) {
11429                 return false;
11430         } else if (rc < tp->irq_cnt) {
11431                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11432                               tp->irq_cnt, rc);
11433                 tp->irq_cnt = rc;
11434                 tp->rxq_cnt = max(rc - 1, 1);
11435                 if (tp->txq_cnt)
11436                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11437         }
11438
11439         for (i = 0; i < tp->irq_max; i++)
11440                 tp->napi[i].irq_vec = msix_ent[i].vector;
11441
11442         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11443                 pci_disable_msix(tp->pdev);
11444                 return false;
11445         }
11446
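              /* Only a single vector was granted: run single-queue and
               * skip the RSS/TSS setup below.
               */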
11447         if (tp->irq_cnt == 1)
11448                 return true;
11449
11450         tg3_flag_set(tp, ENABLE_RSS);
11451
11452         if (tp->txq_cnt > 1)
11453                 tg3_flag_set(tp, ENABLE_TSS);
11454
11455         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11456
11457         return true;
11458 }
11459
11460 static void tg3_ints_init(struct tg3 *tp)
11461 {
11462         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11463             !tg3_flag(tp, TAGGED_STATUS)) {
11464                 /* All MSI-supporting chips should support tagged
11465                  * status; warn and fall back to INTx if not.
11466                  */
11467                 netdev_warn(tp->dev,
11468                             "MSI without TAGGED_STATUS? Not using MSI\n");
11469                 goto defcfg;
11470         }
11471
11472         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11473                 tg3_flag_set(tp, USING_MSIX);
11474         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11475                 tg3_flag_set(tp, USING_MSI);
11476
11477         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11478                 u32 msi_mode = tr32(MSGINT_MODE);
11479                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11480                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11481                 if (!tg3_flag(tp, 1SHOT_MSI))
11482                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11483                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11484         }
11485 defcfg:
11486         if (!tg3_flag(tp, USING_MSIX)) {
11487                 tp->irq_cnt = 1;
11488                 tp->napi[0].irq_vec = tp->pdev->irq;
11489         }
11490
11491         if (tp->irq_cnt == 1) {
11492                 tp->txq_cnt = 1;
11493                 tp->rxq_cnt = 1;
11494                 netif_set_real_num_tx_queues(tp->dev, 1);
11495                 netif_set_real_num_rx_queues(tp->dev, 1);
11496         }
11497 }
11498
11499 static void tg3_ints_fini(struct tg3 *tp)
11500 {
11501         if (tg3_flag(tp, USING_MSIX))
11502                 pci_disable_msix(tp->pdev);
11503         else if (tg3_flag(tp, USING_MSI))
11504                 pci_disable_msi(tp->pdev);
11505         tg3_flag_clear(tp, USING_MSI);
11506         tg3_flag_clear(tp, USING_MSIX);
11507         tg3_flag_clear(tp, ENABLE_RSS);
11508         tg3_flag_clear(tp, ENABLE_TSS);
11509 }
11510
11511 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11512                      bool init)
11513 {
11514         struct net_device *dev = tp->dev;
11515         int i, err;
11516
11517         /*
11518          * Set up interrupts first so we know how
11519          * many NAPI resources to allocate.
11520          */
11521         tg3_ints_init(tp);
11522
11523         tg3_rss_check_indir_tbl(tp);
11524
11525         /* The placement of this call is tied
11526          * to the setup and use of Host TX descriptors.
11527          */
11528         err = tg3_alloc_consistent(tp);
11529         if (err)
11530                 goto out_ints_fini;
11531
11532         tg3_napi_init(tp);
11533
11534         tg3_napi_enable(tp);
11535
11536         for (i = 0; i < tp->irq_cnt; i++) {
11537                 err = tg3_request_irq(tp, i);
11538                 if (err) {
11539                         for (i--; i >= 0; i--) {
11540                                 struct tg3_napi *tnapi = &tp->napi[i];
11541
11542                                 free_irq(tnapi->irq_vec, tnapi);
11543                         }
11544                         goto out_napi_fini;
11545                 }
11546         }
11547
11548         tg3_full_lock(tp, 0);
11549
11550         if (init)
11551                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11552
11553         err = tg3_init_hw(tp, reset_phy);
11554         if (err) {
11555                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11556                 tg3_free_rings(tp);
11557         }
11558
11559         tg3_full_unlock(tp);
11560
11561         if (err)
11562                 goto out_free_irq;
11563
11564         if (test_irq && tg3_flag(tp, USING_MSI)) {
11565                 err = tg3_test_msi(tp);
11566
11567                 if (err) {
11568                         tg3_full_lock(tp, 0);
11569                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11570                         tg3_free_rings(tp);
11571                         tg3_full_unlock(tp);
11572
11573                         goto out_napi_fini;
11574                 }
11575
11576                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11577                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11578
11579                         tw32(PCIE_TRANSACTION_CFG,
11580                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11581                 }
11582         }
11583
11584         tg3_phy_start(tp);
11585
11586         tg3_hwmon_open(tp);
11587
11588         tg3_full_lock(tp, 0);
11589
11590         tg3_timer_start(tp);
11591         tg3_flag_set(tp, INIT_COMPLETE);
11592         tg3_enable_ints(tp);
11593
11594         tg3_ptp_resume(tp);
11595
11596         tg3_full_unlock(tp);
11597
11598         netif_tx_start_all_queues(dev);
11599
11600         /*
11601          * Reset the loopback feature if it was turned on while the device was
11602          * down; make sure that it is reinstalled properly now.
11603          */
11604         if (dev->features & NETIF_F_LOOPBACK)
11605                 tg3_set_loopback(dev, dev->features);
11606
11607         return 0;
11608
11609 out_free_irq:
11610         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11611                 struct tg3_napi *tnapi = &tp->napi[i];
11612                 free_irq(tnapi->irq_vec, tnapi);
11613         }
11614
11615 out_napi_fini:
11616         tg3_napi_disable(tp);
11617         tg3_napi_fini(tp);
11618         tg3_free_consistent(tp);
11619
11620 out_ints_fini:
11621         tg3_ints_fini(tp);
11622
11623         return err;
11624 }
11625
11626 static void tg3_stop(struct tg3 *tp)
11627 {
11628         int i;
11629
11630         tg3_reset_task_cancel(tp);
11631         tg3_netif_stop(tp);
11632
11633         tg3_timer_stop(tp);
11634
11635         tg3_hwmon_close(tp);
11636
11637         tg3_phy_stop(tp);
11638
11639         tg3_full_lock(tp, 1);
11640
11641         tg3_disable_ints(tp);
11642
11643         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11644         tg3_free_rings(tp);
11645         tg3_flag_clear(tp, INIT_COMPLETE);
11646
11647         tg3_full_unlock(tp);
11648
11649         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11650                 struct tg3_napi *tnapi = &tp->napi[i];
11651                 free_irq(tnapi->irq_vec, tnapi);
11652         }
11653
11654         tg3_ints_fini(tp);
11655
11656         tg3_napi_fini(tp);
11657
11658         tg3_free_consistent(tp);
11659 }
11660
11661 static int tg3_open(struct net_device *dev)
11662 {
11663         struct tg3 *tp = netdev_priv(dev);
11664         int err;
11665
11666         if (tp->pcierr_recovery) {
11667                 netdev_err(dev, "Failed to open device. PCI error recovery "
11668                            "in progress\n");
11669                 return -EAGAIN;
11670         }
11671
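              /* Firmware is optional on most chips: losing it costs only
               * EEE on the 57766 and only TSO elsewhere, but it is
               * mandatory on 5701 A0.
               */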
11672         if (tp->fw_needed) {
11673                 err = tg3_request_firmware(tp);
11674                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11675                         if (err) {
11676                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11677                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11678                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11679                                 netdev_warn(tp->dev, "EEE capability restored\n");
11680                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11681                         }
11682                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11683                         if (err)
11684                                 return err;
11685                 } else if (err) {
11686                         netdev_warn(tp->dev, "TSO capability disabled\n");
11687                         tg3_flag_clear(tp, TSO_CAPABLE);
11688                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11689                         netdev_notice(tp->dev, "TSO capability restored\n");
11690                         tg3_flag_set(tp, TSO_CAPABLE);
11691                 }
11692         }
11693
11694         tg3_carrier_off(tp);
11695
11696         err = tg3_power_up(tp);
11697         if (err)
11698                 return err;
11699
11700         tg3_full_lock(tp, 0);
11701
11702         tg3_disable_ints(tp);
11703         tg3_flag_clear(tp, INIT_COMPLETE);
11704
11705         tg3_full_unlock(tp);
11706
11707         err = tg3_start(tp,
11708                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11709                         true, true);
11710         if (err) {
11711                 tg3_frob_aux_power(tp, false);
11712                 pci_set_power_state(tp->pdev, PCI_D3hot);
11713         }
11714
11715         return err;
11716 }
11717
11718 static int tg3_close(struct net_device *dev)
11719 {
11720         struct tg3 *tp = netdev_priv(dev);
11721
11722         if (tp->pcierr_recovery) {
11723                 netdev_err(dev, "Failed to close device. PCI error recovery "
11724                            "in progress\n");
11725                 return -EAGAIN;
11726         }
11727
11728         tg3_stop(tp);
11729
11730         if (pci_device_is_present(tp->pdev)) {
11731                 tg3_power_down_prepare(tp);
11732
11733                 tg3_carrier_off(tp);
11734         }
11735         return 0;
11736 }
11737
11738 static inline u64 get_stat64(tg3_stat64_t *val)
11739 {
11740         return ((u64)val->high << 32) | ((u64)val->low);
11741 }
11742
11743 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11744 {
11745         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11746
11747         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11748             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11749              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11750                 u32 val;
11751
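                      /* On 5700/5701 copper devices, CRC errors come from
                       * the PHY: enable CRC counting via MII_TG3_TEST1 and
                       * accumulate the PHY's receive error counter.
                       */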
11752                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11753                         tg3_writephy(tp, MII_TG3_TEST1,
11754                                      val | MII_TG3_TEST1_CRC_EN);
11755                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11756                 } else
11757                         val = 0;
11758
11759                 tp->phy_crc_errors += val;
11760
11761                 return tp->phy_crc_errors;
11762         }
11763
11764         return get_stat64(&hw_stats->rx_fcs_errors);
11765 }
11766
11767 #define ESTAT_ADD(member) \
11768         estats->member =        old_estats->member + \
11769                                 get_stat64(&hw_stats->member)
11770
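      /* Counters are accumulated on top of the snapshot saved before the
       * last chip reset, since the hardware statistics restart from zero
       * at that point.
       */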
11771 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11772 {
11773         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11774         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11775
11776         ESTAT_ADD(rx_octets);
11777         ESTAT_ADD(rx_fragments);
11778         ESTAT_ADD(rx_ucast_packets);
11779         ESTAT_ADD(rx_mcast_packets);
11780         ESTAT_ADD(rx_bcast_packets);
11781         ESTAT_ADD(rx_fcs_errors);
11782         ESTAT_ADD(rx_align_errors);
11783         ESTAT_ADD(rx_xon_pause_rcvd);
11784         ESTAT_ADD(rx_xoff_pause_rcvd);
11785         ESTAT_ADD(rx_mac_ctrl_rcvd);
11786         ESTAT_ADD(rx_xoff_entered);
11787         ESTAT_ADD(rx_frame_too_long_errors);
11788         ESTAT_ADD(rx_jabbers);
11789         ESTAT_ADD(rx_undersize_packets);
11790         ESTAT_ADD(rx_in_length_errors);
11791         ESTAT_ADD(rx_out_length_errors);
11792         ESTAT_ADD(rx_64_or_less_octet_packets);
11793         ESTAT_ADD(rx_65_to_127_octet_packets);
11794         ESTAT_ADD(rx_128_to_255_octet_packets);
11795         ESTAT_ADD(rx_256_to_511_octet_packets);
11796         ESTAT_ADD(rx_512_to_1023_octet_packets);
11797         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11798         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11799         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11800         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11801         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11802
11803         ESTAT_ADD(tx_octets);
11804         ESTAT_ADD(tx_collisions);
11805         ESTAT_ADD(tx_xon_sent);
11806         ESTAT_ADD(tx_xoff_sent);
11807         ESTAT_ADD(tx_flow_control);
11808         ESTAT_ADD(tx_mac_errors);
11809         ESTAT_ADD(tx_single_collisions);
11810         ESTAT_ADD(tx_mult_collisions);
11811         ESTAT_ADD(tx_deferred);
11812         ESTAT_ADD(tx_excessive_collisions);
11813         ESTAT_ADD(tx_late_collisions);
11814         ESTAT_ADD(tx_collide_2times);
11815         ESTAT_ADD(tx_collide_3times);
11816         ESTAT_ADD(tx_collide_4times);
11817         ESTAT_ADD(tx_collide_5times);
11818         ESTAT_ADD(tx_collide_6times);
11819         ESTAT_ADD(tx_collide_7times);
11820         ESTAT_ADD(tx_collide_8times);
11821         ESTAT_ADD(tx_collide_9times);
11822         ESTAT_ADD(tx_collide_10times);
11823         ESTAT_ADD(tx_collide_11times);
11824         ESTAT_ADD(tx_collide_12times);
11825         ESTAT_ADD(tx_collide_13times);
11826         ESTAT_ADD(tx_collide_14times);
11827         ESTAT_ADD(tx_collide_15times);
11828         ESTAT_ADD(tx_ucast_packets);
11829         ESTAT_ADD(tx_mcast_packets);
11830         ESTAT_ADD(tx_bcast_packets);
11831         ESTAT_ADD(tx_carrier_sense_errors);
11832         ESTAT_ADD(tx_discards);
11833         ESTAT_ADD(tx_errors);
11834
11835         ESTAT_ADD(dma_writeq_full);
11836         ESTAT_ADD(dma_write_prioq_full);
11837         ESTAT_ADD(rxbds_empty);
11838         ESTAT_ADD(rx_discards);
11839         ESTAT_ADD(rx_errors);
11840         ESTAT_ADD(rx_threshold_hit);
11841
11842         ESTAT_ADD(dma_readq_full);
11843         ESTAT_ADD(dma_read_prioq_full);
11844         ESTAT_ADD(tx_comp_queue_full);
11845
11846         ESTAT_ADD(ring_set_send_prod_index);
11847         ESTAT_ADD(ring_status_update);
11848         ESTAT_ADD(nic_irqs);
11849         ESTAT_ADD(nic_avoided_irqs);
11850         ESTAT_ADD(nic_tx_threshold_hit);
11851
11852         ESTAT_ADD(mbuf_lwm_thresh_hit);
11853 }
11854
11855 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11856 {
11857         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11858         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11859
11860         stats->rx_packets = old_stats->rx_packets +
11861                 get_stat64(&hw_stats->rx_ucast_packets) +
11862                 get_stat64(&hw_stats->rx_mcast_packets) +
11863                 get_stat64(&hw_stats->rx_bcast_packets);
11864
11865         stats->tx_packets = old_stats->tx_packets +
11866                 get_stat64(&hw_stats->tx_ucast_packets) +
11867                 get_stat64(&hw_stats->tx_mcast_packets) +
11868                 get_stat64(&hw_stats->tx_bcast_packets);
11869
11870         stats->rx_bytes = old_stats->rx_bytes +
11871                 get_stat64(&hw_stats->rx_octets);
11872         stats->tx_bytes = old_stats->tx_bytes +
11873                 get_stat64(&hw_stats->tx_octets);
11874
11875         stats->rx_errors = old_stats->rx_errors +
11876                 get_stat64(&hw_stats->rx_errors);
11877         stats->tx_errors = old_stats->tx_errors +
11878                 get_stat64(&hw_stats->tx_errors) +
11879                 get_stat64(&hw_stats->tx_mac_errors) +
11880                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11881                 get_stat64(&hw_stats->tx_discards);
11882
11883         stats->multicast = old_stats->multicast +
11884                 get_stat64(&hw_stats->rx_mcast_packets);
11885         stats->collisions = old_stats->collisions +
11886                 get_stat64(&hw_stats->tx_collisions);
11887
11888         stats->rx_length_errors = old_stats->rx_length_errors +
11889                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11890                 get_stat64(&hw_stats->rx_undersize_packets);
11891
11892         stats->rx_frame_errors = old_stats->rx_frame_errors +
11893                 get_stat64(&hw_stats->rx_align_errors);
11894         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11895                 get_stat64(&hw_stats->tx_discards);
11896         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11897                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11898
11899         stats->rx_crc_errors = old_stats->rx_crc_errors +
11900                 tg3_calc_crc_errors(tp);
11901
11902         stats->rx_missed_errors = old_stats->rx_missed_errors +
11903                 get_stat64(&hw_stats->rx_discards);
11904
11905         stats->rx_dropped = tp->rx_dropped;
11906         stats->tx_dropped = tp->tx_dropped;
11907 }
11908
11909 static int tg3_get_regs_len(struct net_device *dev)
11910 {
11911         return TG3_REG_BLK_SIZE;
11912 }
11913
11914 static void tg3_get_regs(struct net_device *dev,
11915                 struct ethtool_regs *regs, void *_p)
11916 {
11917         struct tg3 *tp = netdev_priv(dev);
11918
11919         regs->version = 0;
11920
11921         memset(_p, 0, TG3_REG_BLK_SIZE);
11922
11923         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11924                 return;
11925
11926         tg3_full_lock(tp, 0);
11927
11928         tg3_dump_legacy_regs(tp, (u32 *)_p);
11929
11930         tg3_full_unlock(tp);
11931 }
11932
11933 static int tg3_get_eeprom_len(struct net_device *dev)
11934 {
11935         struct tg3 *tp = netdev_priv(dev);
11936
11937         return tp->nvram_size;
11938 }
11939
11940 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11941 {
11942         struct tg3 *tp = netdev_priv(dev);
11943         int ret, cpmu_restore = 0;
11944         u8  *pd;
11945         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11946         __be32 val;
11947
11948         if (tg3_flag(tp, NO_NVRAM))
11949                 return -EINVAL;
11950
11951         offset = eeprom->offset;
11952         len = eeprom->len;
11953         eeprom->len = 0;
11954
11955         eeprom->magic = TG3_EEPROM_MAGIC;
11956
11957         /* Override clock, link aware and link idle modes */
11958         if (tg3_flag(tp, CPMU_PRESENT)) {
11959                 cpmu_val = tr32(TG3_CPMU_CTRL);
11960                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11961                                 CPMU_CTRL_LINK_IDLE_MODE)) {
11962                         tw32(TG3_CPMU_CTRL, cpmu_val &
11963                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
11964                                              CPMU_CTRL_LINK_IDLE_MODE));
11965                         cpmu_restore = 1;
11966                 }
11967         }
11968         tg3_override_clk(tp);
11969
11970         if (offset & 3) {
11971                 /* adjustments to start on required 4 byte boundary */
11972                 b_offset = offset & 3;
11973                 b_count = 4 - b_offset;
11974                 if (b_count > len) {
11975                         /* i.e. offset=1 len=2 */
11976                         b_count = len;
11977                 }
11978                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11979                 if (ret)
11980                         goto eeprom_done;
11981                 memcpy(data, ((char *)&val) + b_offset, b_count);
11982                 len -= b_count;
11983                 offset += b_count;
11984                 eeprom->len += b_count;
11985         }
11986
11987         /* read bytes up to the last 4 byte boundary */
11988         pd = &data[eeprom->len];
11989         for (i = 0; i < (len - (len & 3)); i += 4) {
11990                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11991                 if (ret) {
11992                         if (i)
11993                                 i -= 4;
11994                         eeprom->len += i;
11995                         goto eeprom_done;
11996                 }
11997                 memcpy(pd + i, &val, 4);
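                      /* NVRAM reads are slow; on long transfers yield the
                       * CPU periodically and bail out early if a signal
                       * is pending.
                       */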
11998                 if (need_resched()) {
11999                         if (signal_pending(current)) {
12000                                 eeprom->len += i;
12001                                 ret = -EINTR;
12002                                 goto eeprom_done;
12003                         }
12004                         cond_resched();
12005                 }
12006         }
12007         eeprom->len += i;
12008
12009         if (len & 3) {
12010                 /* read last bytes not ending on 4 byte boundary */
12011                 pd = &data[eeprom->len];
12012                 b_count = len & 3;
12013                 b_offset = offset + len - b_count;
12014                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12015                 if (ret)
12016                         goto eeprom_done;
12017                 memcpy(pd, &val, b_count);
12018                 eeprom->len += b_count;
12019         }
12020         ret = 0;
12021
12022 eeprom_done:
12023         /* Restore clock, link aware and link idle modes */
12024         tg3_restore_clk(tp);
12025         if (cpmu_restore)
12026                 tw32(TG3_CPMU_CTRL, cpmu_val);
12027
12028         return ret;
12029 }
12030
12031 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12032 {
12033         struct tg3 *tp = netdev_priv(dev);
12034         int ret;
12035         u32 offset, len, b_offset, odd_len;
12036         u8 *buf;
12037         __be32 start = 0, end;
12038
12039         if (tg3_flag(tp, NO_NVRAM) ||
12040             eeprom->magic != TG3_EEPROM_MAGIC)
12041                 return -EINVAL;
12042
12043         offset = eeprom->offset;
12044         len = eeprom->len;
12045
12046         if ((b_offset = (offset & 3))) {
12047                 /* adjustments to start on required 4 byte boundary */
12048                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12049                 if (ret)
12050                         return ret;
12051                 len += b_offset;
12052                 offset &= ~3;
12053                 if (len < 4)
12054                         len = 4;
12055         }
12056
12057         odd_len = 0;
12058         if (len & 3) {
12059                 /* adjustments to end on required 4 byte boundary */
12060                 odd_len = 1;
12061                 len = (len + 3) & ~3;
12062                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12063                 if (ret)
12064                         return ret;
12065         }
12066
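              /* For writes that are not 32-bit aligned at either end,
               * merge the new data into a bounce buffer padded with the
               * existing NVRAM contents read above.
               */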
12067         buf = data;
12068         if (b_offset || odd_len) {
12069                 buf = kmalloc(len, GFP_KERNEL);
12070                 if (!buf)
12071                         return -ENOMEM;
12072                 if (b_offset)
12073                         memcpy(buf, &start, 4);
12074                 if (odd_len)
12075                         memcpy(buf+len-4, &end, 4);
12076                 memcpy(buf + b_offset, data, eeprom->len);
12077         }
12078
12079         ret = tg3_nvram_write_block(tp, offset, len, buf);
12080
12081         if (buf != data)
12082                 kfree(buf);
12083
12084         return ret;
12085 }
12086
12087 static int tg3_get_link_ksettings(struct net_device *dev,
12088                                   struct ethtool_link_ksettings *cmd)
12089 {
12090         struct tg3 *tp = netdev_priv(dev);
12091         u32 supported, advertising;
12092
12093         if (tg3_flag(tp, USE_PHYLIB)) {
12094                 struct phy_device *phydev;
12095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12096                         return -EAGAIN;
12097                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12098                 phy_ethtool_ksettings_get(phydev, cmd);
12099
12100                 return 0;
12101         }
12102
12103         supported = (SUPPORTED_Autoneg);
12104
12105         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12106                 supported |= (SUPPORTED_1000baseT_Half |
12107                               SUPPORTED_1000baseT_Full);
12108
12109         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12110                 supported |= (SUPPORTED_100baseT_Half |
12111                               SUPPORTED_100baseT_Full |
12112                               SUPPORTED_10baseT_Half |
12113                               SUPPORTED_10baseT_Full |
12114                               SUPPORTED_TP);
12115                 cmd->base.port = PORT_TP;
12116         } else {
12117                 supported |= SUPPORTED_FIBRE;
12118                 cmd->base.port = PORT_FIBRE;
12119         }
12120         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12121                                                 supported);
12122
12123         advertising = tp->link_config.advertising;
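              /* Fold the flow-control configuration into the standard
               * pause advertisement bits: RX+TX -> Pause, RX only ->
               * Pause|Asym_Pause, TX only -> Asym_Pause.
               */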
12124         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12125                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12126                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12127                                 advertising |= ADVERTISED_Pause;
12128                         } else {
12129                                 advertising |= ADVERTISED_Pause |
12130                                         ADVERTISED_Asym_Pause;
12131                         }
12132                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12133                         advertising |= ADVERTISED_Asym_Pause;
12134                 }
12135         }
12136         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12137                                                 advertising);
12138
12139         if (netif_running(dev) && tp->link_up) {
12140                 cmd->base.speed = tp->link_config.active_speed;
12141                 cmd->base.duplex = tp->link_config.active_duplex;
12142                 ethtool_convert_legacy_u32_to_link_mode(
12143                         cmd->link_modes.lp_advertising,
12144                         tp->link_config.rmt_adv);
12145
12146                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12147                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12148                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12149                         else
12150                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12151                 }
12152         } else {
12153                 cmd->base.speed = SPEED_UNKNOWN;
12154                 cmd->base.duplex = DUPLEX_UNKNOWN;
12155                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12156         }
12157         cmd->base.phy_address = tp->phy_addr;
12158         cmd->base.autoneg = tp->link_config.autoneg;
12159         return 0;
12160 }
12161
12162 static int tg3_set_link_ksettings(struct net_device *dev,
12163                                   const struct ethtool_link_ksettings *cmd)
12164 {
12165         struct tg3 *tp = netdev_priv(dev);
12166         u32 speed = cmd->base.speed;
12167         u32 advertising;
12168
12169         if (tg3_flag(tp, USE_PHYLIB)) {
12170                 struct phy_device *phydev;
12171                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12172                         return -EAGAIN;
12173                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12174                 return phy_ethtool_ksettings_set(phydev, cmd);
12175         }
12176
12177         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12178             cmd->base.autoneg != AUTONEG_DISABLE)
12179                 return -EINVAL;
12180
12181         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12182             cmd->base.duplex != DUPLEX_FULL &&
12183             cmd->base.duplex != DUPLEX_HALF)
12184                 return -EINVAL;
12185
12186         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12187                                                 cmd->link_modes.advertising);
12188
12189         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12190                 u32 mask = ADVERTISED_Autoneg |
12191                            ADVERTISED_Pause |
12192                            ADVERTISED_Asym_Pause;
12193
12194                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12195                         mask |= ADVERTISED_1000baseT_Half |
12196                                 ADVERTISED_1000baseT_Full;
12197
12198                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12199                         mask |= ADVERTISED_100baseT_Half |
12200                                 ADVERTISED_100baseT_Full |
12201                                 ADVERTISED_10baseT_Half |
12202                                 ADVERTISED_10baseT_Full |
12203                                 ADVERTISED_TP;
12204                 else
12205                         mask |= ADVERTISED_FIBRE;
12206
12207                 if (advertising & ~mask)
12208                         return -EINVAL;
12209
12210                 mask &= (ADVERTISED_1000baseT_Half |
12211                          ADVERTISED_1000baseT_Full |
12212                          ADVERTISED_100baseT_Half |
12213                          ADVERTISED_100baseT_Full |
12214                          ADVERTISED_10baseT_Half |
12215                          ADVERTISED_10baseT_Full);
12216
12217                 advertising &= mask;
12218         } else {
12219                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12220                         if (speed != SPEED_1000)
12221                                 return -EINVAL;
12222
12223                         if (cmd->base.duplex != DUPLEX_FULL)
12224                                 return -EINVAL;
12225                 } else {
12226                         if (speed != SPEED_100 &&
12227                             speed != SPEED_10)
12228                                 return -EINVAL;
12229                 }
12230         }
12231
12232         tg3_full_lock(tp, 0);
12233
12234         tp->link_config.autoneg = cmd->base.autoneg;
12235         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12236                 tp->link_config.advertising = (advertising |
12237                                               ADVERTISED_Autoneg);
12238                 tp->link_config.speed = SPEED_UNKNOWN;
12239                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12240         } else {
12241                 tp->link_config.advertising = 0;
12242                 tp->link_config.speed = speed;
12243                 tp->link_config.duplex = cmd->base.duplex;
12244         }
12245
12246         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12247
12248         tg3_warn_mgmt_link_flap(tp);
12249
12250         if (netif_running(dev))
12251                 tg3_setup_phy(tp, true);
12252
12253         tg3_full_unlock(tp);
12254
12255         return 0;
12256 }
12257
12258 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12259 {
12260         struct tg3 *tp = netdev_priv(dev);
12261
12262         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12263         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12264         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12265         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12266 }
12267
12268 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12269 {
12270         struct tg3 *tp = netdev_priv(dev);
12271
12272         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12273                 wol->supported = WAKE_MAGIC;
12274         else
12275                 wol->supported = 0;
12276         wol->wolopts = 0;
12277         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12278                 wol->wolopts = WAKE_MAGIC;
12279         memset(&wol->sopass, 0, sizeof(wol->sopass));
12280 }
12281
12282 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12283 {
12284         struct tg3 *tp = netdev_priv(dev);
12285         struct device *dp = &tp->pdev->dev;
12286
12287         if (wol->wolopts & ~WAKE_MAGIC)
12288                 return -EINVAL;
12289         if ((wol->wolopts & WAKE_MAGIC) &&
12290             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12291                 return -EINVAL;
12292
12293         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12294
12295         if (device_may_wakeup(dp))
12296                 tg3_flag_set(tp, WOL_ENABLE);
12297         else
12298                 tg3_flag_clear(tp, WOL_ENABLE);
12299
12300         return 0;
12301 }
12302
12303 static u32 tg3_get_msglevel(struct net_device *dev)
12304 {
12305         struct tg3 *tp = netdev_priv(dev);
12306         return tp->msg_enable;
12307 }
12308
12309 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12310 {
12311         struct tg3 *tp = netdev_priv(dev);
12312         tp->msg_enable = value;
12313 }
12314
12315 static int tg3_nway_reset(struct net_device *dev)
12316 {
12317         struct tg3 *tp = netdev_priv(dev);
12318         int r;
12319
12320         if (!netif_running(dev))
12321                 return -EAGAIN;
12322
12323         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12324                 return -EINVAL;
12325
12326         tg3_warn_mgmt_link_flap(tp);
12327
12328         if (tg3_flag(tp, USE_PHYLIB)) {
12329                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12330                         return -EAGAIN;
12331                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12332         } else {
12333                 u32 bmcr;
12334
12335                 spin_lock_bh(&tp->lock);
12336                 r = -EINVAL;
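                      /* BMCR is read twice; the first result is discarded
                       * (apparently to flush a stale value) and only the
                       * checked second read below is used.
                       */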
12337                 tg3_readphy(tp, MII_BMCR, &bmcr);
12338                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12339                     ((bmcr & BMCR_ANENABLE) ||
12340                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12341                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12342                                                    BMCR_ANENABLE);
12343                         r = 0;
12344                 }
12345                 spin_unlock_bh(&tp->lock);
12346         }
12347
12348         return r;
12349 }
12350
12351 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12352 {
12353         struct tg3 *tp = netdev_priv(dev);
12354
12355         ering->rx_max_pending = tp->rx_std_ring_mask;
12356         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12357                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12358         else
12359                 ering->rx_jumbo_max_pending = 0;
12360
12361         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12362
12363         ering->rx_pending = tp->rx_pending;
12364         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12365                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12366         else
12367                 ering->rx_jumbo_pending = 0;
12368
12369         ering->tx_pending = tp->napi[0].tx_pending;
12370 }
12371
12372 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12373 {
12374         struct tg3 *tp = netdev_priv(dev);
12375         int i, irq_sync = 0, err = 0;
12376
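              /* Reject sizes beyond the hardware ring masks, and TX rings
               * too small to hold one maximally-fragmented packet (three
               * of them on chips with the TSO bug workaround).
               */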
12377         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12378             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12379             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12380             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12381             (tg3_flag(tp, TSO_BUG) &&
12382              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12383                 return -EINVAL;
12384
12385         if (netif_running(dev)) {
12386                 tg3_phy_stop(tp);
12387                 tg3_netif_stop(tp);
12388                 irq_sync = 1;
12389         }
12390
12391         tg3_full_lock(tp, irq_sync);
12392
12393         tp->rx_pending = ering->rx_pending;
12394
12395         if (tg3_flag(tp, MAX_RXPEND_64) &&
12396             tp->rx_pending > 63)
12397                 tp->rx_pending = 63;
12398
12399         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12400                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12401
12402         for (i = 0; i < tp->irq_max; i++)
12403                 tp->napi[i].tx_pending = ering->tx_pending;
12404
12405         if (netif_running(dev)) {
12406                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12407                 err = tg3_restart_hw(tp, false);
12408                 if (!err)
12409                         tg3_netif_start(tp);
12410         }
12411
12412         tg3_full_unlock(tp);
12413
12414         if (irq_sync && !err)
12415                 tg3_phy_start(tp);
12416
12417         return err;
12418 }
12419
12420 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12421 {
12422         struct tg3 *tp = netdev_priv(dev);
12423
12424         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12425
12426         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12427                 epause->rx_pause = 1;
12428         else
12429                 epause->rx_pause = 0;
12430
12431         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12432                 epause->tx_pause = 1;
12433         else
12434                 epause->tx_pause = 0;
12435 }
12436
12437 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12438 {
12439         struct tg3 *tp = netdev_priv(dev);
12440         int err = 0;
12441
12442         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12443                 tg3_warn_mgmt_link_flap(tp);
12444
12445         if (tg3_flag(tp, USE_PHYLIB)) {
12446                 u32 newadv;
12447                 struct phy_device *phydev;
12448
12449                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12450
12451                 if (!(phydev->supported & SUPPORTED_Pause) ||
12452                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12453                      (epause->rx_pause != epause->tx_pause)))
12454                         return -EINVAL;
12455
12456                 tp->link_config.flowctrl = 0;
12457                 if (epause->rx_pause) {
12458                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12459
12460                         if (epause->tx_pause) {
12461                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12462                                 newadv = ADVERTISED_Pause;
12463                         } else
12464                                 newadv = ADVERTISED_Pause |
12465                                          ADVERTISED_Asym_Pause;
12466                 } else if (epause->tx_pause) {
12467                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12468                         newadv = ADVERTISED_Asym_Pause;
12469                 } else
12470                         newadv = 0;
12471
12472                 if (epause->autoneg)
12473                         tg3_flag_set(tp, PAUSE_AUTONEG);
12474                 else
12475                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12476
12477                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12478                         u32 oldadv = phydev->advertising &
12479                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12480                         if (oldadv != newadv) {
12481                                 phydev->advertising &=
12482                                         ~(ADVERTISED_Pause |
12483                                           ADVERTISED_Asym_Pause);
12484                                 phydev->advertising |= newadv;
12485                                 if (phydev->autoneg) {
12486                                         /*
12487                                          * Always renegotiate the link to
12488                                          * inform our link partner of our
12489                                          * flow control settings, even if the
12490                                          * flow control is forced.  Let
12491                                          * tg3_adjust_link() do the final
12492                                          * flow control setup.
12493                                          */
12494                                         return phy_start_aneg(phydev);
12495                                 }
12496                         }
12497
12498                         if (!epause->autoneg)
12499                                 tg3_setup_flow_control(tp, 0, 0);
12500                 } else {
12501                         tp->link_config.advertising &=
12502                                         ~(ADVERTISED_Pause |
12503                                           ADVERTISED_Asym_Pause);
12504                         tp->link_config.advertising |= newadv;
12505                 }
12506         } else {
12507                 int irq_sync = 0;
12508
12509                 if (netif_running(dev)) {
12510                         tg3_netif_stop(tp);
12511                         irq_sync = 1;
12512                 }
12513
12514                 tg3_full_lock(tp, irq_sync);
12515
12516                 if (epause->autoneg)
12517                         tg3_flag_set(tp, PAUSE_AUTONEG);
12518                 else
12519                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12520                 if (epause->rx_pause)
12521                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12522                 else
12523                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12524                 if (epause->tx_pause)
12525                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12526                 else
12527                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12528
12529                 if (netif_running(dev)) {
12530                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12531                         err = tg3_restart_hw(tp, false);
12532                         if (!err)
12533                                 tg3_netif_start(tp);
12534                 }
12535
12536                 tg3_full_unlock(tp);
12537         }
12538
12539         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12540
12541         return err;
12542 }
12543
12544 static int tg3_get_sset_count(struct net_device *dev, int sset)
12545 {
12546         switch (sset) {
12547         case ETH_SS_TEST:
12548                 return TG3_NUM_TEST;
12549         case ETH_SS_STATS:
12550                 return TG3_NUM_STATS;
12551         default:
12552                 return -EOPNOTSUPP;
12553         }
12554 }
12555
12556 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12557                          u32 *rules __always_unused)
12558 {
12559         struct tg3 *tp = netdev_priv(dev);
12560
12561         if (!tg3_flag(tp, SUPPORT_MSIX))
12562                 return -EOPNOTSUPP;
12563
12564         switch (info->cmd) {
12565         case ETHTOOL_GRXRINGS:
12566                 if (netif_running(tp->dev))
12567                         info->data = tp->rxq_cnt;
12568                 else {
12569                         info->data = num_online_cpus();
12570                         if (info->data > TG3_RSS_MAX_NUM_QS)
12571                                 info->data = TG3_RSS_MAX_NUM_QS;
12572                 }
12573
12574                 return 0;
12575
12576         default:
12577                 return -EOPNOTSUPP;
12578         }
12579 }
12580
12581 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12582 {
12583         u32 size = 0;
12584         struct tg3 *tp = netdev_priv(dev);
12585
12586         if (tg3_flag(tp, SUPPORT_MSIX))
12587                 size = TG3_RSS_INDIR_TBL_SIZE;
12588
12589         return size;
12590 }
12591
12592 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12593 {
12594         struct tg3 *tp = netdev_priv(dev);
12595         int i;
12596
12597         if (hfunc)
12598                 *hfunc = ETH_RSS_HASH_TOP;
12599         if (!indir)
12600                 return 0;
12601
12602         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12603                 indir[i] = tp->rss_ind_tbl[i];
12604
12605         return 0;
12606 }
12607
12608 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12609                         const u8 hfunc)
12610 {
12611         struct tg3 *tp = netdev_priv(dev);
12612         size_t i;
12613
12614         /* We require at least one supported parameter to be changed and no
12615          * change in any of the unsupported parameters.
12616          */
12617         if (key ||
12618             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12619                 return -EOPNOTSUPP;
12620
12621         if (!indir)
12622                 return 0;
12623
12624         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12625                 tp->rss_ind_tbl[i] = indir[i];
12626
12627         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12628                 return 0;
12629
12630         /* It is legal to write the indirection
12631          * table while the device is running.
12632          */
12633         tg3_full_lock(tp, 0);
12634         tg3_rss_write_indir_tbl(tp);
12635         tg3_full_unlock(tp);
12636
12637         return 0;
12638 }
12639
12640 static void tg3_get_channels(struct net_device *dev,
12641                              struct ethtool_channels *channel)
12642 {
12643         struct tg3 *tp = netdev_priv(dev);
12644         u32 deflt_qs = netif_get_num_default_rss_queues();
12645
12646         channel->max_rx = tp->rxq_max;
12647         channel->max_tx = tp->txq_max;
12648
12649         if (netif_running(dev)) {
12650                 channel->rx_count = tp->rxq_cnt;
12651                 channel->tx_count = tp->txq_cnt;
12652         } else {
12653                 if (tp->rxq_req)
12654                         channel->rx_count = tp->rxq_req;
12655                 else
12656                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12657
12658                 if (tp->txq_req)
12659                         channel->tx_count = tp->txq_req;
12660                 else
12661                         channel->tx_count = min(deflt_qs, tp->txq_max);
12662         }
12663 }
12664
12665 static int tg3_set_channels(struct net_device *dev,
12666                             struct ethtool_channels *channel)
12667 {
12668         struct tg3 *tp = netdev_priv(dev);
12669
12670         if (!tg3_flag(tp, SUPPORT_MSIX))
12671                 return -EOPNOTSUPP;
12672
12673         if (channel->rx_count > tp->rxq_max ||
12674             channel->tx_count > tp->txq_max)
12675                 return -EINVAL;
12676
12677         tp->rxq_req = channel->rx_count;
12678         tp->txq_req = channel->tx_count;
12679
12680         if (!netif_running(dev))
12681                 return 0;
12682
12683         tg3_stop(tp);
12684
12685         tg3_carrier_off(tp);
12686
12687         tg3_start(tp, true, false, false);
12688
12689         return 0;
12690 }
12691
12692 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12693 {
12694         switch (stringset) {
12695         case ETH_SS_STATS:
12696                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12697                 break;
12698         case ETH_SS_TEST:
12699                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12700                 break;
12701         default:
12702                 WARN_ON(1);     /* unknown string set */
12703                 break;
12704         }
12705 }
12706
12707 static int tg3_set_phys_id(struct net_device *dev,
12708                             enum ethtool_phys_id_state state)
12709 {
12710         struct tg3 *tp = netdev_priv(dev);
12711
12712         if (!netif_running(tp->dev))
12713                 return -EAGAIN;
12714
12715         switch (state) {
12716         case ETHTOOL_ID_ACTIVE:
12717                 return 1;       /* cycle on/off once per second */
12718
12719         case ETHTOOL_ID_ON:
12720                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12721                      LED_CTRL_1000MBPS_ON |
12722                      LED_CTRL_100MBPS_ON |
12723                      LED_CTRL_10MBPS_ON |
12724                      LED_CTRL_TRAFFIC_OVERRIDE |
12725                      LED_CTRL_TRAFFIC_BLINK |
12726                      LED_CTRL_TRAFFIC_LED);
12727                 break;
12728
12729         case ETHTOOL_ID_OFF:
12730                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12731                      LED_CTRL_TRAFFIC_OVERRIDE);
12732                 break;
12733
12734         case ETHTOOL_ID_INACTIVE:
12735                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12736                 break;
12737         }
12738
12739         return 0;
12740 }
12741
12742 static void tg3_get_ethtool_stats(struct net_device *dev,
12743                                    struct ethtool_stats *estats, u64 *tmp_stats)
12744 {
12745         struct tg3 *tp = netdev_priv(dev);
12746
12747         if (tp->hw_stats)
12748                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12749         else
12750                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12751 }
12752
12753 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12754 {
12755         int i;
12756         __be32 *buf;
12757         u32 offset = 0, len = 0;
12758         u32 magic, val;
12759
12760         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12761                 return NULL;
12762
12763         if (magic == TG3_EEPROM_MAGIC) {
12764                 for (offset = TG3_NVM_DIR_START;
12765                      offset < TG3_NVM_DIR_END;
12766                      offset += TG3_NVM_DIRENT_SIZE) {
12767                         if (tg3_nvram_read(tp, offset, &val))
12768                                 return NULL;
12769
12770                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12771                             TG3_NVM_DIRTYPE_EXTVPD)
12772                                 break;
12773                 }
12774
12775                 if (offset != TG3_NVM_DIR_END) {
12776                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12777                         if (tg3_nvram_read(tp, offset + 4, &offset))
12778                                 return NULL;
12779
12780                         offset = tg3_nvram_logical_addr(tp, offset);
12781                 }
12782         }
12783
12784         if (!offset || !len) {
12785                 offset = TG3_NVM_VPD_OFF;
12786                 len = TG3_NVM_VPD_LEN;
12787         }
12788
12789         buf = kmalloc(len, GFP_KERNEL);
12790         if (buf == NULL)
12791                 return NULL;
12792
12793         if (magic == TG3_EEPROM_MAGIC) {
12794                 for (i = 0; i < len; i += 4) {
12795                         /* The data is in little-endian format in NVRAM.
12796                          * Use the big-endian read routines to preserve
12797                          * the byte order as it exists in NVRAM.
12798                          */
12799                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12800                                 goto error;
12801                 }
12802         } else {
12803                 u8 *ptr;
12804                 ssize_t cnt;
12805                 unsigned int pos = 0;
12806
12807                 ptr = (u8 *)&buf[0];
12808                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12809                         cnt = pci_read_vpd(tp->pdev, pos,
12810                                            len - pos, ptr);
12811                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12812                                 cnt = 0;
12813                         else if (cnt < 0)
12814                                 goto error;
12815                 }
12816                 if (pos != len)
12817                         goto error;
12818         }
12819
12820         *vpdlen = len;
12821
12822         return buf;
12823
12824 error:
12825         kfree(buf);
12826         return NULL;
12827 }
12828
12829 #define NVRAM_TEST_SIZE 0x100
12830 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12831 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12832 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12833 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12834 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12835 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12836 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12837 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12838
12839 static int tg3_test_nvram(struct tg3 *tp)
12840 {
12841         u32 csum, magic, len;
12842         __be32 *buf;
12843         int i, j, k, err = 0, size;
12844
12845         if (tg3_flag(tp, NO_NVRAM))
12846                 return 0;
12847
12848         if (tg3_nvram_read(tp, 0, &magic) != 0)
12849                 return -EIO;
12850
12851         if (magic == TG3_EEPROM_MAGIC)
12852                 size = NVRAM_TEST_SIZE;
12853         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12854                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12855                     TG3_EEPROM_SB_FORMAT_1) {
12856                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12857                         case TG3_EEPROM_SB_REVISION_0:
12858                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12859                                 break;
12860                         case TG3_EEPROM_SB_REVISION_2:
12861                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12862                                 break;
12863                         case TG3_EEPROM_SB_REVISION_3:
12864                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12865                                 break;
12866                         case TG3_EEPROM_SB_REVISION_4:
12867                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12868                                 break;
12869                         case TG3_EEPROM_SB_REVISION_5:
12870                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12871                                 break;
12872                         case TG3_EEPROM_SB_REVISION_6:
12873                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12874                                 break;
12875                         default:
12876                                 return -EIO;
12877                         }
12878                 } else
12879                         return 0;
12880         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12881                 size = NVRAM_SELFBOOT_HW_SIZE;
12882         else
12883                 return -EIO;
12884
12885         buf = kmalloc(size, GFP_KERNEL);
12886         if (buf == NULL)
12887                 return -ENOMEM;
12888
12889         err = -EIO;
12890         for (i = 0, j = 0; i < size; i += 4, j++) {
12891                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12892                 if (err)
12893                         break;
12894         }
12895         if (i < size)
12896                 goto out;
12897
12898         /* Selfboot format */
12899         magic = be32_to_cpu(buf[0]);
12900         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12901             TG3_EEPROM_MAGIC_FW) {
12902                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12903
12904                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12905                     TG3_EEPROM_SB_REVISION_2) {
12906                         /* For rev 2, the csum doesn't include the MBA. */
12907                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12908                                 csum8 += buf8[i];
12909                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12910                                 csum8 += buf8[i];
12911                 } else {
12912                         for (i = 0; i < size; i++)
12913                                 csum8 += buf8[i];
12914                 }
12915
12916                 if (csum8 == 0) {
12917                         err = 0;
12918                         goto out;
12919                 }
12920
12921                 err = -EIO;
12922                 goto out;
12923         }
12924
12925         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12926             TG3_EEPROM_MAGIC_HW) {
12927                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12928                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12929                 u8 *buf8 = (u8 *) buf;
12930
12931                 /* Separate the parity bits and the data bytes.  */
12932                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12933                         if ((i == 0) || (i == 8)) {
12934                                 int l;
12935                                 u8 msk;
12936
12937                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12938                                         parity[k++] = buf8[i] & msk;
12939                                 i++;
12940                         } else if (i == 16) {
12941                                 int l;
12942                                 u8 msk;
12943
12944                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12945                                         parity[k++] = buf8[i] & msk;
12946                                 i++;
12947
12948                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12949                                         parity[k++] = buf8[i] & msk;
12950                                 i++;
12951                         }
12952                         data[j++] = buf8[i];
12953                 }
12954
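                /* Editor's note: the image stores odd parity per data byte.
                 * A byte with an odd number of set bits must have a clear
                 * stored parity bit, and a byte with an even number must
                 * have a set one; hweight8() computes the population count
                 * used for that check below.
                 */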
12955                 err = -EIO;
12956                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12957                         u8 hw8 = hweight8(data[i]);
12958
12959                         if ((hw8 & 0x1) && parity[i])
12960                                 goto out;
12961                         else if (!(hw8 & 0x1) && !parity[i])
12962                                 goto out;
12963                 }
12964                 err = 0;
12965                 goto out;
12966         }
12967
12968         err = -EIO;
12969
12970         /* Bootstrap checksum at offset 0x10 */
12971         csum = calc_crc((unsigned char *) buf, 0x10);
12972         if (csum != le32_to_cpu(buf[0x10/4]))
12973                 goto out;
12974
12975         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12976         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12977         if (csum != le32_to_cpu(buf[0xfc/4]))
12978                 goto out;
12979
12980         kfree(buf);
12981
12982         buf = tg3_vpd_readblock(tp, &len);
12983         if (!buf)
12984                 return -ENOMEM;
12985
12986         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12987         if (i > 0) {
12988                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12989                 if (j < 0)
12990                         goto out;
12991
12992                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12993                         goto out;
12994
12995                 i += PCI_VPD_LRDT_TAG_SIZE;
12996                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12997                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12998                 if (j > 0) {
12999                         u8 csum8 = 0;
13000
13001                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13002
13003                         for (i = 0; i <= j; i++)
13004                                 csum8 += ((u8 *)buf)[i];
13005
13006                         if (csum8)
13007                                 goto out;
13008                 }
13009         }
13010
13011         err = 0;
13012
13013 out:
13014         kfree(buf);
13015         return err;
13016 }
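/* Editor's summary of tg3_test_nvram(): legacy EEPROM images are validated
 * with two CRC32s (bootstrap block checked against the word at 0x10,
 * manufacturing block against the word at 0xfc) plus an optional VPD
 * read-only-section checksum; selfboot firmware images use a simple 8-bit
 * additive checksum; selfboot hardware images store per-byte parity bits
 * that are verified against hweight8() of each data byte.
 */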
13017
13018 #define TG3_SERDES_TIMEOUT_SEC  2
13019 #define TG3_COPPER_TIMEOUT_SEC  6
13020
13021 static int tg3_test_link(struct tg3 *tp)
13022 {
13023         int i, max;
13024
13025         if (!netif_running(tp->dev))
13026                 return -ENODEV;
13027
13028         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13029                 max = TG3_SERDES_TIMEOUT_SEC;
13030         else
13031                 max = TG3_COPPER_TIMEOUT_SEC;
13032
13033         for (i = 0; i < max; i++) {
13034                 if (tp->link_up)
13035                         return 0;
13036
13037                 if (msleep_interruptible(1000))
13038                         break;
13039         }
13040
13041         return -EIO;
13042 }
13043
13044 /* Only test the commonly used registers */
13045 static int tg3_test_registers(struct tg3 *tp)
13046 {
13047         int i, is_5705, is_5750;
13048         u32 offset, read_mask, write_mask, val, save_val, read_val;
13049         static struct {
13050                 u16 offset;
13051                 u16 flags;
13052 #define TG3_FL_5705     0x1
13053 #define TG3_FL_NOT_5705 0x2
13054 #define TG3_FL_NOT_5788 0x4
13055 #define TG3_FL_NOT_5750 0x8
13056                 u32 read_mask;
13057                 u32 write_mask;
13058         } reg_tbl[] = {
13059                 /* MAC Control Registers */
13060                 { MAC_MODE, TG3_FL_NOT_5705,
13061                         0x00000000, 0x00ef6f8c },
13062                 { MAC_MODE, TG3_FL_5705,
13063                         0x00000000, 0x01ef6b8c },
13064                 { MAC_STATUS, TG3_FL_NOT_5705,
13065                         0x03800107, 0x00000000 },
13066                 { MAC_STATUS, TG3_FL_5705,
13067                         0x03800100, 0x00000000 },
13068                 { MAC_ADDR_0_HIGH, 0x0000,
13069                         0x00000000, 0x0000ffff },
13070                 { MAC_ADDR_0_LOW, 0x0000,
13071                         0x00000000, 0xffffffff },
13072                 { MAC_RX_MTU_SIZE, 0x0000,
13073                         0x00000000, 0x0000ffff },
13074                 { MAC_TX_MODE, 0x0000,
13075                         0x00000000, 0x00000070 },
13076                 { MAC_TX_LENGTHS, 0x0000,
13077                         0x00000000, 0x00003fff },
13078                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13079                         0x00000000, 0x000007fc },
13080                 { MAC_RX_MODE, TG3_FL_5705,
13081                         0x00000000, 0x000007dc },
13082                 { MAC_HASH_REG_0, 0x0000,
13083                         0x00000000, 0xffffffff },
13084                 { MAC_HASH_REG_1, 0x0000,
13085                         0x00000000, 0xffffffff },
13086                 { MAC_HASH_REG_2, 0x0000,
13087                         0x00000000, 0xffffffff },
13088                 { MAC_HASH_REG_3, 0x0000,
13089                         0x00000000, 0xffffffff },
13090
13091                 /* Receive Data and Receive BD Initiator Control Registers. */
13092                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13093                         0x00000000, 0xffffffff },
13094                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13095                         0x00000000, 0xffffffff },
13096                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13097                         0x00000000, 0x00000003 },
13098                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13099                         0x00000000, 0xffffffff },
13100                 { RCVDBDI_STD_BD+0, 0x0000,
13101                         0x00000000, 0xffffffff },
13102                 { RCVDBDI_STD_BD+4, 0x0000,
13103                         0x00000000, 0xffffffff },
13104                 { RCVDBDI_STD_BD+8, 0x0000,
13105                         0x00000000, 0xffff0002 },
13106                 { RCVDBDI_STD_BD+0xc, 0x0000,
13107                         0x00000000, 0xffffffff },
13108
13109                 /* Receive BD Initiator Control Registers. */
13110                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13111                         0x00000000, 0xffffffff },
13112                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13113                         0x00000000, 0x000003ff },
13114                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13115                         0x00000000, 0xffffffff },
13116
13117                 /* Host Coalescing Control Registers. */
13118                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13119                         0x00000000, 0x00000004 },
13120                 { HOSTCC_MODE, TG3_FL_5705,
13121                         0x00000000, 0x000000f6 },
13122                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13123                         0x00000000, 0xffffffff },
13124                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13125                         0x00000000, 0x000003ff },
13126                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13127                         0x00000000, 0xffffffff },
13128                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13129                         0x00000000, 0x000003ff },
13130                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13131                         0x00000000, 0xffffffff },
13132                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13133                         0x00000000, 0x000000ff },
13134                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13135                         0x00000000, 0xffffffff },
13136                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13137                         0x00000000, 0x000000ff },
13138                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13139                         0x00000000, 0xffffffff },
13140                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13141                         0x00000000, 0xffffffff },
13142                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13143                         0x00000000, 0xffffffff },
13144                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13145                         0x00000000, 0x000000ff },
13146                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13147                         0x00000000, 0xffffffff },
13148                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13149                         0x00000000, 0x000000ff },
13150                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13151                         0x00000000, 0xffffffff },
13152                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13153                         0x00000000, 0xffffffff },
13154                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13155                         0x00000000, 0xffffffff },
13156                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13157                         0x00000000, 0xffffffff },
13158                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13159                         0x00000000, 0xffffffff },
13160                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13161                         0xffffffff, 0x00000000 },
13162                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13163                         0xffffffff, 0x00000000 },
13164
13165                 /* Buffer Manager Control Registers. */
13166                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13167                         0x00000000, 0x007fff80 },
13168                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13169                         0x00000000, 0x007fffff },
13170                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13171                         0x00000000, 0x0000003f },
13172                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13173                         0x00000000, 0x000001ff },
13174                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13175                         0x00000000, 0x000001ff },
13176                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13177                         0xffffffff, 0x00000000 },
13178                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13179                         0xffffffff, 0x00000000 },
13180
13181                 /* Mailbox Registers */
13182                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13183                         0x00000000, 0x000001ff },
13184                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13185                         0x00000000, 0x000001ff },
13186                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13187                         0x00000000, 0x000007ff },
13188                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13189                         0x00000000, 0x000001ff },
13190
13191                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13192         };
13193
13194         is_5705 = is_5750 = 0;
13195         if (tg3_flag(tp, 5705_PLUS)) {
13196                 is_5705 = 1;
13197                 if (tg3_flag(tp, 5750_PLUS))
13198                         is_5750 = 1;
13199         }
13200
13201         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13202                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13203                         continue;
13204
13205                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13206                         continue;
13207
13208                 if (tg3_flag(tp, IS_5788) &&
13209                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13210                         continue;
13211
13212                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13213                         continue;
13214
13215                 offset = (u32) reg_tbl[i].offset;
13216                 read_mask = reg_tbl[i].read_mask;
13217                 write_mask = reg_tbl[i].write_mask;
13218
13219                 /* Save the original register content */
13220                 save_val = tr32(offset);
13221
13222                 /* Determine the read-only value. */
13223                 read_val = save_val & read_mask;
13224
13225                 /* Write zero to the register, then make sure the read-only bits
13226                  * are not changed and the read/write bits are all zeros.
13227                  */
13228                 tw32(offset, 0);
13229
13230                 val = tr32(offset);
13231
13232                 /* Test the read-only and read/write bits. */
13233                 if (((val & read_mask) != read_val) || (val & write_mask))
13234                         goto out;
13235
13236                 /* Write ones to all the bits defined by RdMask and WrMask, then
13237                  * make sure the read-only bits are not changed and the
13238                  * read/write bits are all ones.
13239                  */
13240                 tw32(offset, read_mask | write_mask);
13241
13242                 val = tr32(offset);
13243
13244                 /* Test the read-only bits. */
13245                 if ((val & read_mask) != read_val)
13246                         goto out;
13247
13248                 /* Test the read/write bits. */
13249                 if ((val & write_mask) != write_mask)
13250                         goto out;
13251
13252                 tw32(offset, save_val);
13253         }
13254
13255         return 0;
13256
13257 out:
13258         if (netif_msg_hw(tp))
13259                 netdev_err(tp->dev,
13260                            "Register test failed at offset %x\n", offset);
13261         tw32(offset, save_val);
13262         return -EIO;
13263 }
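/* Editor's note: each table entry above partitions a register into
 * read-only bits (read_mask) and read/write bits (write_mask).  The test
 * writes all-zeros and then all-ones, checking after each write that the
 * read-only field kept its saved value and the writable field took the
 * written value, before restoring the original register contents.
 */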
13264
13265 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13266 {
13267         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13268         int i;
13269         u32 j;
13270
13271         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13272                 for (j = 0; j < len; j += 4) {
13273                         u32 val;
13274
13275                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13276                         tg3_read_mem(tp, offset + j, &val);
13277                         if (val != test_pattern[i])
13278                                 return -EIO;
13279                 }
13280         }
13281         return 0;
13282 }
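/* Editor's note: the three patterns above (all-zeros, all-ones and the
 * alternating 0xaa55a55a) are a conventional quick check for stuck-at
 * bits and simple coupling faults in the on-chip memory windows.
 */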
13283
13284 static int tg3_test_memory(struct tg3 *tp)
13285 {
13286         static struct mem_entry {
13287                 u32 offset;
13288                 u32 len;
13289         } mem_tbl_570x[] = {
13290                 { 0x00000000, 0x00b50},
13291                 { 0x00002000, 0x1c000},
13292                 { 0xffffffff, 0x00000}
13293         }, mem_tbl_5705[] = {
13294                 { 0x00000100, 0x0000c},
13295                 { 0x00000200, 0x00008},
13296                 { 0x00004000, 0x00800},
13297                 { 0x00006000, 0x01000},
13298                 { 0x00008000, 0x02000},
13299                 { 0x00010000, 0x0e000},
13300                 { 0xffffffff, 0x00000}
13301         }, mem_tbl_5755[] = {
13302                 { 0x00000200, 0x00008},
13303                 { 0x00004000, 0x00800},
13304                 { 0x00006000, 0x00800},
13305                 { 0x00008000, 0x02000},
13306                 { 0x00010000, 0x0c000},
13307                 { 0xffffffff, 0x00000}
13308         }, mem_tbl_5906[] = {
13309                 { 0x00000200, 0x00008},
13310                 { 0x00004000, 0x00400},
13311                 { 0x00006000, 0x00400},
13312                 { 0x00008000, 0x01000},
13313                 { 0x00010000, 0x01000},
13314                 { 0xffffffff, 0x00000}
13315         }, mem_tbl_5717[] = {
13316                 { 0x00000200, 0x00008},
13317                 { 0x00010000, 0x0a000},
13318                 { 0x00020000, 0x13c00},
13319                 { 0xffffffff, 0x00000}
13320         }, mem_tbl_57765[] = {
13321                 { 0x00000200, 0x00008},
13322                 { 0x00004000, 0x00800},
13323                 { 0x00006000, 0x09800},
13324                 { 0x00010000, 0x0a000},
13325                 { 0xffffffff, 0x00000}
13326         };
13327         struct mem_entry *mem_tbl;
13328         int err = 0;
13329         int i;
13330
13331         if (tg3_flag(tp, 5717_PLUS))
13332                 mem_tbl = mem_tbl_5717;
13333         else if (tg3_flag(tp, 57765_CLASS) ||
13334                  tg3_asic_rev(tp) == ASIC_REV_5762)
13335                 mem_tbl = mem_tbl_57765;
13336         else if (tg3_flag(tp, 5755_PLUS))
13337                 mem_tbl = mem_tbl_5755;
13338         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13339                 mem_tbl = mem_tbl_5906;
13340         else if (tg3_flag(tp, 5705_PLUS))
13341                 mem_tbl = mem_tbl_5705;
13342         else
13343                 mem_tbl = mem_tbl_570x;
13344
13345         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13346                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13347                 if (err)
13348                         break;
13349         }
13350
13351         return err;
13352 }
13353
13354 #define TG3_TSO_MSS             500
13355
13356 #define TG3_TSO_IP_HDR_LEN      20
13357 #define TG3_TSO_TCP_HDR_LEN     20
13358 #define TG3_TSO_TCP_OPT_LEN     12
13359
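/* Editor's note: canned frame body for the TSO loopback test, laid out as
 * EtherType 0x0800 (2 bytes), a 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol TCP), a 20-byte TCP header and 12 bytes of timestamp options.
 * It is copied into the test skb immediately after the two 6-byte MAC
 * address fields.
 */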
13360 static const u8 tg3_tso_header[] = {
13361 0x08, 0x00,
13362 0x45, 0x00, 0x00, 0x00,
13363 0x00, 0x00, 0x40, 0x00,
13364 0x40, 0x06, 0x00, 0x00,
13365 0x0a, 0x00, 0x00, 0x01,
13366 0x0a, 0x00, 0x00, 0x02,
13367 0x0d, 0x00, 0xe0, 0x00,
13368 0x00, 0x00, 0x01, 0x00,
13369 0x00, 0x00, 0x02, 0x00,
13370 0x80, 0x10, 0x10, 0x00,
13371 0x14, 0x09, 0x00, 0x00,
13372 0x01, 0x01, 0x08, 0x0a,
13373 0x11, 0x11, 0x11, 0x11,
13374 0x11, 0x11, 0x11, 0x11,
13375 };
13376
13377 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13378 {
13379         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13380         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13381         u32 budget;
13382         struct sk_buff *skb;
13383         u8 *tx_data, *rx_data;
13384         dma_addr_t map;
13385         int num_pkts, tx_len, rx_len, i, err;
13386         struct tg3_rx_buffer_desc *desc;
13387         struct tg3_napi *tnapi, *rnapi;
13388         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13389
13390         tnapi = &tp->napi[0];
13391         rnapi = &tp->napi[0];
13392         if (tp->irq_cnt > 1) {
13393                 if (tg3_flag(tp, ENABLE_RSS))
13394                         rnapi = &tp->napi[1];
13395                 if (tg3_flag(tp, ENABLE_TSS))
13396                         tnapi = &tp->napi[1];
13397         }
13398         coal_now = tnapi->coal_now | rnapi->coal_now;
13399
13400         err = -EIO;
13401
13402         tx_len = pktsz;
13403         skb = netdev_alloc_skb(tp->dev, tx_len);
13404         if (!skb)
13405                 return -ENOMEM;
13406
13407         tx_data = skb_put(skb, tx_len);
13408         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13409         memset(tx_data + ETH_ALEN, 0x0, 8);
13410
13411         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13412
13413         if (tso_loopback) {
13414                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13415
13416                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13417                               TG3_TSO_TCP_OPT_LEN;
13418
13419                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13420                        sizeof(tg3_tso_header));
13421                 mss = TG3_TSO_MSS;
13422
13423                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13424                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13425
13426                 /* Set the total length field in the IP header */
13427                 iph->tot_len = htons((u16)(mss + hdr_len));
13428
13429                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13430                               TXD_FLAG_CPU_POST_DMA);
13431
13432                 if (tg3_flag(tp, HW_TSO_1) ||
13433                     tg3_flag(tp, HW_TSO_2) ||
13434                     tg3_flag(tp, HW_TSO_3)) {
13435                         struct tcphdr *th;
13436                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13437                         th = (struct tcphdr *)&tx_data[val];
13438                         th->check = 0;
13439                 } else
13440                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13441
13442                 if (tg3_flag(tp, HW_TSO_3)) {
13443                         mss |= (hdr_len & 0xc) << 12;
13444                         if (hdr_len & 0x10)
13445                                 base_flags |= 0x00000010;
13446                         base_flags |= (hdr_len & 0x3e0) << 5;
13447                 } else if (tg3_flag(tp, HW_TSO_2))
13448                         mss |= hdr_len << 9;
13449                 else if (tg3_flag(tp, HW_TSO_1) ||
13450                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13451                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13452                 } else {
13453                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13454                 }
13455
13456                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13457         } else {
13458                 num_pkts = 1;
13459                 data_off = ETH_HLEN;
13460
13461                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13462                     tx_len > VLAN_ETH_FRAME_LEN)
13463                         base_flags |= TXD_FLAG_JMB_PKT;
13464         }
13465
13466         for (i = data_off; i < tx_len; i++)
13467                 tx_data[i] = (u8) (i & 0xff);
13468
13469         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13470         if (pci_dma_mapping_error(tp->pdev, map)) {
13471                 dev_kfree_skb(skb);
13472                 return -EIO;
13473         }
13474
13475         val = tnapi->tx_prod;
13476         tnapi->tx_buffers[val].skb = skb;
13477         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13478
13479         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13480                rnapi->coal_now);
13481
13482         udelay(10);
13483
13484         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13485
13486         budget = tg3_tx_avail(tnapi);
13487         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13488                             base_flags | TXD_FLAG_END, mss, 0)) {
13489                 tnapi->tx_buffers[val].skb = NULL;
13490                 dev_kfree_skb(skb);
13491                 return -EIO;
13492         }
13493
13494         tnapi->tx_prod++;
13495
13496         /* Sync BD data before updating mailbox */
13497         wmb();
13498
13499         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13500         tr32_mailbox(tnapi->prodmbox);
13501
13502         udelay(10);
13503
13504         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13505         for (i = 0; i < 35; i++) {
13506                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13507                        coal_now);
13508
13509                 udelay(10);
13510
13511                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13512                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13513                 if ((tx_idx == tnapi->tx_prod) &&
13514                     (rx_idx == (rx_start_idx + num_pkts)))
13515                         break;
13516         }
13517
13518         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13519         dev_kfree_skb(skb);
13520
13521         if (tx_idx != tnapi->tx_prod)
13522                 goto out;
13523
13524         if (rx_idx != rx_start_idx + num_pkts)
13525                 goto out;
13526
13527         val = data_off;
13528         while (rx_idx != rx_start_idx) {
13529                 desc = &rnapi->rx_rcb[rx_start_idx++];
13530                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13531                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13532
13533                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13534                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13535                         goto out;
13536
13537                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13538                          - ETH_FCS_LEN;
13539
13540                 if (!tso_loopback) {
13541                         if (rx_len != tx_len)
13542                                 goto out;
13543
13544                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13545                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13546                                         goto out;
13547                         } else {
13548                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13549                                         goto out;
13550                         }
13551                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13552                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13553                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13554                         goto out;
13555                 }
13556
13557                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13558                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13559                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13560                                              mapping);
13561                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13562                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13563                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13564                                              mapping);
13565                 } else
13566                         goto out;
13567
13568                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13569                                             PCI_DMA_FROMDEVICE);
13570
13571                 rx_data += TG3_RX_OFFSET(tp);
13572                 for (i = data_off; i < rx_len; i++, val++) {
13573                         if (*(rx_data + i) != (u8) (val & 0xff))
13574                                 goto out;
13575                 }
13576         }
13577
13578         err = 0;
13579
13580         /* tg3_free_rings will unmap and free the rx_data */
13581 out:
13582         return err;
13583 }
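/* Editor's summary of tg3_run_loopback(): a single test frame (patterned
 * payload, optionally segmented by the hardware in the TSO case) is posted
 * on one TX BD, a host-coalescing interrupt is forced, and the hw status
 * block is polled for up to 350 usec until the TX consumer and RX producer
 * indices show the frame(s) came back; each returned buffer is then
 * compared byte-by-byte against the transmitted pattern.
 */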
13584
13585 #define TG3_STD_LOOPBACK_FAILED         1
13586 #define TG3_JMB_LOOPBACK_FAILED         2
13587 #define TG3_TSO_LOOPBACK_FAILED         4
13588 #define TG3_LOOPBACK_FAILED \
13589         (TG3_STD_LOOPBACK_FAILED | \
13590          TG3_JMB_LOOPBACK_FAILED | \
13591          TG3_TSO_LOOPBACK_FAILED)
13592
13593 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13594 {
13595         int err = -EIO;
13596         u32 eee_cap;
13597         u32 jmb_pkt_sz = 9000;
13598
13599         if (tp->dma_limit)
13600                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13601
13602         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13603         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13604
13605         if (!netif_running(tp->dev)) {
13606                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13607                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608                 if (do_extlpbk)
13609                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13610                 goto done;
13611         }
13612
13613         err = tg3_reset_hw(tp, true);
13614         if (err) {
13615                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13616                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617                 if (do_extlpbk)
13618                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13619                 goto done;
13620         }
13621
13622         if (tg3_flag(tp, ENABLE_RSS)) {
13623                 int i;
13624
13625                 /* Reroute all rx packets to the 1st queue */
13626                 for (i = MAC_RSS_INDIR_TBL_0;
13627                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13628                         tw32(i, 0x0);
13629         }
13630
13631         /* HW errata - mac loopback fails in some cases on 5780.
13632          * Normal traffic and PHY loopback are not affected by
13633          * errata.  Also, the MAC loopback test is deprecated for
13634          * all newer ASIC revisions.
13635          */
13636         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13637             !tg3_flag(tp, CPMU_PRESENT)) {
13638                 tg3_mac_loopback(tp, true);
13639
13640                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13641                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13642
13643                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13644                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13645                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13646
13647                 tg3_mac_loopback(tp, false);
13648         }
13649
13650         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13651             !tg3_flag(tp, USE_PHYLIB)) {
13652                 int i;
13653
13654                 tg3_phy_lpbk_set(tp, 0, false);
13655
13656                 /* Wait for link */
13657                 for (i = 0; i < 100; i++) {
13658                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13659                                 break;
13660                         mdelay(1);
13661                 }
13662
13663                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13664                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13665                 if (tg3_flag(tp, TSO_CAPABLE) &&
13666                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13667                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13668                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13669                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13670                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13671
13672                 if (do_extlpbk) {
13673                         tg3_phy_lpbk_set(tp, 0, true);
13674
13675                         /* All link indications report up, but the hardware
13676                          * isn't really ready for about 20 msec.  Double it
13677                          * to be sure.
13678                          */
13679                         mdelay(40);
13680
13681                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13682                                 data[TG3_EXT_LOOPB_TEST] |=
13683                                                         TG3_STD_LOOPBACK_FAILED;
13684                         if (tg3_flag(tp, TSO_CAPABLE) &&
13685                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13686                                 data[TG3_EXT_LOOPB_TEST] |=
13687                                                         TG3_TSO_LOOPBACK_FAILED;
13688                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13689                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13690                                 data[TG3_EXT_LOOPB_TEST] |=
13691                                                         TG3_JMB_LOOPBACK_FAILED;
13692                 }
13693
13694                 /* Re-enable gphy autopowerdown. */
13695                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13696                         tg3_phy_toggle_apd(tp, true);
13697         }
13698
13699         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13700                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13701
13702 done:
13703         tp->phy_flags |= eee_cap;
13704
13705         return err;
13706 }
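/* Editor's note: each data[] slot above is a bitmask per loopback mode
 * (MAC, PHY, external): bit 0 flags a standard-MTU failure, bit 1 a
 * jumbo-frame failure and bit 2 a TSO failure, matching the
 * TG3_*_LOOPBACK_FAILED definitions.
 */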
13707
13708 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13709                           u64 *data)
13710 {
13711         struct tg3 *tp = netdev_priv(dev);
13712         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13713
13714         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13715                 if (tg3_power_up(tp)) {
13716                         etest->flags |= ETH_TEST_FL_FAILED;
13717                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13718                         return;
13719                 }
13720                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13721         }
13722
13723         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13724
13725         if (tg3_test_nvram(tp) != 0) {
13726                 etest->flags |= ETH_TEST_FL_FAILED;
13727                 data[TG3_NVRAM_TEST] = 1;
13728         }
13729         if (!doextlpbk && tg3_test_link(tp)) {
13730                 etest->flags |= ETH_TEST_FL_FAILED;
13731                 data[TG3_LINK_TEST] = 1;
13732         }
13733         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13734                 int err, err2 = 0, irq_sync = 0;
13735
13736                 if (netif_running(dev)) {
13737                         tg3_phy_stop(tp);
13738                         tg3_netif_stop(tp);
13739                         irq_sync = 1;
13740                 }
13741
13742                 tg3_full_lock(tp, irq_sync);
13743                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13744                 err = tg3_nvram_lock(tp);
13745                 tg3_halt_cpu(tp, RX_CPU_BASE);
13746                 if (!tg3_flag(tp, 5705_PLUS))
13747                         tg3_halt_cpu(tp, TX_CPU_BASE);
13748                 if (!err)
13749                         tg3_nvram_unlock(tp);
13750
13751                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13752                         tg3_phy_reset(tp);
13753
13754                 if (tg3_test_registers(tp) != 0) {
13755                         etest->flags |= ETH_TEST_FL_FAILED;
13756                         data[TG3_REGISTER_TEST] = 1;
13757                 }
13758
13759                 if (tg3_test_memory(tp) != 0) {
13760                         etest->flags |= ETH_TEST_FL_FAILED;
13761                         data[TG3_MEMORY_TEST] = 1;
13762                 }
13763
13764                 if (doextlpbk)
13765                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13766
13767                 if (tg3_test_loopback(tp, data, doextlpbk))
13768                         etest->flags |= ETH_TEST_FL_FAILED;
13769
13770                 tg3_full_unlock(tp);
13771
13772                 if (tg3_test_interrupt(tp) != 0) {
13773                         etest->flags |= ETH_TEST_FL_FAILED;
13774                         data[TG3_INTERRUPT_TEST] = 1;
13775                 }
13776
13777                 tg3_full_lock(tp, 0);
13778
13779                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13780                 if (netif_running(dev)) {
13781                         tg3_flag_set(tp, INIT_COMPLETE);
13782                         err2 = tg3_restart_hw(tp, true);
13783                         if (!err2)
13784                                 tg3_netif_start(tp);
13785                 }
13786
13787                 tg3_full_unlock(tp);
13788
13789                 if (irq_sync && !err2)
13790                         tg3_phy_start(tp);
13791         }
13792         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13793                 tg3_power_down_prepare(tp);
13794
13795 }
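/* Editor's note: tg3_self_test() backs "ethtool -t eth0 [online|offline]"
 * (name illustrative).  The offline pass stops the interface, halts the
 * on-chip CPUs, then runs the register, memory, loopback and interrupt
 * tests before restarting the hardware; the cheaper online pass runs only
 * the NVRAM and link tests.
 */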
13796
13797 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13798 {
13799         struct tg3 *tp = netdev_priv(dev);
13800         struct hwtstamp_config stmpconf;
13801
13802         if (!tg3_flag(tp, PTP_CAPABLE))
13803                 return -EOPNOTSUPP;
13804
13805         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13806                 return -EFAULT;
13807
13808         if (stmpconf.flags)
13809                 return -EINVAL;
13810
13811         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13812             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13813                 return -ERANGE;
13814
13815         switch (stmpconf.rx_filter) {
13816         case HWTSTAMP_FILTER_NONE:
13817                 tp->rxptpctl = 0;
13818                 break;
13819         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13820                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13821                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13822                 break;
13823         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13824                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13825                                TG3_RX_PTP_CTL_SYNC_EVNT;
13826                 break;
13827         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13828                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13829                                TG3_RX_PTP_CTL_DELAY_REQ;
13830                 break;
13831         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13832                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13833                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13834                 break;
13835         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13836                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13837                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13838                 break;
13839         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13840                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13841                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13842                 break;
13843         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13844                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13845                                TG3_RX_PTP_CTL_SYNC_EVNT;
13846                 break;
13847         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13848                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13849                                TG3_RX_PTP_CTL_SYNC_EVNT;
13850                 break;
13851         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13852                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13853                                TG3_RX_PTP_CTL_SYNC_EVNT;
13854                 break;
13855         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13856                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13857                                TG3_RX_PTP_CTL_DELAY_REQ;
13858                 break;
13859         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13860                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13861                                TG3_RX_PTP_CTL_DELAY_REQ;
13862                 break;
13863         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13864                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13865                                TG3_RX_PTP_CTL_DELAY_REQ;
13866                 break;
13867         default:
13868                 return -ERANGE;
13869         }
13870
13871         if (netif_running(dev) && tp->rxptpctl)
13872                 tw32(TG3_RX_PTP_CTL,
13873                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13874
13875         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13876                 tg3_flag_set(tp, TX_TSTAMP_EN);
13877         else
13878                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13879
13880         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13881                 -EFAULT : 0;
13882 }
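/* Editor's illustration (not driver code; shown as a comment so it does not
 * affect the build): a minimal userspace sketch of the SIOCSHWTSTAMP
 * request that lands in tg3_hwtstamp_set() above.  The interface name and
 * error handling are placeholders.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	int enable_hw_tstamps(int sock)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type   = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *
 *		// The kernel rewrites cfg with what it actually enabled.
 *		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *	}
 */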
13883
13884 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13885 {
13886         struct tg3 *tp = netdev_priv(dev);
13887         struct hwtstamp_config stmpconf;
13888
13889         if (!tg3_flag(tp, PTP_CAPABLE))
13890                 return -EOPNOTSUPP;
13891
13892         stmpconf.flags = 0;
13893         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13894                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13895
13896         switch (tp->rxptpctl) {
13897         case 0:
13898                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13899                 break;
13900         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13901                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13902                 break;
13903         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13904                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13905                 break;
13906         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13907                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13908                 break;
13909         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13910                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13911                 break;
13912         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13913                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13914                 break;
13915         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13916                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13917                 break;
13918         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13919                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13920                 break;
13921         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13922                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13923                 break;
13924         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13925                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13926                 break;
13927         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13928                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13929                 break;
13930         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13935                 break;
13936         default:
13937                 WARN_ON_ONCE(1);
13938                 return -ERANGE;
13939         }
13940
13941         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13942                 -EFAULT : 0;
13943 }
13944
13945 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13946 {
13947         struct mii_ioctl_data *data = if_mii(ifr);
13948         struct tg3 *tp = netdev_priv(dev);
13949         int err;
13950
13951         if (tg3_flag(tp, USE_PHYLIB)) {
13952                 struct phy_device *phydev;
13953                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13954                         return -EAGAIN;
13955                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13956                 return phy_mii_ioctl(phydev, ifr, cmd);
13957         }
13958
13959         switch (cmd) {
13960         case SIOCGMIIPHY:
13961                 data->phy_id = tp->phy_addr;
13962
13963                 /* fallthru */
13964         case SIOCGMIIREG: {
13965                 u32 mii_regval;
13966
13967                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13968                         break;                  /* We have no PHY */
13969
13970                 if (!netif_running(dev))
13971                         return -EAGAIN;
13972
13973                 spin_lock_bh(&tp->lock);
13974                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13975                                     data->reg_num & 0x1f, &mii_regval);
13976                 spin_unlock_bh(&tp->lock);
13977
13978                 data->val_out = mii_regval;
13979
13980                 return err;
13981         }
13982
13983         case SIOCSMIIREG:
13984                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13985                         break;                  /* We have no PHY */
13986
13987                 if (!netif_running(dev))
13988                         return -EAGAIN;
13989
13990                 spin_lock_bh(&tp->lock);
13991                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13992                                      data->reg_num & 0x1f, data->val_in);
13993                 spin_unlock_bh(&tp->lock);
13994
13995                 return err;
13996
13997         case SIOCSHWTSTAMP:
13998                 return tg3_hwtstamp_set(dev, ifr);
13999
14000         case SIOCGHWTSTAMP:
14001                 return tg3_hwtstamp_get(dev, ifr);
14002
14003         default:
14004                 /* do nothing */
14005                 break;
14006         }
14007         return -EOPNOTSUPP;
14008 }
14009
14010 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14011 {
14012         struct tg3 *tp = netdev_priv(dev);
14013
14014         memcpy(ec, &tp->coal, sizeof(*ec));
14015         return 0;
14016 }
14017
14018 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14019 {
14020         struct tg3 *tp = netdev_priv(dev);
14021         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14022         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14023
14024         if (!tg3_flag(tp, 5705_PLUS)) {
14025                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14026                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14027                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14028                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14029         }
14030
14031         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14032             (!ec->rx_coalesce_usecs) ||
14033             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14034             (!ec->tx_coalesce_usecs) ||
14035             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14036             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14037             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14038             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14039             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14040             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14041             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14042             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14043                 return -EINVAL;
14044
14045         /* Only copy relevant parameters, ignore all others. */
14046         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14047         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14048         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14049         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14050         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14051         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14052         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14053         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14054         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14055
14056         if (netif_running(dev)) {
14057                 tg3_full_lock(tp, 0);
14058                 __tg3_set_coalesce(tp, &tp->coal);
14059                 tg3_full_unlock(tp);
14060         }
14061         return 0;
14062 }
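/* Editor's note: the fields copied above map onto "ethtool -C eth0
 * rx-usecs N tx-usecs N rx-frames N ..." (name illustrative).  The per-IRQ
 * tick and stats-block tick limits collapse to zero on 5705-and-newer
 * parts per the bounds computed above, so those parameters must stay zero
 * there.
 */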
14063
14064 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14065 {
14066         struct tg3 *tp = netdev_priv(dev);
14067
14068         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14069                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14070                 return -EOPNOTSUPP;
14071         }
14072
14073         if (edata->advertised != tp->eee.advertised) {
14074                 netdev_warn(tp->dev,
14075                             "Direct manipulation of EEE advertisement is not supported\n");
14076                 return -EINVAL;
14077         }
14078
14079         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14080                 netdev_warn(tp->dev,
14081                             "Maximal Tx LPI timer supported is %#x\n",
14082                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14083                 return -EINVAL;
14084         }
14085
14086         tp->eee = *edata;
14087
14088         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14089         tg3_warn_mgmt_link_flap(tp);
14090
14091         if (netif_running(tp->dev)) {
14092                 tg3_full_lock(tp, 0);
14093                 tg3_setup_eee(tp);
14094                 tg3_phy_reset(tp);
14095                 tg3_full_unlock(tp);
14096         }
14097
14098         return 0;
14099 }
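/* Illustrative sketch (hypothetical values): "ethtool --set-eee eth0
 * tx-timer 10" arrives with edata->tx_lpi_timer == 10.  Because the
 * advertisement mask is left untouched it still equals tp->eee.advertised,
 * the timer is below TG3_CPMU_DBTMR1_LNKIDLE_MAX, and the new settings
 * take effect via tg3_setup_eee() plus a PHY reset if the interface is up.
 */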
14100
14101 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14102 {
14103         struct tg3 *tp = netdev_priv(dev);
14104
14105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14106                 netdev_warn(tp->dev,
14107                             "Board does not support EEE!\n");
14108                 return -EOPNOTSUPP;
14109         }
14110
14111         *edata = tp->eee;
14112         return 0;
14113 }
14114
14115 static const struct ethtool_ops tg3_ethtool_ops = {
14116         .get_drvinfo            = tg3_get_drvinfo,
14117         .get_regs_len           = tg3_get_regs_len,
14118         .get_regs               = tg3_get_regs,
14119         .get_wol                = tg3_get_wol,
14120         .set_wol                = tg3_set_wol,
14121         .get_msglevel           = tg3_get_msglevel,
14122         .set_msglevel           = tg3_set_msglevel,
14123         .nway_reset             = tg3_nway_reset,
14124         .get_link               = ethtool_op_get_link,
14125         .get_eeprom_len         = tg3_get_eeprom_len,
14126         .get_eeprom             = tg3_get_eeprom,
14127         .set_eeprom             = tg3_set_eeprom,
14128         .get_ringparam          = tg3_get_ringparam,
14129         .set_ringparam          = tg3_set_ringparam,
14130         .get_pauseparam         = tg3_get_pauseparam,
14131         .set_pauseparam         = tg3_set_pauseparam,
14132         .self_test              = tg3_self_test,
14133         .get_strings            = tg3_get_strings,
14134         .set_phys_id            = tg3_set_phys_id,
14135         .get_ethtool_stats      = tg3_get_ethtool_stats,
14136         .get_coalesce           = tg3_get_coalesce,
14137         .set_coalesce           = tg3_set_coalesce,
14138         .get_sset_count         = tg3_get_sset_count,
14139         .get_rxnfc              = tg3_get_rxnfc,
14140         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14141         .get_rxfh               = tg3_get_rxfh,
14142         .set_rxfh               = tg3_set_rxfh,
14143         .get_channels           = tg3_get_channels,
14144         .set_channels           = tg3_set_channels,
14145         .get_ts_info            = tg3_get_ts_info,
14146         .get_eee                = tg3_get_eee,
14147         .set_eee                = tg3_set_eee,
14148         .get_link_ksettings     = tg3_get_link_ksettings,
14149         .set_link_ksettings     = tg3_set_link_ksettings,
14150 };
14151
14152 static void tg3_get_stats64(struct net_device *dev,
14153                             struct rtnl_link_stats64 *stats)
14154 {
14155         struct tg3 *tp = netdev_priv(dev);
14156
14157         spin_lock_bh(&tp->lock);
14158         if (!tp->hw_stats) {
14159                 *stats = tp->net_stats_prev;
14160                 spin_unlock_bh(&tp->lock);
14161                 return;
14162         }
14163
14164         tg3_get_nstats(tp, stats);
14165         spin_unlock_bh(&tp->lock);
14166 }
14167
14168 static void tg3_set_rx_mode(struct net_device *dev)
14169 {
14170         struct tg3 *tp = netdev_priv(dev);
14171
14172         if (!netif_running(dev))
14173                 return;
14174
14175         tg3_full_lock(tp, 0);
14176         __tg3_set_rx_mode(dev);
14177         tg3_full_unlock(tp);
14178 }
14179
14180 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14181                                int new_mtu)
14182 {
14183         dev->mtu = new_mtu;
14184
14185         if (new_mtu > ETH_DATA_LEN) {
14186                 if (tg3_flag(tp, 5780_CLASS)) {
14187                         netdev_update_features(dev);
14188                         tg3_flag_clear(tp, TSO_CAPABLE);
14189                 } else {
14190                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14191                 }
14192         } else {
14193                 if (tg3_flag(tp, 5780_CLASS)) {
14194                         tg3_flag_set(tp, TSO_CAPABLE);
14195                         netdev_update_features(dev);
14196                 }
14197                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14198         }
14199 }
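/* Example of the paths above (hypothetical MTUs): raising the MTU from
 * 1500 to 9000 takes the new_mtu > ETH_DATA_LEN branch; non-5780-class
 * chips simply enable the dedicated jumbo rx ring, while 5780-class chips
 * must give up TSO capability in exchange for jumbo frame support.
 * Dropping back to 1500 reverses both decisions.
 */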
14200
14201 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14202 {
14203         struct tg3 *tp = netdev_priv(dev);
14204         int err;
14205         bool reset_phy = false;
14206
14207         if (!netif_running(dev)) {
14208                 /* Nothing to do now; the new MTU simply takes
14209                  * effect when the device is next brought up.
14210                  */
14211                 tg3_set_mtu(dev, tp, new_mtu);
14212                 return 0;
14213         }
14214
14215         tg3_phy_stop(tp);
14216
14217         tg3_netif_stop(tp);
14218
14219         tg3_set_mtu(dev, tp, new_mtu);
14220
14221         tg3_full_lock(tp, 1);
14222
14223         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14224
14225         /* Reset the PHY, otherwise the read DMA engine will be left in a
14226          * mode that breaks all DMA requests down to 256 bytes.
14227          */
14228         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14229             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14230             tg3_asic_rev(tp) == ASIC_REV_5719)
14231                 reset_phy = true;
14232
14233         err = tg3_restart_hw(tp, reset_phy);
14234
14235         if (!err)
14236                 tg3_netif_start(tp);
14237
14238         tg3_full_unlock(tp);
14239
14240         if (!err)
14241                 tg3_phy_start(tp);
14242
14243         return err;
14244 }
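/* The 5717 and 5719 cases above address the rx hang seen after an MTU
 * change on those chips: like the 57766, they can otherwise be left with
 * the read DMA engine in the degraded 256-byte mode, so the PHY is reset
 * along with the rest of the hardware.
 */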
14245
14246 static const struct net_device_ops tg3_netdev_ops = {
14247         .ndo_open               = tg3_open,
14248         .ndo_stop               = tg3_close,
14249         .ndo_start_xmit         = tg3_start_xmit,
14250         .ndo_get_stats64        = tg3_get_stats64,
14251         .ndo_validate_addr      = eth_validate_addr,
14252         .ndo_set_rx_mode        = tg3_set_rx_mode,
14253         .ndo_set_mac_address    = tg3_set_mac_addr,
14254         .ndo_do_ioctl           = tg3_ioctl,
14255         .ndo_tx_timeout         = tg3_tx_timeout,
14256         .ndo_change_mtu         = tg3_change_mtu,
14257         .ndo_fix_features       = tg3_fix_features,
14258         .ndo_set_features       = tg3_set_features,
14259 #ifdef CONFIG_NET_POLL_CONTROLLER
14260         .ndo_poll_controller    = tg3_poll_controller,
14261 #endif
14262 };
14263
14264 static void tg3_get_eeprom_size(struct tg3 *tp)
14265 {
14266         u32 cursize, val, magic;
14267
14268         tp->nvram_size = EEPROM_CHIP_SIZE;
14269
14270         if (tg3_nvram_read(tp, 0, &magic) != 0)
14271                 return;
14272
14273         if ((magic != TG3_EEPROM_MAGIC) &&
14274             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14275             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14276                 return;
14277
14278         /*
14279          * Size the chip by reading offsets at increasing powers of two.
14280          * When we encounter our validation signature, we know the addressing
14281          * has wrapped around, and thus have our chip size.
14282          */
14283         cursize = 0x10;
14284
14285         while (cursize < tp->nvram_size) {
14286                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14287                         return;
14288
14289                 if (val == magic)
14290                         break;
14291
14292                 cursize <<= 1;
14293         }
14294
14295         tp->nvram_size = cursize;
14296 }
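/* Worked example of the sizing loop (hypothetical part): on a 512-byte
 * EEPROM the probe reads offsets 0x10, 0x20, 0x40, and so on; the read at
 * offset 0x200 wraps back to offset 0 and returns the magic signature
 * again, so the loop breaks with cursize == 0x200 and nvram_size is set
 * to 512 bytes.
 */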
14297
14298 static void tg3_get_nvram_size(struct tg3 *tp)
14299 {
14300         u32 val;
14301
14302         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14303                 return;
14304
14305         /* Selfboot format */
14306         if (val != TG3_EEPROM_MAGIC) {
14307                 tg3_get_eeprom_size(tp);
14308                 return;
14309         }
14310
14311         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14312                 if (val != 0) {
14313                         /* We want to operate on the 16-bit value at
14314                          * offset 0xf2.  The tg3_nvram_read() call will
14315                          * read from NVRAM and byteswap the data according
14316                          * to the byteswapping settings for all other
14317                          * register accesses.  This ensures the data we
14318                          * want will always reside in the lower 16 bits.
14319                          * However, the data in NVRAM is in LE format, which
14320                          * means the data from the NVRAM read will always be
14321                          * opposite the endianness of the CPU.  The 16-bit
14322                          * byteswap then brings the data to CPU endianness.
14323                          */
14324                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14325                         return;
14326                 }
14327         }
14328         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14329 }
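/* Worked example of the swab16() above (hypothetical value): a 512 KB part
 * stores 0x0200 little-endian at offset 0xf2.  After the register-access
 * byteswap the CPU sees 0x0002 in the low 16 bits of val; swab16(0x0002)
 * restores 0x0200, and nvram_size becomes 0x0200 * 1024 bytes.
 */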
14330
14331 static void tg3_get_nvram_info(struct tg3 *tp)
14332 {
14333         u32 nvcfg1;
14334
14335         nvcfg1 = tr32(NVRAM_CFG1);
14336         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14337                 tg3_flag_set(tp, FLASH);
14338         } else {
14339                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14340                 tw32(NVRAM_CFG1, nvcfg1);
14341         }
14342
14343         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14344             tg3_flag(tp, 5780_CLASS)) {
14345                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14346                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14347                         tp->nvram_jedecnum = JEDEC_ATMEL;
14348                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14349                         tg3_flag_set(tp, NVRAM_BUFFERED);
14350                         break;
14351                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14352                         tp->nvram_jedecnum = JEDEC_ATMEL;
14353                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14354                         break;
14355                 case FLASH_VENDOR_ATMEL_EEPROM:
14356                         tp->nvram_jedecnum = JEDEC_ATMEL;
14357                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14358                         tg3_flag_set(tp, NVRAM_BUFFERED);
14359                         break;
14360                 case FLASH_VENDOR_ST:
14361                         tp->nvram_jedecnum = JEDEC_ST;
14362                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14363                         tg3_flag_set(tp, NVRAM_BUFFERED);
14364                         break;
14365                 case FLASH_VENDOR_SAIFUN:
14366                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14367                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14368                         break;
14369                 case FLASH_VENDOR_SST_SMALL:
14370                 case FLASH_VENDOR_SST_LARGE:
14371                         tp->nvram_jedecnum = JEDEC_SST;
14372                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14373                         break;
14374                 }
14375         } else {
14376                 tp->nvram_jedecnum = JEDEC_ATMEL;
14377                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14378                 tg3_flag_set(tp, NVRAM_BUFFERED);
14379         }
14380 }
14381
14382 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14383 {
14384         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14385         case FLASH_5752PAGE_SIZE_256:
14386                 tp->nvram_pagesize = 256;
14387                 break;
14388         case FLASH_5752PAGE_SIZE_512:
14389                 tp->nvram_pagesize = 512;
14390                 break;
14391         case FLASH_5752PAGE_SIZE_1K:
14392                 tp->nvram_pagesize = 1024;
14393                 break;
14394         case FLASH_5752PAGE_SIZE_2K:
14395                 tp->nvram_pagesize = 2048;
14396                 break;
14397         case FLASH_5752PAGE_SIZE_4K:
14398                 tp->nvram_pagesize = 4096;
14399                 break;
14400         case FLASH_5752PAGE_SIZE_264:
14401                 tp->nvram_pagesize = 264;
14402                 break;
14403         case FLASH_5752PAGE_SIZE_528:
14404                 tp->nvram_pagesize = 528;
14405                 break;
14406         }
14407 }
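/* Background on the odd sizes above: 264 and 528 correspond to Atmel
 * DataFlash pages (256 or 512 data bytes plus per-page overhead; compare
 * ATMEL_AT45DB0X1B_PAGE_SIZE elsewhere in this file).  Such parts need
 * page-based address translation, which is why the *_nvram_info() callers
 * below set NO_NVRAM_ADDR_TRANS only for every other page size.
 */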
14408
14409 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14410 {
14411         u32 nvcfg1;
14412
14413         nvcfg1 = tr32(NVRAM_CFG1);
14414
14415         /* NVRAM protection for TPM */
14416         if (nvcfg1 & (1 << 27))
14417                 tg3_flag_set(tp, PROTECTED_NVRAM);
14418
14419         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14420         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14421         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14422                 tp->nvram_jedecnum = JEDEC_ATMEL;
14423                 tg3_flag_set(tp, NVRAM_BUFFERED);
14424                 break;
14425         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14426                 tp->nvram_jedecnum = JEDEC_ATMEL;
14427                 tg3_flag_set(tp, NVRAM_BUFFERED);
14428                 tg3_flag_set(tp, FLASH);
14429                 break;
14430         case FLASH_5752VENDOR_ST_M45PE10:
14431         case FLASH_5752VENDOR_ST_M45PE20:
14432         case FLASH_5752VENDOR_ST_M45PE40:
14433                 tp->nvram_jedecnum = JEDEC_ST;
14434                 tg3_flag_set(tp, NVRAM_BUFFERED);
14435                 tg3_flag_set(tp, FLASH);
14436                 break;
14437         }
14438
14439         if (tg3_flag(tp, FLASH)) {
14440                 tg3_nvram_get_pagesize(tp, nvcfg1);
14441         } else {
14442                 /* For eeprom, set pagesize to maximum eeprom size */
14443                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14444
14445                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14446                 tw32(NVRAM_CFG1, nvcfg1);
14447         }
14448 }
14449
14450 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14451 {
14452         u32 nvcfg1, protect = 0;
14453
14454         nvcfg1 = tr32(NVRAM_CFG1);
14455
14456         /* NVRAM protection for TPM */
14457         if (nvcfg1 & (1 << 27)) {
14458                 tg3_flag_set(tp, PROTECTED_NVRAM);
14459                 protect = 1;
14460         }
14461
14462         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14463         switch (nvcfg1) {
14464         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14465         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14466         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14467         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14468                 tp->nvram_jedecnum = JEDEC_ATMEL;
14469                 tg3_flag_set(tp, NVRAM_BUFFERED);
14470                 tg3_flag_set(tp, FLASH);
14471                 tp->nvram_pagesize = 264;
14472                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14473                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14474                         tp->nvram_size = (protect ? 0x3e200 :
14475                                           TG3_NVRAM_SIZE_512KB);
14476                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14477                         tp->nvram_size = (protect ? 0x1f200 :
14478                                           TG3_NVRAM_SIZE_256KB);
14479                 else
14480                         tp->nvram_size = (protect ? 0x1f200 :
14481                                           TG3_NVRAM_SIZE_128KB);
14482                 break;
14483         case FLASH_5752VENDOR_ST_M45PE10:
14484         case FLASH_5752VENDOR_ST_M45PE20:
14485         case FLASH_5752VENDOR_ST_M45PE40:
14486                 tp->nvram_jedecnum = JEDEC_ST;
14487                 tg3_flag_set(tp, NVRAM_BUFFERED);
14488                 tg3_flag_set(tp, FLASH);
14489                 tp->nvram_pagesize = 256;
14490                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14491                         tp->nvram_size = (protect ?
14492                                           TG3_NVRAM_SIZE_64KB :
14493                                           TG3_NVRAM_SIZE_128KB);
14494                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14495                         tp->nvram_size = (protect ?
14496                                           TG3_NVRAM_SIZE_64KB :
14497                                           TG3_NVRAM_SIZE_256KB);
14498                 else
14499                         tp->nvram_size = (protect ?
14500                                           TG3_NVRAM_SIZE_128KB :
14501                                           TG3_NVRAM_SIZE_512KB);
14502                 break;
14503         }
14504 }
14505
14506 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14507 {
14508         u32 nvcfg1;
14509
14510         nvcfg1 = tr32(NVRAM_CFG1);
14511
14512         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14513         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14514         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14515         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14516         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14517                 tp->nvram_jedecnum = JEDEC_ATMEL;
14518                 tg3_flag_set(tp, NVRAM_BUFFERED);
14519                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14520
14521                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14522                 tw32(NVRAM_CFG1, nvcfg1);
14523                 break;
14524         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14525         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14526         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14527         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14528                 tp->nvram_jedecnum = JEDEC_ATMEL;
14529                 tg3_flag_set(tp, NVRAM_BUFFERED);
14530                 tg3_flag_set(tp, FLASH);
14531                 tp->nvram_pagesize = 264;
14532                 break;
14533         case FLASH_5752VENDOR_ST_M45PE10:
14534         case FLASH_5752VENDOR_ST_M45PE20:
14535         case FLASH_5752VENDOR_ST_M45PE40:
14536                 tp->nvram_jedecnum = JEDEC_ST;
14537                 tg3_flag_set(tp, NVRAM_BUFFERED);
14538                 tg3_flag_set(tp, FLASH);
14539                 tp->nvram_pagesize = 256;
14540                 break;
14541         }
14542 }
14543
14544 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14545 {
14546         u32 nvcfg1, protect = 0;
14547
14548         nvcfg1 = tr32(NVRAM_CFG1);
14549
14550         /* NVRAM protection for TPM */
14551         if (nvcfg1 & (1 << 27)) {
14552                 tg3_flag_set(tp, PROTECTED_NVRAM);
14553                 protect = 1;
14554         }
14555
14556         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14557         switch (nvcfg1) {
14558         case FLASH_5761VENDOR_ATMEL_ADB021D:
14559         case FLASH_5761VENDOR_ATMEL_ADB041D:
14560         case FLASH_5761VENDOR_ATMEL_ADB081D:
14561         case FLASH_5761VENDOR_ATMEL_ADB161D:
14562         case FLASH_5761VENDOR_ATMEL_MDB021D:
14563         case FLASH_5761VENDOR_ATMEL_MDB041D:
14564         case FLASH_5761VENDOR_ATMEL_MDB081D:
14565         case FLASH_5761VENDOR_ATMEL_MDB161D:
14566                 tp->nvram_jedecnum = JEDEC_ATMEL;
14567                 tg3_flag_set(tp, NVRAM_BUFFERED);
14568                 tg3_flag_set(tp, FLASH);
14569                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14570                 tp->nvram_pagesize = 256;
14571                 break;
14572         case FLASH_5761VENDOR_ST_A_M45PE20:
14573         case FLASH_5761VENDOR_ST_A_M45PE40:
14574         case FLASH_5761VENDOR_ST_A_M45PE80:
14575         case FLASH_5761VENDOR_ST_A_M45PE16:
14576         case FLASH_5761VENDOR_ST_M_M45PE20:
14577         case FLASH_5761VENDOR_ST_M_M45PE40:
14578         case FLASH_5761VENDOR_ST_M_M45PE80:
14579         case FLASH_5761VENDOR_ST_M_M45PE16:
14580                 tp->nvram_jedecnum = JEDEC_ST;
14581                 tg3_flag_set(tp, NVRAM_BUFFERED);
14582                 tg3_flag_set(tp, FLASH);
14583                 tp->nvram_pagesize = 256;
14584                 break;
14585         }
14586
14587         if (protect) {
14588                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14589         } else {
14590                 switch (nvcfg1) {
14591                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14592                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14593                 case FLASH_5761VENDOR_ST_A_M45PE16:
14594                 case FLASH_5761VENDOR_ST_M_M45PE16:
14595                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14596                         break;
14597                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14598                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14599                 case FLASH_5761VENDOR_ST_A_M45PE80:
14600                 case FLASH_5761VENDOR_ST_M_M45PE80:
14601                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14602                         break;
14603                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14604                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14605                 case FLASH_5761VENDOR_ST_A_M45PE40:
14606                 case FLASH_5761VENDOR_ST_M_M45PE40:
14607                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14608                         break;
14609                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14610                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14611                 case FLASH_5761VENDOR_ST_A_M45PE20:
14612                 case FLASH_5761VENDOR_ST_M_M45PE20:
14613                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14614                         break;
14615                 }
14616         }
14617 }
14618
14619 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14620 {
14621         tp->nvram_jedecnum = JEDEC_ATMEL;
14622         tg3_flag_set(tp, NVRAM_BUFFERED);
14623         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14624 }
14625
14626 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14627 {
14628         u32 nvcfg1;
14629
14630         nvcfg1 = tr32(NVRAM_CFG1);
14631
14632         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14633         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14634         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14635                 tp->nvram_jedecnum = JEDEC_ATMEL;
14636                 tg3_flag_set(tp, NVRAM_BUFFERED);
14637                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14638
14639                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14640                 tw32(NVRAM_CFG1, nvcfg1);
14641                 return;
14642         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14643         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14644         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14645         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14646         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14647         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14648         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14649                 tp->nvram_jedecnum = JEDEC_ATMEL;
14650                 tg3_flag_set(tp, NVRAM_BUFFERED);
14651                 tg3_flag_set(tp, FLASH);
14652
14653                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14654                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14655                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14656                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14657                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14658                         break;
14659                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14660                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14661                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14662                         break;
14663                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14664                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14665                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14666                         break;
14667                 }
14668                 break;
14669         case FLASH_5752VENDOR_ST_M45PE10:
14670         case FLASH_5752VENDOR_ST_M45PE20:
14671         case FLASH_5752VENDOR_ST_M45PE40:
14672                 tp->nvram_jedecnum = JEDEC_ST;
14673                 tg3_flag_set(tp, NVRAM_BUFFERED);
14674                 tg3_flag_set(tp, FLASH);
14675
14676                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14677                 case FLASH_5752VENDOR_ST_M45PE10:
14678                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14679                         break;
14680                 case FLASH_5752VENDOR_ST_M45PE20:
14681                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14682                         break;
14683                 case FLASH_5752VENDOR_ST_M45PE40:
14684                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14685                         break;
14686                 }
14687                 break;
14688         default:
14689                 tg3_flag_set(tp, NO_NVRAM);
14690                 return;
14691         }
14692
14693         tg3_nvram_get_pagesize(tp, nvcfg1);
14694         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14695                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14696 }
14697
14699 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14700 {
14701         u32 nvcfg1;
14702
14703         nvcfg1 = tr32(NVRAM_CFG1);
14704
14705         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14706         case FLASH_5717VENDOR_ATMEL_EEPROM:
14707         case FLASH_5717VENDOR_MICRO_EEPROM:
14708                 tp->nvram_jedecnum = JEDEC_ATMEL;
14709                 tg3_flag_set(tp, NVRAM_BUFFERED);
14710                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14711
14712                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14713                 tw32(NVRAM_CFG1, nvcfg1);
14714                 return;
14715         case FLASH_5717VENDOR_ATMEL_MDB011D:
14716         case FLASH_5717VENDOR_ATMEL_ADB011B:
14717         case FLASH_5717VENDOR_ATMEL_ADB011D:
14718         case FLASH_5717VENDOR_ATMEL_MDB021D:
14719         case FLASH_5717VENDOR_ATMEL_ADB021B:
14720         case FLASH_5717VENDOR_ATMEL_ADB021D:
14721         case FLASH_5717VENDOR_ATMEL_45USPT:
14722                 tp->nvram_jedecnum = JEDEC_ATMEL;
14723                 tg3_flag_set(tp, NVRAM_BUFFERED);
14724                 tg3_flag_set(tp, FLASH);
14725
14726                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14727                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14728                         /* Detect size with tg3_get_nvram_size() */
14729                         break;
14730                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14731                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14732                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14733                         break;
14734                 default:
14735                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14736                         break;
14737                 }
14738                 break;
14739         case FLASH_5717VENDOR_ST_M_M25PE10:
14740         case FLASH_5717VENDOR_ST_A_M25PE10:
14741         case FLASH_5717VENDOR_ST_M_M45PE10:
14742         case FLASH_5717VENDOR_ST_A_M45PE10:
14743         case FLASH_5717VENDOR_ST_M_M25PE20:
14744         case FLASH_5717VENDOR_ST_A_M25PE20:
14745         case FLASH_5717VENDOR_ST_M_M45PE20:
14746         case FLASH_5717VENDOR_ST_A_M45PE20:
14747         case FLASH_5717VENDOR_ST_25USPT:
14748         case FLASH_5717VENDOR_ST_45USPT:
14749                 tp->nvram_jedecnum = JEDEC_ST;
14750                 tg3_flag_set(tp, NVRAM_BUFFERED);
14751                 tg3_flag_set(tp, FLASH);
14752
14753                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14754                 case FLASH_5717VENDOR_ST_M_M25PE20:
14755                 case FLASH_5717VENDOR_ST_M_M45PE20:
14756                         /* Detect size with tg3_get_nvram_size() */
14757                         break;
14758                 case FLASH_5717VENDOR_ST_A_M25PE20:
14759                 case FLASH_5717VENDOR_ST_A_M45PE20:
14760                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14761                         break;
14762                 default:
14763                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14764                         break;
14765                 }
14766                 break;
14767         default:
14768                 tg3_flag_set(tp, NO_NVRAM);
14769                 return;
14770         }
14771
14772         tg3_nvram_get_pagesize(tp, nvcfg1);
14773         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14774                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14775 }
14776
14777 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14778 {
14779         u32 nvcfg1, nvmpinstrp;
14780
14781         nvcfg1 = tr32(NVRAM_CFG1);
14782         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14783
14784         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14785                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14786                         tg3_flag_set(tp, NO_NVRAM);
14787                         return;
14788                 }
14789
14790                 switch (nvmpinstrp) {
14791                 case FLASH_5762_EEPROM_HD:
14792                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14793                         break;
14794                 case FLASH_5762_EEPROM_LD:
14795                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14796                         break;
14797                 case FLASH_5720VENDOR_M_ST_M45PE20:
14798                         /* This pinstrap supports multiple sizes, so force it
14799                          * to read the actual size from location 0xf0.
14800                          */
14801                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14802                         break;
14803                 }
14804         }
14805
14806         switch (nvmpinstrp) {
14807         case FLASH_5720_EEPROM_HD:
14808         case FLASH_5720_EEPROM_LD:
14809                 tp->nvram_jedecnum = JEDEC_ATMEL;
14810                 tg3_flag_set(tp, NVRAM_BUFFERED);
14811
14812                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14813                 tw32(NVRAM_CFG1, nvcfg1);
14814                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14815                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14816                 else
14817                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14818                 return;
14819         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14820         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14821         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14822         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14823         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14824         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14825         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14826         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14827         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14828         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14829         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14830         case FLASH_5720VENDOR_ATMEL_45USPT:
14831                 tp->nvram_jedecnum = JEDEC_ATMEL;
14832                 tg3_flag_set(tp, NVRAM_BUFFERED);
14833                 tg3_flag_set(tp, FLASH);
14834
14835                 switch (nvmpinstrp) {
14836                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14837                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14838                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14839                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14840                         break;
14841                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14842                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14843                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14844                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14845                         break;
14846                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14847                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14848                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14849                         break;
14850                 default:
14851                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14852                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14853                         break;
14854                 }
14855                 break;
14856         case FLASH_5720VENDOR_M_ST_M25PE10:
14857         case FLASH_5720VENDOR_M_ST_M45PE10:
14858         case FLASH_5720VENDOR_A_ST_M25PE10:
14859         case FLASH_5720VENDOR_A_ST_M45PE10:
14860         case FLASH_5720VENDOR_M_ST_M25PE20:
14861         case FLASH_5720VENDOR_M_ST_M45PE20:
14862         case FLASH_5720VENDOR_A_ST_M25PE20:
14863         case FLASH_5720VENDOR_A_ST_M45PE20:
14864         case FLASH_5720VENDOR_M_ST_M25PE40:
14865         case FLASH_5720VENDOR_M_ST_M45PE40:
14866         case FLASH_5720VENDOR_A_ST_M25PE40:
14867         case FLASH_5720VENDOR_A_ST_M45PE40:
14868         case FLASH_5720VENDOR_M_ST_M25PE80:
14869         case FLASH_5720VENDOR_M_ST_M45PE80:
14870         case FLASH_5720VENDOR_A_ST_M25PE80:
14871         case FLASH_5720VENDOR_A_ST_M45PE80:
14872         case FLASH_5720VENDOR_ST_25USPT:
14873         case FLASH_5720VENDOR_ST_45USPT:
14874                 tp->nvram_jedecnum = JEDEC_ST;
14875                 tg3_flag_set(tp, NVRAM_BUFFERED);
14876                 tg3_flag_set(tp, FLASH);
14877
14878                 switch (nvmpinstrp) {
14879                 case FLASH_5720VENDOR_M_ST_M25PE20:
14880                 case FLASH_5720VENDOR_M_ST_M45PE20:
14881                 case FLASH_5720VENDOR_A_ST_M25PE20:
14882                 case FLASH_5720VENDOR_A_ST_M45PE20:
14883                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14884                         break;
14885                 case FLASH_5720VENDOR_M_ST_M25PE40:
14886                 case FLASH_5720VENDOR_M_ST_M45PE40:
14887                 case FLASH_5720VENDOR_A_ST_M25PE40:
14888                 case FLASH_5720VENDOR_A_ST_M45PE40:
14889                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14890                         break;
14891                 case FLASH_5720VENDOR_M_ST_M25PE80:
14892                 case FLASH_5720VENDOR_M_ST_M45PE80:
14893                 case FLASH_5720VENDOR_A_ST_M25PE80:
14894                 case FLASH_5720VENDOR_A_ST_M45PE80:
14895                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14896                         break;
14897                 default:
14898                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14899                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14900                         break;
14901                 }
14902                 break;
14903         default:
14904                 tg3_flag_set(tp, NO_NVRAM);
14905                 return;
14906         }
14907
14908         tg3_nvram_get_pagesize(tp, nvcfg1);
14909         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14910                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14911
14912         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14913                 u32 val;
14914
14915                 if (tg3_nvram_read(tp, 0, &val))
14916                         return;
14917
14918                 if (val != TG3_EEPROM_MAGIC &&
14919                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14920                         tg3_flag_set(tp, NO_NVRAM);
14921         }
14922 }
14923
14924 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14925 static void tg3_nvram_init(struct tg3 *tp)
14926 {
14927         if (tg3_flag(tp, IS_SSB_CORE)) {
14928                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14929                 tg3_flag_clear(tp, NVRAM);
14930                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14931                 tg3_flag_set(tp, NO_NVRAM);
14932                 return;
14933         }
14934
14935         tw32_f(GRC_EEPROM_ADDR,
14936              (EEPROM_ADDR_FSM_RESET |
14937               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14938                EEPROM_ADDR_CLKPERD_SHIFT)));
14939
14940         msleep(1);
14941
14942         /* Enable serial EEPROM accesses. */
14943         tw32_f(GRC_LOCAL_CTRL,
14944              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14945         udelay(100);
14946
14947         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14948             tg3_asic_rev(tp) != ASIC_REV_5701) {
14949                 tg3_flag_set(tp, NVRAM);
14950
14951                 if (tg3_nvram_lock(tp)) {
14952                         netdev_warn(tp->dev,
14953                                     "Cannot get nvram lock, %s failed\n",
14954                                     __func__);
14955                         return;
14956                 }
14957                 tg3_enable_nvram_access(tp);
14958
14959                 tp->nvram_size = 0;
14960
14961                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14962                         tg3_get_5752_nvram_info(tp);
14963                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14964                         tg3_get_5755_nvram_info(tp);
14965                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14966                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14967                          tg3_asic_rev(tp) == ASIC_REV_5785)
14968                         tg3_get_5787_nvram_info(tp);
14969                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14970                         tg3_get_5761_nvram_info(tp);
14971                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14972                         tg3_get_5906_nvram_info(tp);
14973                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14974                          tg3_flag(tp, 57765_CLASS))
14975                         tg3_get_57780_nvram_info(tp);
14976                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14977                          tg3_asic_rev(tp) == ASIC_REV_5719)
14978                         tg3_get_5717_nvram_info(tp);
14979                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14980                          tg3_asic_rev(tp) == ASIC_REV_5762)
14981                         tg3_get_5720_nvram_info(tp);
14982                 else
14983                         tg3_get_nvram_info(tp);
14984
14985                 if (tp->nvram_size == 0)
14986                         tg3_get_nvram_size(tp);
14987
14988                 tg3_disable_nvram_access(tp);
14989                 tg3_nvram_unlock(tp);
14990
14991         } else {
14992                 tg3_flag_clear(tp, NVRAM);
14993                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14994
14995                 tg3_get_eeprom_size(tp);
14996         }
14997 }
14998
14999 struct subsys_tbl_ent {
15000         u16 subsys_vendor, subsys_devid;
15001         u32 phy_id;
15002 };
15003
15004 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15005         /* Broadcom boards. */
15006         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15007           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15008         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15009           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15010         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15011           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15012         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15013           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15014         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15015           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15016         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15017           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15018         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15019           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15020         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15021           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15022         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15023           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15024         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15025           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15026         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15027           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15028
15029         /* 3com boards. */
15030         { TG3PCI_SUBVENDOR_ID_3COM,
15031           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15032         { TG3PCI_SUBVENDOR_ID_3COM,
15033           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15034         { TG3PCI_SUBVENDOR_ID_3COM,
15035           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15036         { TG3PCI_SUBVENDOR_ID_3COM,
15037           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15038         { TG3PCI_SUBVENDOR_ID_3COM,
15039           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15040
15041         /* DELL boards. */
15042         { TG3PCI_SUBVENDOR_ID_DELL,
15043           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15044         { TG3PCI_SUBVENDOR_ID_DELL,
15045           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15046         { TG3PCI_SUBVENDOR_ID_DELL,
15047           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15048         { TG3PCI_SUBVENDOR_ID_DELL,
15049           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15050
15051         /* Compaq boards. */
15052         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15053           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15054         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15055           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15056         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15057           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15058         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15059           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15060         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15061           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15062
15063         /* IBM boards. */
15064         { TG3PCI_SUBVENDOR_ID_IBM,
15065           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15066 };
15067
15068 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15069 {
15070         int i;
15071
15072         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15073                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15074                      tp->pdev->subsystem_vendor) &&
15075                     (subsys_id_to_phy_id[i].subsys_devid ==
15076                      tp->pdev->subsystem_device))
15077                         return &subsys_id_to_phy_id[i];
15078         }
15079         return NULL;
15080 }
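/* Example lookup (entry taken from the table above): a board whose PCI
 * subsystem IDs match TG3PCI_SUBVENDOR_ID_3COM /
 * TG3PCI_SUBDEVICE_ID_3COM_3C996BT resolves to TG3_PHY_ID_BCM5701; a miss
 * returns NULL and the caller must determine the PHY some other way.
 */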
15081
15082 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15083 {
15084         u32 val;
15085
15086         tp->phy_id = TG3_PHY_ID_INVALID;
15087         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15088
15089         /* Assume an onboard device and WOL capable by default.  */
15090         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15091         tg3_flag_set(tp, WOL_CAP);
15092
15093         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15094                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15095                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15096                         tg3_flag_set(tp, IS_NIC);
15097                 }
15098                 val = tr32(VCPU_CFGSHDW);
15099                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15100                         tg3_flag_set(tp, ASPM_WORKAROUND);
15101                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15102                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15103                         tg3_flag_set(tp, WOL_ENABLE);
15104                         device_set_wakeup_enable(&tp->pdev->dev, true);
15105                 }
15106                 goto done;
15107         }
15108
15109         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15110         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15111                 u32 nic_cfg, led_cfg;
15112                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15113                 u32 nic_phy_id, ver, eeprom_phy_id;
15114                 int eeprom_phy_serdes = 0;
15115
15116                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15117                 tp->nic_sram_data_cfg = nic_cfg;
15118
15119                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15120                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15121                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15122                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15123                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15124                     (ver > 0) && (ver < 0x100))
15125                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15126
15127                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15128                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15129
15130                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15131                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15132                     tg3_asic_rev(tp) == ASIC_REV_5720)
15133                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15134
15135                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15136                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15137                         eeprom_phy_serdes = 1;
15138
15139                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15140                 if (nic_phy_id != 0) {
15141                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15142                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15143
15144                         eeprom_phy_id  = (id1 >> 16) << 10;
15145                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15146                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15147                 } else
15148                         eeprom_phy_id = 0;
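                        /* Layout note, derived from the shifts above: the
                         * 32-bit tg3 PHY ID packs ID2 bits 15:10 into bits
                         * 31:26, the whole ID1 halfword into bits 25:10,
                         * and ID2 bits 9:0 into bits 9:0.  tg3_phy_probe()
                         * builds hw_phy_id from MII_PHYSID1/2 the same way.
                         */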
15149
15150                 tp->phy_id = eeprom_phy_id;
15151                 if (eeprom_phy_serdes) {
15152                         if (!tg3_flag(tp, 5705_PLUS))
15153                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15154                         else
15155                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15156                 }
15157
15158                 if (tg3_flag(tp, 5750_PLUS))
15159                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15160                                     SHASTA_EXT_LED_MODE_MASK);
15161                 else
15162                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15163
15164                 switch (led_cfg) {
15165                 default:
15166                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15167                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15168                         break;
15169
15170                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15171                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15172                         break;
15173
15174                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15175                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15176
15177                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15178                          * as happens with some older 5700/5701 bootcode.
15179                          */
15180                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15181                             tg3_asic_rev(tp) == ASIC_REV_5701)
15182                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15183
15184                         break;
15185
15186                 case SHASTA_EXT_LED_SHARED:
15187                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15188                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15189                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15190                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15191                                                  LED_CTRL_MODE_PHY_2);
15192
15193                         if (tg3_flag(tp, 5717_PLUS) ||
15194                             tg3_asic_rev(tp) == ASIC_REV_5762)
15195                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15196                                                 LED_CTRL_BLINK_RATE_MASK;
15197
15198                         break;
15199
15200                 case SHASTA_EXT_LED_MAC:
15201                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15202                         break;
15203
15204                 case SHASTA_EXT_LED_COMBO:
15205                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15206                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15207                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15208                                                  LED_CTRL_MODE_PHY_2);
15209                         break;
15210
15211                 }
15212
15213                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15214                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15215                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15216                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15217
15218                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15219                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15220
15221                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15222                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15223                         if ((tp->pdev->subsystem_vendor ==
15224                              PCI_VENDOR_ID_ARIMA) &&
15225                             (tp->pdev->subsystem_device == 0x205a ||
15226                              tp->pdev->subsystem_device == 0x2063))
15227                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15228                 } else {
15229                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15230                         tg3_flag_set(tp, IS_NIC);
15231                 }
15232
15233                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15234                         tg3_flag_set(tp, ENABLE_ASF);
15235                         if (tg3_flag(tp, 5750_PLUS))
15236                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15237                 }
15238
15239                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15240                     tg3_flag(tp, 5750_PLUS))
15241                         tg3_flag_set(tp, ENABLE_APE);
15242
15243                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15244                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15245                         tg3_flag_clear(tp, WOL_CAP);
15246
15247                 if (tg3_flag(tp, WOL_CAP) &&
15248                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15249                         tg3_flag_set(tp, WOL_ENABLE);
15250                         device_set_wakeup_enable(&tp->pdev->dev, true);
15251                 }
15252
15253                 if (cfg2 & (1 << 17))
15254                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15255
15256                 /* SerDes signal pre-emphasis in register 0x590 is
15257                  * set by the bootcode if bit 18 is set. */
15258                 if (cfg2 & (1 << 18))
15259                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15260
15261                 if ((tg3_flag(tp, 57765_PLUS) ||
15262                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15263                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15264                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15265                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15266
15267                 if (tg3_flag(tp, PCI_EXPRESS)) {
15268                         u32 cfg3;
15269
15270                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15271                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15272                             !tg3_flag(tp, 57765_PLUS) &&
15273                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15274                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15275                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15276                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15277                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15278                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15279                 }
15280
15281                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15282                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15283                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15284                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15285                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15286                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15287
15288                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15289                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15290         }
15291 done:
15292         if (tg3_flag(tp, WOL_CAP))
15293                 device_set_wakeup_enable(&tp->pdev->dev,
15294                                          tg3_flag(tp, WOL_ENABLE));
15295         else
15296                 device_set_wakeup_capable(&tp->pdev->dev, false);
15297 }
15298
15299 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15300 {
15301         int i, err;
15302         u32 val2, off = offset * 8;
15303
15304         err = tg3_nvram_lock(tp);
15305         if (err)
15306                 return err;
15307
15308         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15309         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15310                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15311         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15312         udelay(10);
15313
15314         for (i = 0; i < 100; i++) {
15315                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15316                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15317                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15318                         break;
15319                 }
15320                 udelay(10);
15321         }
15322
15323         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15324
15325         tg3_nvram_unlock(tp);
15326         if (val2 & APE_OTP_STATUS_CMD_DONE)
15327                 return 0;
15328
15329         return -EBUSY;
15330 }
15331
15332 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15333 {
15334         int i;
15335         u32 val;
15336
15337         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15338         tw32(OTP_CTRL, cmd);
15339
15340         /* Wait for up to 1 ms for command to execute. */
15341         for (i = 0; i < 100; i++) {
15342                 val = tr32(OTP_STATUS);
15343                 if (val & OTP_STATUS_CMD_DONE)
15344                         break;
15345                 udelay(10);
15346         }
15347
15348         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15349 }
15350
15351 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15352  * configuration is a 32-bit value that straddles the alignment boundary.
15353  * We do two 32-bit reads and then shift and merge the results.
15354  */
15355 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15356 {
15357         u32 bhalf_otp, thalf_otp;
15358
15359         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15360
15361         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15362                 return 0;
15363
15364         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15365
15366         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15367                 return 0;
15368
15369         thalf_otp = tr32(OTP_READ_DATA);
15370
15371         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15372
15373         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15374                 return 0;
15375
15376         bhalf_otp = tr32(OTP_READ_DATA);
15377
15378         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15379 }
15380
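      /* Seed the link configuration with everything the PHY may
       * advertise: the 1G modes unless the PHY is 10/100-only, the
       * 10/100 twisted-pair modes for copper PHYs, and fibre for
       * serdes.  Autonegotiation is enabled by default.
       */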
15381 static void tg3_phy_init_link_config(struct tg3 *tp)
15382 {
15383         u32 adv = ADVERTISED_Autoneg;
15384
15385         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15386                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15387                         adv |= ADVERTISED_1000baseT_Half;
15388                 adv |= ADVERTISED_1000baseT_Full;
15389         }
15390
15391         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15392                 adv |= ADVERTISED_100baseT_Half |
15393                        ADVERTISED_100baseT_Full |
15394                        ADVERTISED_10baseT_Half |
15395                        ADVERTISED_10baseT_Full |
15396                        ADVERTISED_TP;
15397         else
15398                 adv |= ADVERTISED_FIBRE;
15399
15400         tp->link_config.advertising = adv;
15401         tp->link_config.speed = SPEED_UNKNOWN;
15402         tp->link_config.duplex = DUPLEX_UNKNOWN;
15403         tp->link_config.autoneg = AUTONEG_ENABLE;
15404         tp->link_config.active_speed = SPEED_UNKNOWN;
15405         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15406
15407         tp->old_link = -1;
15408 }
15409
15410 static int tg3_phy_probe(struct tg3 *tp)
15411 {
15412         u32 hw_phy_id_1, hw_phy_id_2;
15413         u32 hw_phy_id, hw_phy_id_masked;
15414         int err;
15415
15416         /* flow control autonegotiation is default behavior */
15417         tg3_flag_set(tp, PAUSE_AUTONEG);
15418         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15419
15420         if (tg3_flag(tp, ENABLE_APE)) {
15421                 switch (tp->pci_fn) {
15422                 case 0:
15423                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15424                         break;
15425                 case 1:
15426                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15427                         break;
15428                 case 2:
15429                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15430                         break;
15431                 case 3:
15432                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15433                         break;
15434                 }
15435         }
15436
15437         if (!tg3_flag(tp, ENABLE_ASF) &&
15438             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15439             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15440                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15441                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15442
15443         if (tg3_flag(tp, USE_PHYLIB))
15444                 return tg3_phy_init(tp);
15445
15446         /* Reading the PHY ID register can conflict with ASF
15447          * firmware access to the PHY hardware.
15448          */
15449         err = 0;
15450         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15451                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15452         } else {
15453                 /* Now read the physical PHY_ID from the chip and verify
15454          * that it is sane.  If it doesn't look good, we fall back
15455          * to the hard-coded table-based PHY_ID or, failing that,
15456          * to the value found in the eeprom area.
15457                  */
15458                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15459                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15460
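                      /* Fold the two MII ID words into tg3's internal
                       * TG3_PHY_ID format.
                       */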
15461                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15462                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15463                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15464
15465                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15466         }
15467
15468         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15469                 tp->phy_id = hw_phy_id;
15470                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15471                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15472                 else
15473                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15474         } else {
15475                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15476                         /* Do nothing, phy ID already set up in
15477                          * tg3_get_eeprom_hw_cfg().
15478                          */
15479                 } else {
15480                         struct subsys_tbl_ent *p;
15481
15482                         /* No eeprom signature?  Try the hardcoded
15483                          * subsys device table.
15484                          */
15485                         p = tg3_lookup_by_subsys(tp);
15486                         if (p) {
15487                                 tp->phy_id = p->phy_id;
15488                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15489                                 /* So far we have seen the IDs 0xbc050cd0,
15490                                  * 0xbc050f80 and 0xbc050c30 on devices
15491                                  * connected to a BCM4785, and there are
15492                                  * probably more.  For now, just assume that
15493                                  * the phy is supported when it is connected
15494                                  * to an SSB core.
15495                                  */
15496                                 return -ENODEV;
15497                         }
15498
15499                         if (!tp->phy_id ||
15500                             tp->phy_id == TG3_PHY_ID_BCM8002)
15501                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15502                 }
15503         }
15504
15505         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15506             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15507              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15508              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15509              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15510              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15511               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15512              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15513               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15514                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15515
15516                 tp->eee.supported = SUPPORTED_100baseT_Full |
15517                                     SUPPORTED_1000baseT_Full;
15518                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15519                                      ADVERTISED_1000baseT_Full;
15520                 tp->eee.eee_enabled = 1;
15521                 tp->eee.tx_lpi_enabled = 1;
15522                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15523         }
15524
15525         tg3_phy_init_link_config(tp);
15526
15527         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15528             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15529             !tg3_flag(tp, ENABLE_APE) &&
15530             !tg3_flag(tp, ENABLE_ASF)) {
15531                 u32 bmsr, dummy;
15532
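                      /* BMSR latches link-down events, so read it twice;
                       * the second read reflects the current link state.
                       */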
15533                 tg3_readphy(tp, MII_BMSR, &bmsr);
15534                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15535                     (bmsr & BMSR_LSTATUS))
15536                         goto skip_phy_reset;
15537
15538                 err = tg3_phy_reset(tp);
15539                 if (err)
15540                         return err;
15541
15542                 tg3_phy_set_wirespeed(tp);
15543
15544                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15545                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15546                                             tp->link_config.flowctrl);
15547
15548                         tg3_writephy(tp, MII_BMCR,
15549                                      BMCR_ANENABLE | BMCR_ANRESTART);
15550                 }
15551         }
15552
15553 skip_phy_reset:
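      /* The BCM5401 PHY needs its DSP coefficients loaded; the init is
       * run twice below, presumably because a single pass does not
       * always take.
       */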
15554         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15555                 err = tg3_init_5401phy_dsp(tp);
15556                 if (err)
15557                         return err;
15558
15559                 err = tg3_init_5401phy_dsp(tp);
15560         }
15561
15562         return err;
15563 }
15564
15565 static void tg3_read_vpd(struct tg3 *tp)
15566 {
15567         u8 *vpd_data;
15568         unsigned int block_end, rosize, len;
15569         u32 vpdlen;
15570         int j, i = 0;
15571
15572         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15573         if (!vpd_data)
15574                 goto out_no_vpd;
15575
15576         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15577         if (i < 0)
15578                 goto out_not_found;
15579
15580         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15581         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15582         i += PCI_VPD_LRDT_TAG_SIZE;
15583
15584         if (block_end > vpdlen)
15585                 goto out_not_found;
15586
15587         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15588                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15589         if (j > 0) {
15590                 len = pci_vpd_info_field_size(&vpd_data[j]);
15591
15592                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
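                      /* "1028" is Dell's PCI vendor ID (0x1028) in ASCII;
                       * only then is the VENDOR0 keyword trusted to carry
                       * a firmware version string.
                       */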
15593                 if (j + len > block_end || len != 4 ||
15594                     memcmp(&vpd_data[j], "1028", 4))
15595                         goto partno;
15596
15597                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15598                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15599                 if (j < 0)
15600                         goto partno;
15601
15602                 len = pci_vpd_info_field_size(&vpd_data[j]);
15603
15604                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15605                 if (j + len > block_end)
15606                         goto partno;
15607
15608                 if (len >= sizeof(tp->fw_ver))
15609                         len = sizeof(tp->fw_ver) - 1;
15610                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15611                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15612                          &vpd_data[j]);
15613         }
15614
15615 partno:
15616         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15617                                       PCI_VPD_RO_KEYWORD_PARTNO);
15618         if (i < 0)
15619                 goto out_not_found;
15620
15621         len = pci_vpd_info_field_size(&vpd_data[i]);
15622
15623         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15624         if (len > TG3_BPN_SIZE ||
15625             (len + i) > vpdlen)
15626                 goto out_not_found;
15627
15628         memcpy(tp->board_part_number, &vpd_data[i], len);
15629
15630 out_not_found:
15631         kfree(vpd_data);
15632         if (tp->board_part_number[0])
15633                 return;
15634
15635 out_no_vpd:
15636         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15637                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15638                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15639                         strcpy(tp->board_part_number, "BCM5717");
15640                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15641                         strcpy(tp->board_part_number, "BCM5718");
15642                 else
15643                         goto nomatch;
15644         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15645                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15646                         strcpy(tp->board_part_number, "BCM57780");
15647                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15648                         strcpy(tp->board_part_number, "BCM57760");
15649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15650                         strcpy(tp->board_part_number, "BCM57790");
15651                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15652                         strcpy(tp->board_part_number, "BCM57788");
15653                 else
15654                         goto nomatch;
15655         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15656                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15657                         strcpy(tp->board_part_number, "BCM57761");
15658                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15659                         strcpy(tp->board_part_number, "BCM57765");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15661                         strcpy(tp->board_part_number, "BCM57781");
15662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15663                         strcpy(tp->board_part_number, "BCM57785");
15664                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15665                         strcpy(tp->board_part_number, "BCM57791");
15666                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15667                         strcpy(tp->board_part_number, "BCM57795");
15668                 else
15669                         goto nomatch;
15670         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15671                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15672                         strcpy(tp->board_part_number, "BCM57762");
15673                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15674                         strcpy(tp->board_part_number, "BCM57766");
15675                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15676                         strcpy(tp->board_part_number, "BCM57782");
15677                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15678                         strcpy(tp->board_part_number, "BCM57786");
15679                 else
15680                         goto nomatch;
15681         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15682                 strcpy(tp->board_part_number, "BCM95906");
15683         } else {
15684 nomatch:
15685                 strcpy(tp->board_part_number, "none");
15686         }
15687 }
15688
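      /* A firmware image is considered valid when its first word carries
       * the 0x0c000000 signature in the bits covered by the 0xfc000000
       * mask and its second word is zero.
       */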
15689 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15690 {
15691         u32 val;
15692
15693         if (tg3_nvram_read(tp, offset, &val) ||
15694             (val & 0xfc000000) != 0x0c000000 ||
15695             tg3_nvram_read(tp, offset + 4, &val) ||
15696             val != 0)
15697                 return 0;
15698
15699         return 1;
15700 }
15701
15702 static void tg3_read_bc_ver(struct tg3 *tp)
15703 {
15704         u32 val, offset, start, ver_offset;
15705         int i, dst_off;
15706         bool newver = false;
15707
15708         if (tg3_nvram_read(tp, 0xc, &offset) ||
15709             tg3_nvram_read(tp, 0x4, &start))
15710                 return;
15711
15712         offset = tg3_nvram_logical_addr(tp, offset);
15713
15714         if (tg3_nvram_read(tp, offset, &val))
15715                 return;
15716
15717         if ((val & 0xfc000000) == 0x0c000000) {
15718                 if (tg3_nvram_read(tp, offset + 4, &val))
15719                         return;
15720
15721                 if (val == 0)
15722                         newver = true;
15723         }
15724
15725         dst_off = strlen(tp->fw_ver);
15726
15727         if (newver) {
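                      /* Newer bootcode stores a 16-byte version string in
                       * NVRAM; copy it out four bytes at a time.
                       */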
15728                 if (TG3_VER_SIZE - dst_off < 16 ||
15729                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15730                         return;
15731
15732                 offset = offset + ver_offset - start;
15733                 for (i = 0; i < 16; i += 4) {
15734                         __be32 v;
15735                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15736                                 return;
15737
15738                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15739                 }
15740         } else {
15741                 u32 major, minor;
15742
15743                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15744                         return;
15745
15746                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15747                         TG3_NVM_BCVER_MAJSFT;
15748                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15749                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15750                          "v%d.%02d", major, minor);
15751         }
15752 }
15753
15754 static void tg3_read_hwsb_ver(struct tg3 *tp)
15755 {
15756         u32 val, major, minor;
15757
15758         /* Use native endian representation */
15759         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15760                 return;
15761
15762         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15763                 TG3_NVM_HWSB_CFG1_MAJSFT;
15764         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15765                 TG3_NVM_HWSB_CFG1_MINSFT;
15766
15767         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15768 }
15769
15770 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15771 {
15772         u32 offset, major, minor, build;
15773
15774         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15775
15776         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15777                 return;
15778
15779         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15780         case TG3_EEPROM_SB_REVISION_0:
15781                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15782                 break;
15783         case TG3_EEPROM_SB_REVISION_2:
15784                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15785                 break;
15786         case TG3_EEPROM_SB_REVISION_3:
15787                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15788                 break;
15789         case TG3_EEPROM_SB_REVISION_4:
15790                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15791                 break;
15792         case TG3_EEPROM_SB_REVISION_5:
15793                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15794                 break;
15795         case TG3_EEPROM_SB_REVISION_6:
15796                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15797                 break;
15798         default:
15799                 return;
15800         }
15801
15802         if (tg3_nvram_read(tp, offset, &val))
15803                 return;
15804
15805         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15806                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15807         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15808                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15809         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15810
15811         if (minor > 99 || build > 26)
15812                 return;
15813
15814         offset = strlen(tp->fw_ver);
15815         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15816                  " v%d.%02d", major, minor);
15817
15818         if (build > 0) {
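                      /* Non-zero builds get a letter suffix: 1 -> 'a',
                       * 2 -> 'b', and so on (build <= 26 was checked
                       * above).
                       */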
15819                 offset = strlen(tp->fw_ver);
15820                 if (offset < TG3_VER_SIZE - 1)
15821                         tp->fw_ver[offset] = 'a' + build - 1;
15822         }
15823 }
15824
15825 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15826 {
15827         u32 val, offset, start;
15828         int i, vlen;
15829
15830         for (offset = TG3_NVM_DIR_START;
15831              offset < TG3_NVM_DIR_END;
15832              offset += TG3_NVM_DIRENT_SIZE) {
15833                 if (tg3_nvram_read(tp, offset, &val))
15834                         return;
15835
15836                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15837                         break;
15838         }
15839
15840         if (offset == TG3_NVM_DIR_END)
15841                 return;
15842
15843         if (!tg3_flag(tp, 5705_PLUS))
15844                 start = 0x08000000;
15845         else if (tg3_nvram_read(tp, offset - 4, &start))
15846                 return;
15847
15848         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15849             !tg3_fw_img_is_valid(tp, offset) ||
15850             tg3_nvram_read(tp, offset + 8, &val))
15851                 return;
15852
15853         offset += val - start;
15854
15855         vlen = strlen(tp->fw_ver);
15856
15857         tp->fw_ver[vlen++] = ',';
15858         tp->fw_ver[vlen++] = ' ';
15859
15860         for (i = 0; i < 4; i++) {
15861                 __be32 v;
15862                 if (tg3_nvram_read_be32(tp, offset, &v))
15863                         return;
15864
15865                 offset += sizeof(v);
15866
15867                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15868                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15869                         break;
15870                 }
15871
15872                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15873                 vlen += sizeof(v);
15874         }
15875 }
15876
15877 static void tg3_probe_ncsi(struct tg3 *tp)
15878 {
15879         u32 apedata;
15880
15881         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15882         if (apedata != APE_SEG_SIG_MAGIC)
15883                 return;
15884
15885         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15886         if (!(apedata & APE_FW_STATUS_READY))
15887                 return;
15888
15889         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15890                 tg3_flag_set(tp, APE_HAS_NCSI);
15891 }
15892
15893 static void tg3_read_dash_ver(struct tg3 *tp)
15894 {
15895         int vlen;
15896         u32 apedata;
15897         char *fwtype;
15898
15899         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15900
15901         if (tg3_flag(tp, APE_HAS_NCSI))
15902                 fwtype = "NCSI";
15903         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15904                 fwtype = "SMASH";
15905         else
15906                 fwtype = "DASH";
15907
15908         vlen = strlen(tp->fw_ver);
15909
15910         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15911                  fwtype,
15912                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15913                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15914                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15915                  (apedata & APE_FW_VERSION_BLDMSK));
15916 }
15917
15918 static void tg3_read_otp_ver(struct tg3 *tp)
15919 {
15920         u32 val, val2;
15921
15922         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15923                 return;
15924
15925         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15926             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15927             TG3_OTP_MAGIC0_VALID(val)) {
15928                 u64 val64 = (u64) val << 32 | val2;
15929                 u32 ver = 0;
15930                 int i, vlen;
15931
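                      /* Walk the low seven bytes of the OTP word and keep
                       * the last non-zero byte as the version number.
                       */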
15932                 for (i = 0; i < 7; i++) {
15933                         if ((val64 & 0xff) == 0)
15934                                 break;
15935                         ver = val64 & 0xff;
15936                         val64 >>= 8;
15937                 }
15938                 vlen = strlen(tp->fw_ver);
15939                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15940         }
15941 }
15942
15943 static void tg3_read_fw_ver(struct tg3 *tp)
15944 {
15945         u32 val;
15946         bool vpd_vers = false;
15947
15948         if (tp->fw_ver[0] != 0)
15949                 vpd_vers = true;
15950
15951         if (tg3_flag(tp, NO_NVRAM)) {
15952                 strcat(tp->fw_ver, "sb");
15953                 tg3_read_otp_ver(tp);
15954                 return;
15955         }
15956
15957         if (tg3_nvram_read(tp, 0, &val))
15958                 return;
15959
15960         if (val == TG3_EEPROM_MAGIC)
15961                 tg3_read_bc_ver(tp);
15962         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15963                 tg3_read_sb_ver(tp, val);
15964         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15965                 tg3_read_hwsb_ver(tp);
15966
15967         if (tg3_flag(tp, ENABLE_ASF)) {
15968                 if (tg3_flag(tp, ENABLE_APE)) {
15969                         tg3_probe_ncsi(tp);
15970                         if (!vpd_vers)
15971                                 tg3_read_dash_ver(tp);
15972                 } else if (!vpd_vers) {
15973                         tg3_read_mgmtfw_ver(tp);
15974                 }
15975         }
15976
15977         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15978 }
15979
15980 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15981 {
15982         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15983                 return TG3_RX_RET_MAX_SIZE_5717;
15984         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15985                 return TG3_RX_RET_MAX_SIZE_5700;
15986         else
15987                 return TG3_RX_RET_MAX_SIZE_5705;
15988 }
15989
15990 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15991         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15992         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15993         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15994         { },
15995 };
15996
15997 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15998 {
15999         struct pci_dev *peer;
16000         unsigned int func, devnr = tp->pdev->devfn & ~7;
16001
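              /* devnr is function 0 of our slot; scan all eight functions
               * looking for the sibling port of this device.
               */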
16002         for (func = 0; func < 8; func++) {
16003                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16004                 if (peer && peer != tp->pdev)
16005                         break;
16006                 pci_dev_put(peer);
16007         }
16008         /* The 5704 can be configured in single-port mode; set peer to
16009          * tp->pdev in that case.
16010          */
16011         if (!peer) {
16012                 peer = tp->pdev;
16013                 return peer;
16014         }
16015
16016         /*
16017          * We don't need to keep the refcount elevated; there's no way
16018          * to remove one half of this device without removing the other.
16019          */
16020         pci_dev_put(peer);
16021
16022         return peer;
16023 }
16024
16025 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16026 {
16027         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16028         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16029                 u32 reg;
16030
16031                 /* All devices that use the alternate
16032                  * ASIC REV location have a CPMU.
16033                  */
16034                 tg3_flag_set(tp, CPMU_PRESENT);
16035
16036                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16037                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16038                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16040                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16041                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16042                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16043                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16044                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16045                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16046                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16047                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16048                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16049                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16050                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16051                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16052                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16053                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16054                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16055                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16056                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16057                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16058                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16059                 else
16060                         reg = TG3PCI_PRODID_ASICREV;
16061
16062                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16063         }
16064
16065         /* Wrong chip ID in 5752 A0. This code can be removed later
16066          * as A0 is not in production.
16067          */
16068         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16069                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16070
16071         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16072                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16073
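              /* Build up the cumulative chip-generation flags.  These nest:
               * 5717_PLUS and 57765_CLASS fold into 57765_PLUS, which folds
               * into 5755_PLUS, then 5750_PLUS, then 5705_PLUS.
               */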
16074         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16075             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16076             tg3_asic_rev(tp) == ASIC_REV_5720)
16077                 tg3_flag_set(tp, 5717_PLUS);
16078
16079         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16080             tg3_asic_rev(tp) == ASIC_REV_57766)
16081                 tg3_flag_set(tp, 57765_CLASS);
16082
16083         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16084              tg3_asic_rev(tp) == ASIC_REV_5762)
16085                 tg3_flag_set(tp, 57765_PLUS);
16086
16087         /* Intentionally exclude ASIC_REV_5906 */
16088         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16089             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16090             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16091             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16092             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16093             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16094             tg3_flag(tp, 57765_PLUS))
16095                 tg3_flag_set(tp, 5755_PLUS);
16096
16097         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16098             tg3_asic_rev(tp) == ASIC_REV_5714)
16099                 tg3_flag_set(tp, 5780_CLASS);
16100
16101         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16102             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16103             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16104             tg3_flag(tp, 5755_PLUS) ||
16105             tg3_flag(tp, 5780_CLASS))
16106                 tg3_flag_set(tp, 5750_PLUS);
16107
16108         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16109             tg3_flag(tp, 5750_PLUS))
16110                 tg3_flag_set(tp, 5705_PLUS);
16111 }
16112
16113 static bool tg3_10_100_only_device(struct tg3 *tp,
16114                                    const struct pci_device_id *ent)
16115 {
16116         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16117
16118         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16119              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16120             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16121                 return true;
16122
16123         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16124                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16125                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16126                                 return true;
16127                 } else {
16128                         return true;
16129                 }
16130         }
16131
16132         return false;
16133 }
16134
16135 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16136 {
16137         u32 misc_ctrl_reg;
16138         u32 pci_state_reg, grc_misc_cfg;
16139         u32 val;
16140         u16 pci_cmd;
16141         int err;
16142
16143         /* Force memory write invalidate off.  If we leave it on,
16144          * then on 5700_BX chips we have to enable a workaround.
16145          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16146          * to match the cacheline size.  The Broadcom driver have this
16147          * to match the cacheline size.  The Broadcom driver has this
16148          * workaround but turns MWI off all the time and so never uses
16149          */
16150         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16151         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16152         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16153
16154         /* Important! -- Make sure register accesses are byteswapped
16155          * correctly.  Also, for those chips that require it, make
16156          * sure that indirect register accesses are enabled before
16157          * the first operation.
16158          */
16159         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16160                               &misc_ctrl_reg);
16161         tp->misc_host_ctrl |= (misc_ctrl_reg &
16162                                MISC_HOST_CTRL_CHIPREV);
16163         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16164                                tp->misc_host_ctrl);
16165
16166         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16167
16168         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16169          * we need to disable memory and use config. cycles
16170          * only to access all registers. The 5702/03 chips
16171          * can mistakenly decode the special cycles from the
16172          * ICH chipsets as memory write cycles, causing corruption
16173          * of register and memory space. Only certain ICH bridges
16174          * will drive special cycles with non-zero data during the
16175          * address phase which can fall within the 5703's address
16176          * range. This is not an ICH bug as the PCI spec allows
16177          * non-zero address during special cycles. However, only
16178          * these ICH bridges are known to drive non-zero addresses
16179          * during special cycles.
16180          *
16181          * Since special cycles do not cross PCI bridges, we only
16182          * enable this workaround if the 5703 is on the secondary
16183          * bus of these ICH bridges.
16184          */
16185         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16186             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16187                 static struct tg3_dev_id {
16188                         u32     vendor;
16189                         u32     device;
16190                         u32     rev;
16191                 } ich_chipsets[] = {
16192                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16193                           PCI_ANY_ID },
16194                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16195                           PCI_ANY_ID },
16196                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16197                           0xa },
16198                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16199                           PCI_ANY_ID },
16200                         { },
16201                 };
16202                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16203                 struct pci_dev *bridge = NULL;
16204
16205                 while (pci_id->vendor != 0) {
16206                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16207                                                 bridge);
16208                         if (!bridge) {
16209                                 pci_id++;
16210                                 continue;
16211                         }
16212                         if (pci_id->rev != PCI_ANY_ID) {
16213                                 if (bridge->revision > pci_id->rev)
16214                                         continue;
16215                         }
16216                         if (bridge->subordinate &&
16217                             (bridge->subordinate->number ==
16218                              tp->pdev->bus->number)) {
16219                                 tg3_flag_set(tp, ICH_WORKAROUND);
16220                                 pci_dev_put(bridge);
16221                                 break;
16222                         }
16223                 }
16224         }
16225
16226         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16227                 static struct tg3_dev_id {
16228                         u32     vendor;
16229                         u32     device;
16230                 } bridge_chipsets[] = {
16231                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16232                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16233                         { },
16234                 };
16235                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16236                 struct pci_dev *bridge = NULL;
16237
16238                 while (pci_id->vendor != 0) {
16239                         bridge = pci_get_device(pci_id->vendor,
16240                                                 pci_id->device,
16241                                                 bridge);
16242                         if (!bridge) {
16243                                 pci_id++;
16244                                 continue;
16245                         }
16246                         if (bridge->subordinate &&
16247                             (bridge->subordinate->number <=
16248                              tp->pdev->bus->number) &&
16249                             (bridge->subordinate->busn_res.end >=
16250                              tp->pdev->bus->number)) {
16251                                 tg3_flag_set(tp, 5701_DMA_BUG);
16252                                 pci_dev_put(bridge);
16253                                 break;
16254                         }
16255                 }
16256         }
16257
16258         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16259          * DMA addresses > 40-bit. This bridge may have other additional
16260          * 57xx devices behind it in some 4-port NIC designs for example.
16261          * Any tg3 device found behind the bridge will also need the 40-bit
16262          * DMA workaround.
16263          */
16264         if (tg3_flag(tp, 5780_CLASS)) {
16265                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16266                 tp->msi_cap = tp->pdev->msi_cap;
16267         } else {
16268                 struct pci_dev *bridge = NULL;
16269
16270                 do {
16271                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16272                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16273                                                 bridge);
16274                         if (bridge && bridge->subordinate &&
16275                             (bridge->subordinate->number <=
16276                              tp->pdev->bus->number) &&
16277                             (bridge->subordinate->busn_res.end >=
16278                              tp->pdev->bus->number)) {
16279                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16280                                 pci_dev_put(bridge);
16281                                 break;
16282                         }
16283                 } while (bridge);
16284         }
16285
16286         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16287             tg3_asic_rev(tp) == ASIC_REV_5714)
16288                 tp->pdev_peer = tg3_find_peer(tp);
16289
16290         /* Determine TSO capabilities */
16291         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16292                 ; /* Do nothing. HW bug. */
16293         else if (tg3_flag(tp, 57765_PLUS))
16294                 tg3_flag_set(tp, HW_TSO_3);
16295         else if (tg3_flag(tp, 5755_PLUS) ||
16296                  tg3_asic_rev(tp) == ASIC_REV_5906)
16297                 tg3_flag_set(tp, HW_TSO_2);
16298         else if (tg3_flag(tp, 5750_PLUS)) {
16299                 tg3_flag_set(tp, HW_TSO_1);
16300                 tg3_flag_set(tp, TSO_BUG);
16301                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16302                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16303                         tg3_flag_clear(tp, TSO_BUG);
16304         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16305                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16306                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16307                 tg3_flag_set(tp, FW_TSO);
16308                 tg3_flag_set(tp, TSO_BUG);
16309                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16310                         tp->fw_needed = FIRMWARE_TG3TSO5;
16311                 else
16312                         tp->fw_needed = FIRMWARE_TG3TSO;
16313         }
16314
16315         /* Selectively allow TSO based on operating conditions */
16316         if (tg3_flag(tp, HW_TSO_1) ||
16317             tg3_flag(tp, HW_TSO_2) ||
16318             tg3_flag(tp, HW_TSO_3) ||
16319             tg3_flag(tp, FW_TSO)) {
16320                 /* For firmware TSO, assume ASF is disabled.
16321                  * We'll disable TSO later if we discover ASF
16322                  * is enabled in tg3_get_eeprom_hw_cfg().
16323                  */
16324                 tg3_flag_set(tp, TSO_CAPABLE);
16325         } else {
16326                 tg3_flag_clear(tp, TSO_CAPABLE);
16327                 tg3_flag_clear(tp, TSO_BUG);
16328                 tp->fw_needed = NULL;
16329         }
16330
16331         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16332                 tp->fw_needed = FIRMWARE_TG3;
16333
16334         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16335                 tp->fw_needed = FIRMWARE_TG357766;
16336
16337         tp->irq_max = 1;
16338
16339         if (tg3_flag(tp, 5750_PLUS)) {
16340                 tg3_flag_set(tp, SUPPORT_MSI);
16341                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16342                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16343                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16344                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16345                      tp->pdev_peer == tp->pdev))
16346                         tg3_flag_clear(tp, SUPPORT_MSI);
16347
16348                 if (tg3_flag(tp, 5755_PLUS) ||
16349                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16350                         tg3_flag_set(tp, 1SHOT_MSI);
16351                 }
16352
16353                 if (tg3_flag(tp, 57765_PLUS)) {
16354                         tg3_flag_set(tp, SUPPORT_MSIX);
16355                         tp->irq_max = TG3_IRQ_MAX_VECS;
16356                 }
16357         }
16358
16359         tp->txq_max = 1;
16360         tp->rxq_max = 1;
16361         if (tp->irq_max > 1) {
16362                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16363                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16364
16365                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16366                     tg3_asic_rev(tp) == ASIC_REV_5720)
16367                         tp->txq_max = tp->irq_max - 1;
16368         }
16369
16370         if (tg3_flag(tp, 5755_PLUS) ||
16371             tg3_asic_rev(tp) == ASIC_REV_5906)
16372                 tg3_flag_set(tp, SHORT_DMA_BUG);
16373
16374         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16375                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16376
16377         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16378             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16379             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16380             tg3_asic_rev(tp) == ASIC_REV_5762)
16381                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16382
16383         if (tg3_flag(tp, 57765_PLUS) &&
16384             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16385                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16386
16387         if (!tg3_flag(tp, 5705_PLUS) ||
16388             tg3_flag(tp, 5780_CLASS) ||
16389             tg3_flag(tp, USE_JUMBO_BDFLAG))
16390                 tg3_flag_set(tp, JUMBO_CAPABLE);
16391
16392         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16393                               &pci_state_reg);
16394
16395         if (pci_is_pcie(tp->pdev)) {
16396                 u16 lnkctl;
16397
16398                 tg3_flag_set(tp, PCI_EXPRESS);
16399
16400                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16401                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16402                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16403                                 tg3_flag_clear(tp, HW_TSO_2);
16404                                 tg3_flag_clear(tp, TSO_CAPABLE);
16405                         }
16406                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16407                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16408                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16409                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16410                                 tg3_flag_set(tp, CLKREQ_BUG);
16411                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16412                         tg3_flag_set(tp, L1PLLPD_EN);
16413                 }
16414         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16415                 /* BCM5785 devices are effectively PCIe devices, and should
16416                  * follow PCIe codepaths, but do not have a PCIe capabilities
16417                  * section.
16418                  */
16419                 tg3_flag_set(tp, PCI_EXPRESS);
16420         } else if (!tg3_flag(tp, 5705_PLUS) ||
16421                    tg3_flag(tp, 5780_CLASS)) {
16422                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16423                 if (!tp->pcix_cap) {
16424                         dev_err(&tp->pdev->dev,
16425                                 "Cannot find PCI-X capability, aborting\n");
16426                         return -EIO;
16427                 }
16428
16429                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16430                         tg3_flag_set(tp, PCIX_MODE);
16431         }
16432
16433         /* If we have an AMD 762 or VIA K8T800 chipset, write
16434          * reordering to the mailbox registers done by the host
16435          * controller can cause major troubles.  We read back from
16436          * every mailbox register write to force the writes to be
16437          * posted to the chip in order.
16438          */
16439         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16440             !tg3_flag(tp, PCI_EXPRESS))
16441                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16442
16443         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16444                              &tp->pci_cacheline_sz);
16445         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16446                              &tp->pci_lat_timer);
16447         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16448             tp->pci_lat_timer < 64) {
16449                 tp->pci_lat_timer = 64;
16450                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16451                                       tp->pci_lat_timer);
16452         }
16453
16454         /* Important! -- It is critical that the PCI-X hw workaround
16455          * situation is decided before the first MMIO register access.
16456          */
16457         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16458                 /* 5700 BX chips need to have their TX producer index
16459                  * mailboxes written twice to work around a bug.
16460                  */
16461                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16462
16463                 /* If we are in PCI-X mode, enable the register write workaround.
16464                  *
16465                  * The workaround is to use indirect register accesses
16466                  * for all chip writes not to mailbox registers.
16467                  */
16468                 if (tg3_flag(tp, PCIX_MODE)) {
16469                         u32 pm_reg;
16470
16471                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16472
16473                         /* The chip can have its power management PCI config
16474                          * space registers clobbered due to this bug.
16475                          * So explicitly force the chip into D0 here.
16476                          */
16477                         pci_read_config_dword(tp->pdev,
16478                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16479                                               &pm_reg);
16480                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16481                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16482                         pci_write_config_dword(tp->pdev,
16483                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16484                                                pm_reg);
16485
16486                         /* Also, force SERR#/PERR# in PCI command. */
16487                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16488                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16489                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16490                 }
16491         }
16492
16493         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16494                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16495         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16496                 tg3_flag_set(tp, PCI_32BIT);
16497
16498         /* Chip-specific fixup from Broadcom driver */
16499         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16500             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16501                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16502                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16503         }
16504
16505         /* Default fast path register access methods */
16506         tp->read32 = tg3_read32;
16507         tp->write32 = tg3_write32;
16508         tp->read32_mbox = tg3_read32;
16509         tp->write32_mbox = tg3_write32;
16510         tp->write32_tx_mbox = tg3_write32;
16511         tp->write32_rx_mbox = tg3_write32;
16512
16513         /* Various workaround register access methods */
16514         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16515                 tp->write32 = tg3_write_indirect_reg32;
16516         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16517                  (tg3_flag(tp, PCI_EXPRESS) &&
16518                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16519                 /*
16520                  * Back to back register writes can cause problems on these
16521                  * chips, the workaround is to read back all reg writes
16522                  * except those to mailbox regs.
16523                  *
16524                  * See tg3_write_indirect_reg32().
16525                  */
16526                 tp->write32 = tg3_write_flush_reg32;
16527         }
16528
16529         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16530                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16531                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16532                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16533         }
16534
16535         if (tg3_flag(tp, ICH_WORKAROUND)) {
16536                 tp->read32 = tg3_read_indirect_reg32;
16537                 tp->write32 = tg3_write_indirect_reg32;
16538                 tp->read32_mbox = tg3_read_indirect_mbox;
16539                 tp->write32_mbox = tg3_write_indirect_mbox;
16540                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16541                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16542
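                      /* All further register accesses go through config
                       * space, so the MMIO mapping and PCI memory decoding
                       * are no longer needed.
                       */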
16543                 iounmap(tp->regs);
16544                 tp->regs = NULL;
16545
16546                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16547                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16548                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16549         }
16550         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16551                 tp->read32_mbox = tg3_read32_mbox_5906;
16552                 tp->write32_mbox = tg3_write32_mbox_5906;
16553                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16554                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16555         }
16556
16557         if (tp->write32 == tg3_write_indirect_reg32 ||
16558             (tg3_flag(tp, PCIX_MODE) &&
16559              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16560               tg3_asic_rev(tp) == ASIC_REV_5701)))
16561                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16562
16563         /* The memory arbiter has to be enabled in order for SRAM accesses
16564          * to succeed.  Normally on powerup the tg3 chip firmware will make
16565          * sure it is enabled, but other entities such as system netboot
16566          * code might disable it.
16567          */
16568         val = tr32(MEMARB_MODE);
16569         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16570
16571         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16572         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16573             tg3_flag(tp, 5780_CLASS)) {
16574                 if (tg3_flag(tp, PCIX_MODE)) {
16575                         pci_read_config_dword(tp->pdev,
16576                                               tp->pcix_cap + PCI_X_STATUS,
16577                                               &val);
16578                         tp->pci_fn = val & 0x7;
16579                 }
16580         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16581                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16582                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16583                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16584                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16585                         val = tr32(TG3_CPMU_STATUS);
16586
16587                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16588                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16589                 else
16590                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16591                                      TG3_CPMU_STATUS_FSHFT_5719;
16592         }
16593
16594         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16595                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16596                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16597         }
16598
16599         /* Get eeprom hw config before calling tg3_set_power_state().
16600          * In particular, the TG3_FLAG_IS_NIC flag must be
16601          * determined before calling tg3_set_power_state() so that
16602          * we know whether or not to switch out of Vaux power.
16603          * When the flag is set, it means that GPIO1 is used for eeprom
16604          * write protect and also implies that it is a LOM where GPIOs
16605          * are not used to switch power.
16606          */
16607         tg3_get_eeprom_hw_cfg(tp);
16608
16609         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16610                 tg3_flag_clear(tp, TSO_CAPABLE);
16611                 tg3_flag_clear(tp, TSO_BUG);
16612                 tp->fw_needed = NULL;
16613         }
16614
16615         if (tg3_flag(tp, ENABLE_APE)) {
16616                 /* Allow reads and writes to the
16617                  * APE register and memory space.
16618                  */
16619                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16620                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16621                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16622                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16623                                        pci_state_reg);
16624
16625                 tg3_ape_lock_init(tp);
16626         }
16627
16628         /* Set up tp->grc_local_ctrl before calling
16629          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16630          * will bring 5700's external PHY out of reset.
16631          * It is also used as eeprom write protect on LOMs.
16632          */
16633         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16634         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16635             tg3_flag(tp, EEPROM_WRITE_PROT))
16636                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16637                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16638         /* Unused GPIO3 must be driven as output on 5752 because there
16639          * are no pull-up resistors on unused GPIO pins.
16640          */
16641         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16642                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16643
16644         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16645             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16646             tg3_flag(tp, 57765_CLASS))
16647                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16648
16649         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16650             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16651                 /* Turn off the debug UART. */
16652                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16653                 if (tg3_flag(tp, IS_NIC))
16654                         /* Keep VMain power. */
16655                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16656                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16657         }
16658
16659         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16660                 tp->grc_local_ctrl |=
16661                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16662
16663         /* Switch out of Vaux if it is a NIC */
16664         tg3_pwrsrc_switch_to_vmain(tp);
16665
16666         /* Derive initial jumbo mode from MTU assigned in
16667          * ether_setup() via the alloc_etherdev() call
16668          */
16669         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16670                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16671
16672         /* Determine WakeOnLan speed to use. */
16673         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16674             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16675             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16676             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16677                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16678         } else {
16679                 tg3_flag_set(tp, WOL_SPEED_100MB);
16680         }
16681
16682         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16683                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16684
16685         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16686         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16687             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16688              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16689              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16690             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16691             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16692                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16693
16694         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16695             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16696                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16697         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16698                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16699
16700         if (tg3_flag(tp, 5705_PLUS) &&
16701             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16702             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16703             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16704             !tg3_flag(tp, 57765_PLUS)) {
16705                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16706                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16707                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16708                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16709                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16710                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16711                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16712                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16713                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16714                 } else
16715                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16716         }
16717
16718         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16719             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16720                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16721                 if (tp->phy_otp == 0)
16722                         tp->phy_otp = TG3_OTP_DEFAULT;
16723         }
16724
16725         if (tg3_flag(tp, CPMU_PRESENT))
16726                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16727         else
16728                 tp->mi_mode = MAC_MI_MODE_BASE;
16729
16730         tp->coalesce_mode = 0;
16731         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16732             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16733                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16734
16735         /* Set these bits to enable the statistics workaround. */
16736         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16737             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16738             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16739             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16740                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16741                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16742         }
16743
16744         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16745             tg3_asic_rev(tp) == ASIC_REV_57780)
16746                 tg3_flag_set(tp, USE_PHYLIB);
16747
16748         err = tg3_mdio_init(tp);
16749         if (err)
16750                 return err;
16751
16752         /* Initialize data/descriptor byte/word swapping. */
16753         val = tr32(GRC_MODE);
16754         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16755             tg3_asic_rev(tp) == ASIC_REV_5762)
16756                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16757                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16758                         GRC_MODE_B2HRX_ENABLE |
16759                         GRC_MODE_HTX2B_ENABLE |
16760                         GRC_MODE_HOST_STACKUP);
16761         else
16762                 val &= GRC_MODE_HOST_STACKUP;
16763
16764         tw32(GRC_MODE, val | tp->grc_mode);
16765
16766         tg3_switch_clocks(tp);
16767
16768         /* Clear this out for sanity. */
16769         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16770
16771         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16772         tw32(TG3PCI_REG_BASE_ADDR, 0);
16773
16774         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16775                               &pci_state_reg);
16776         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16777             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16778                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16779                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16780                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16781                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16782                         void __iomem *sram_base;
16783
16784                         /* Write some dummy words into the SRAM status block
16785                          * area and see if they read back correctly.  If the
16786                          * readback value is bad, force-enable the PCI-X workaround.
16787                          */
16788                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16789
16790                         writel(0x00000000, sram_base);
16791                         writel(0x00000000, sram_base + 4);
16792                         writel(0xffffffff, sram_base + 4);
16793                         if (readl(sram_base) != 0x00000000)
16794                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16795                 }
16796         }
16797
16798         udelay(50);
16799         tg3_nvram_init(tp);
16800
16801         /* If the device has an NVRAM, no need to load patch firmware */
16802         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16803             !tg3_flag(tp, NO_NVRAM))
16804                 tp->fw_needed = NULL;
16805
16806         grc_misc_cfg = tr32(GRC_MISC_CFG);
16807         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16808
16809         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16810             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16811              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16812                 tg3_flag_set(tp, IS_5788);
16813
16814         if (!tg3_flag(tp, IS_5788) &&
16815             tg3_asic_rev(tp) != ASIC_REV_5700)
16816                 tg3_flag_set(tp, TAGGED_STATUS);
16817         if (tg3_flag(tp, TAGGED_STATUS)) {
16818                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16819                                       HOSTCC_MODE_CLRTICK_TXBD);
16820
16821                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16822                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16823                                        tp->misc_host_ctrl);
16824         }
16825
16826         /* Preserve the APE MAC_MODE bits */
16827         if (tg3_flag(tp, ENABLE_APE))
16828                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16829         else
16830                 tp->mac_mode = 0;
16831
16832         if (tg3_10_100_only_device(tp, ent))
16833                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16834
16835         err = tg3_phy_probe(tp);
16836         if (err) {
16837                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16838                 /* ... but do not return immediately ... */
16839                 tg3_mdio_fini(tp);
16840         }
16841
16842         tg3_read_vpd(tp);
16843         tg3_read_fw_ver(tp);
16844
16845         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16846                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16847         } else {
16848                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16849                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16850                 else
16851                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16852         }
16853
16854         /* 5700 {AX,BX} chips have a broken status block link
16855          * change bit implementation, so we must use the
16856          * status register in those cases.
16857          */
16858         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16859                 tg3_flag_set(tp, USE_LINKCHG_REG);
16860         else
16861                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16862
16863         /* The led_ctrl is set during tg3_phy_probe; here we might
16864          * have to force the link status polling mechanism based
16865          * upon subsystem IDs.
16866          */
16867         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16868             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16869             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16870                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16871                 tg3_flag_set(tp, USE_LINKCHG_REG);
16872         }
16873
16874         /* For all SERDES we poll the MAC status register. */
16875         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16876                 tg3_flag_set(tp, POLL_SERDES);
16877         else
16878                 tg3_flag_clear(tp, POLL_SERDES);
16879
16880         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16881                 tg3_flag_set(tp, POLL_CPMU_LINK);
16882
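              /* The 5701 in PCI-X mode evidently cannot DMA into an IP-aligned
               * (2-byte offset) buffer, so the receive offset is dropped for
               * it; on architectures without efficient unaligned access, every
               * packet is then copied so the stack sees aligned headers.
               */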
16883         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16884         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16885         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16886             tg3_flag(tp, PCIX_MODE)) {
16887                 tp->rx_offset = NET_SKB_PAD;
16888 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16889                 tp->rx_copy_thresh = ~(u16)0;
16890 #endif
16891         }
16892
16893         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16894         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16895         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16896
16897         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16898
16899         /* Increment the rx prod index on the rx std ring by at most
16900          * 8 for these chips to work around hw errata.
16901          */
16902         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16903             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16904             tg3_asic_rev(tp) == ASIC_REV_5755)
16905                 tp->rx_std_max_post = 8;
16906
16907         if (tg3_flag(tp, ASPM_WORKAROUND))
16908                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16909                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16910
16911         return err;
16912 }
16913
16914 #ifdef CONFIG_SPARC
16915 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16916 {
16917         struct net_device *dev = tp->dev;
16918         struct pci_dev *pdev = tp->pdev;
16919         struct device_node *dp = pci_device_to_OF_node(pdev);
16920         const unsigned char *addr;
16921         int len;
16922
16923         addr = of_get_property(dp, "local-mac-address", &len);
16924         if (addr && len == ETH_ALEN) {
16925                 memcpy(dev->dev_addr, addr, ETH_ALEN);
16926                 return 0;
16927         }
16928         return -ENODEV;
16929 }
16930
16931 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16932 {
16933         struct net_device *dev = tp->dev;
16934
16935         memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16936         return 0;
16937 }
16938 #endif
16939
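      /* Try the MAC address sources in order of preference: OpenFirmware
       * property (SPARC), the SSB host (SSB GigE cores), the bootcode
       * mailbox in NIC SRAM, NVRAM, and finally whatever is already latched
       * in the MAC address registers.
       */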
16940 static int tg3_get_device_address(struct tg3 *tp)
16941 {
16942         struct net_device *dev = tp->dev;
16943         u32 hi, lo, mac_offset;
16944         int addr_ok = 0;
16945         int err;
16946
16947 #ifdef CONFIG_SPARC
16948         if (!tg3_get_macaddr_sparc(tp))
16949                 return 0;
16950 #endif
16951
16952         if (tg3_flag(tp, IS_SSB_CORE)) {
16953                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16954                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16955                         return 0;
16956         }
16957
16958         mac_offset = 0x7c;
16959         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16960             tg3_flag(tp, 5780_CLASS)) {
16961                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16962                         mac_offset = 0xcc;
16963                 if (tg3_nvram_lock(tp))
16964                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16965                 else
16966                         tg3_nvram_unlock(tp);
16967         } else if (tg3_flag(tp, 5717_PLUS)) {
16968                 if (tp->pci_fn & 1)
16969                         mac_offset = 0xcc;
16970                 if (tp->pci_fn > 1)
16971                         mac_offset += 0x18c;
16972         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16973                 mac_offset = 0x10;
16974
16975         /* First try to get it from MAC address mailbox. */
16976         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16977         if ((hi >> 16) == 0x484b) {
16978                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16979                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16980
16981                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16982                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16983                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16984                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16985                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16986
16987                 /* Some old bootcode may report a 0 MAC address in SRAM */
16988                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16989         }
16990         if (!addr_ok) {
16991                 /* Next, try NVRAM. */
16992                 if (!tg3_flag(tp, NO_NVRAM) &&
16993                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16994                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16995                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16996                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16997                 }
16998                 /* Finally just fetch it out of the MAC control regs. */
16999                 else {
17000                         hi = tr32(MAC_ADDR_0_HIGH);
17001                         lo = tr32(MAC_ADDR_0_LOW);
17002
17003                         dev->dev_addr[5] = lo & 0xff;
17004                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17005                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17006                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17007                         dev->dev_addr[1] = hi & 0xff;
17008                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17009                 }
17010         }
17011
17012         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17013 #ifdef CONFIG_SPARC
17014                 if (!tg3_get_default_macaddr_sparc(tp))
17015                         return 0;
17016 #endif
17017                 return -EINVAL;
17018         }
17019         return 0;
17020 }
17021
17022 #define BOUNDARY_SINGLE_CACHELINE       1
17023 #define BOUNDARY_MULTI_CACHELINE        2
17024
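      /* Compute DMA read/write boundary bits for DMA_RWCTRL from the PCI
       * cache line size.  The goal depends on the host architecture: on
       * most RISC PCI hosts a DMA burst should not cross a cache line,
       * because the controller disconnects mid-burst and wastes bandwidth.
       */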
17025 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17026 {
17027         int cacheline_size;
17028         u8 byte;
17029         int goal;
17030
17031         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17032         if (byte == 0)
17033                 cacheline_size = 1024;
17034         else
17035                 cacheline_size = (int) byte * 4;
17036
17037         /* On 5703 and later chips, the boundary bits have no
17038          * effect; only 5700/5701 and PCIe writes still use them.
17039          */
17040         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17041             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17042             !tg3_flag(tp, PCI_EXPRESS))
17043                 goto out;
17044
17045 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17046         goal = BOUNDARY_MULTI_CACHELINE;
17047 #else
17048 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17049         goal = BOUNDARY_SINGLE_CACHELINE;
17050 #else
17051         goal = 0;
17052 #endif
17053 #endif
17054
17055         if (tg3_flag(tp, 57765_PLUS)) {
17056                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17057                 goto out;
17058         }
17059
17060         if (!goal)
17061                 goto out;
17062
17063         /* PCI controllers on most RISC systems tend to disconnect
17064          * when a device tries to burst across a cache-line boundary.
17065          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17066          *
17067          * Unfortunately, for PCI-E there are only limited
17068          * write-side controls for this, and thus for reads
17069          * we will still get the disconnects.  We'll also waste
17070          * these PCI cycles for both read and write for chips
17071          * other than 5700 and 5701, which do not implement the
17072          * boundary bits.
17073          */
17074         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17075                 switch (cacheline_size) {
17076                 case 16:
17077                 case 32:
17078                 case 64:
17079                 case 128:
17080                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17081                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17082                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17083                         } else {
17084                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17085                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17086                         }
17087                         break;
17088
17089                 case 256:
17090                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17091                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17092                         break;
17093
17094                 default:
17095                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17096                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17097                         break;
17098                 }
17099         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17100                 switch (cacheline_size) {
17101                 case 16:
17102                 case 32:
17103                 case 64:
17104                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17105                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17106                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17107                                 break;
17108                         }
17109                         /* fallthrough */
17110                 case 128:
17111                 default:
17112                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17113                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17114                         break;
17115                 }
17116         } else {
17117                 switch (cacheline_size) {
17118                 case 16:
17119                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17120                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17121                                         DMA_RWCTRL_WRITE_BNDRY_16);
17122                                 break;
17123                         }
17124                         /* fallthrough */
17125                 case 32:
17126                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17127                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17128                                         DMA_RWCTRL_WRITE_BNDRY_32);
17129                                 break;
17130                         }
17131                         /* fallthrough */
17132                 case 64:
17133                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17134                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17135                                         DMA_RWCTRL_WRITE_BNDRY_64);
17136                                 break;
17137                         }
17138                         /* fallthrough */
17139                 case 128:
17140                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17141                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17142                                         DMA_RWCTRL_WRITE_BNDRY_128);
17143                                 break;
17144                         }
17145                         /* fallthrough */
17146                 case 256:
17147                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17148                                 DMA_RWCTRL_WRITE_BNDRY_256);
17149                         break;
17150                 case 512:
17151                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17152                                 DMA_RWCTRL_WRITE_BNDRY_512);
17153                         break;
17154                 case 1024:
17155                 default:
17156                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17157                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17158                         break;
17159                 }
17160         }
17161
17162 out:
17163         return val;
17164 }
17165
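      /* Drive a single host<->device DMA transaction by hand: place an
       * internal buffer descriptor in NIC SRAM through the PCI memory
       * window, enqueue it to the read or write DMA engine's FTQ, then
       * poll the matching completion FIFO.  Returns 0 on completion or
       * -ENODEV if the descriptor never completes.
       */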
17166 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17167                            int size, bool to_device)
17168 {
17169         struct tg3_internal_buffer_desc test_desc;
17170         u32 sram_dma_descs;
17171         int i, ret;
17172
17173         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17174
17175         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17176         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17177         tw32(RDMAC_STATUS, 0);
17178         tw32(WDMAC_STATUS, 0);
17179
17180         tw32(BUFMGR_MODE, 0);
17181         tw32(FTQ_RESET, 0);
17182
17183         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17184         test_desc.addr_lo = buf_dma & 0xffffffff;
17185         test_desc.nic_mbuf = 0x00002100;
17186         test_desc.len = size;
17187
17188         /*
17189          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17190          * the *second* time the tg3 driver was loaded after an
17191          * initial scan.
17192          *
17193          * Broadcom tells me:
17194          *   ...the DMA engine is connected to the GRC block and a DMA
17195          *   reset may affect the GRC block in some unpredictable way...
17196          *   The behavior of resets to individual blocks has not been tested.
17197          *
17198          * Broadcom noted the GRC reset will also reset all sub-components.
17199          */
17200         if (to_device) {
17201                 test_desc.cqid_sqid = (13 << 8) | 2;
17202
17203                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17204                 udelay(40);
17205         } else {
17206                 test_desc.cqid_sqid = (16 << 8) | 7;
17207
17208                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17209                 udelay(40);
17210         }
17211         test_desc.flags = 0x00000005;
17212
17213         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17214                 u32 val;
17215
17216                 val = *(((u32 *)&test_desc) + i);
17217                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17218                                        sram_dma_descs + (i * sizeof(u32)));
17219                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17220         }
17221         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17222
17223         if (to_device)
17224                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17225         else
17226                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17227
17228         ret = -ENODEV;
17229         for (i = 0; i < 40; i++) {
17230                 u32 val;
17231
17232                 if (to_device)
17233                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17234                 else
17235                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17236                 if ((val & 0xffff) == sram_dma_descs) {
17237                         ret = 0;
17238                         break;
17239                 }
17240
17241                 udelay(100);
17242         }
17243
17244         return ret;
17245 }
17246
17247 #define TEST_BUFFER_SIZE        0x2000
17248
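      /* Hosts known to trip the 5700/5701 write DMA bug even though they
       * pass the DMA test; the conservative 16-byte write boundary is
       * forced when one of these bridges is present.
       */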
17249 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17250         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17251         { },
17252 };
17253
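      /* Choose DMA_RWCTRL watermark and boundary settings for the bus
       * type, then (on 5700/5701 only) run a write/read DMA pass over a
       * test buffer and fall back to a 16-byte write boundary if the data
       * comes back corrupted.
       */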
17254 static int tg3_test_dma(struct tg3 *tp)
17255 {
17256         dma_addr_t buf_dma;
17257         u32 *buf, saved_dma_rwctrl;
17258         int ret = 0;
17259
17260         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17261                                  &buf_dma, GFP_KERNEL);
17262         if (!buf) {
17263                 ret = -ENOMEM;
17264                 goto out_nofree;
17265         }
17266
17267         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17268                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17269
17270         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17271
17272         if (tg3_flag(tp, 57765_PLUS))
17273                 goto out;
17274
17275         if (tg3_flag(tp, PCI_EXPRESS)) {
17276                 /* DMA read watermark not used on PCIE */
17277                 tp->dma_rwctrl |= 0x00180000;
17278         } else if (!tg3_flag(tp, PCIX_MODE)) {
17279                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17280                     tg3_asic_rev(tp) == ASIC_REV_5750)
17281                         tp->dma_rwctrl |= 0x003f0000;
17282                 else
17283                         tp->dma_rwctrl |= 0x003f000f;
17284         } else {
17285                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17286                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17287                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17288                         u32 read_water = 0x7;
17289
17290                         /* If the 5704 is behind the EPB bridge, we can
17291                          * do the less restrictive ONE_DMA workaround for
17292                          * better performance.
17293                          */
17294                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17295                             tg3_asic_rev(tp) == ASIC_REV_5704)
17296                                 tp->dma_rwctrl |= 0x8000;
17297                         else if (ccval == 0x6 || ccval == 0x7)
17298                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17299
17300                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17301                                 read_water = 4;
17302                         /* Set bit 23 to enable PCIX hw bug fix */
17303                         tp->dma_rwctrl |=
17304                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17305                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17306                                 (1 << 23);
17307                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17308                         /* 5780 always in PCIX mode */
17309                         tp->dma_rwctrl |= 0x00144000;
17310                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17311                         /* 5714 always in PCIX mode */
17312                         tp->dma_rwctrl |= 0x00148000;
17313                 } else {
17314                         tp->dma_rwctrl |= 0x001b000f;
17315                 }
17316         }
17317         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17318                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17319
17320         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17321             tg3_asic_rev(tp) == ASIC_REV_5704)
17322                 tp->dma_rwctrl &= 0xfffffff0;
17323
17324         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17325             tg3_asic_rev(tp) == ASIC_REV_5701) {
17326                 /* Remove this if it causes problems for some boards. */
17327                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17328
17329                 /* On 5700/5701 chips, we need to set this bit.
17330                  * Otherwise the chip will issue cacheline transactions
17331                  * to streamable DMA memory with not all the byte
17332                  * enables turned on.  This is an error on several
17333                  * RISC PCI controllers, in particular sparc64.
17334                  *
17335                  * On 5703/5704 chips, this bit has been reassigned
17336                  * a different meaning.  In particular, it is used
17337                  * on those chips to enable a PCI-X workaround.
17338                  */
17339                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17340         }
17341
17342         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17343
17344
17345         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17346             tg3_asic_rev(tp) != ASIC_REV_5701)
17347                 goto out;
17348
17349         /* It is best to perform the DMA test with the maximum write
17350          * burst size to expose the 5700/5701 write DMA bug.
17351          */
17352         saved_dma_rwctrl = tp->dma_rwctrl;
17353         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17354         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17355
17356         while (1) {
17357                 u32 *p = buf, i;
17358
17359                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17360                         p[i] = i;
17361
17362                 /* Send the buffer to the chip. */
17363                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17364                 if (ret) {
17365                         dev_err(&tp->pdev->dev,
17366                                 "%s: Buffer write failed. err = %d\n",
17367                                 __func__, ret);
17368                         break;
17369                 }
17370
17371                 /* Now read it back. */
17372                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17373                 if (ret) {
17374                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17375                                 "err = %d\n", __func__, ret);
17376                         break;
17377                 }
17378
17379                 /* Verify it. */
17380                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17381                         if (p[i] == i)
17382                                 continue;
17383
17384                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17385                             DMA_RWCTRL_WRITE_BNDRY_16) {
17386                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17387                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17388                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17389                                 break;
17390                         } else {
17391                                 dev_err(&tp->pdev->dev,
17392                                         "%s: Buffer corrupted on read back! "
17393                                         "(%d != %d)\n", __func__, p[i], i);
17394                                 ret = -ENODEV;
17395                                 goto out;
17396                         }
17397                 }
17398
17399                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17400                         /* Success. */
17401                         ret = 0;
17402                         break;
17403                 }
17404         }
17405         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17406             DMA_RWCTRL_WRITE_BNDRY_16) {
17407                 /* DMA test passed without adjusting the DMA boundary;
17408                  * now look for chipsets that are known to expose the
17409                  * DMA bug without failing the test.
17410                  */
17411                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17412                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17413                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17414                 } else {
17415                         /* Safe to use the calculated DMA boundary. */
17416                         tp->dma_rwctrl = saved_dma_rwctrl;
17417                 }
17418
17419                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17420         }
17421
17422 out:
17423         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17424 out_nofree:
17425         return ret;
17426 }
17427
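      /* Select buffer manager watermarks (mbuf and DMA low/high water
       * marks) appropriate to the chip generation, for both standard and
       * jumbo MTUs.
       */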
17428 static void tg3_init_bufmgr_config(struct tg3 *tp)
17429 {
17430         if (tg3_flag(tp, 57765_PLUS)) {
17431                 tp->bufmgr_config.mbuf_read_dma_low_water =
17432                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17433                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17434                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17435                 tp->bufmgr_config.mbuf_high_water =
17436                         DEFAULT_MB_HIGH_WATER_57765;
17437
17438                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17439                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17440                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17441                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17442                 tp->bufmgr_config.mbuf_high_water_jumbo =
17443                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17444         } else if (tg3_flag(tp, 5705_PLUS)) {
17445                 tp->bufmgr_config.mbuf_read_dma_low_water =
17446                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17447                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17448                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17449                 tp->bufmgr_config.mbuf_high_water =
17450                         DEFAULT_MB_HIGH_WATER_5705;
17451                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17452                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17453                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17454                         tp->bufmgr_config.mbuf_high_water =
17455                                 DEFAULT_MB_HIGH_WATER_5906;
17456                 }
17457
17458                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17459                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17460                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17461                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17462                 tp->bufmgr_config.mbuf_high_water_jumbo =
17463                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17464         } else {
17465                 tp->bufmgr_config.mbuf_read_dma_low_water =
17466                         DEFAULT_MB_RDMA_LOW_WATER;
17467                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17468                         DEFAULT_MB_MACRX_LOW_WATER;
17469                 tp->bufmgr_config.mbuf_high_water =
17470                         DEFAULT_MB_HIGH_WATER;
17471
17472                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17473                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17474                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17475                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17476                 tp->bufmgr_config.mbuf_high_water_jumbo =
17477                         DEFAULT_MB_HIGH_WATER_JUMBO;
17478         }
17479
17480         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17481         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17482 }
17483
17484 static char *tg3_phy_string(struct tg3 *tp)
17485 {
17486         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17487         case TG3_PHY_ID_BCM5400:        return "5400";
17488         case TG3_PHY_ID_BCM5401:        return "5401";
17489         case TG3_PHY_ID_BCM5411:        return "5411";
17490         case TG3_PHY_ID_BCM5701:        return "5701";
17491         case TG3_PHY_ID_BCM5703:        return "5703";
17492         case TG3_PHY_ID_BCM5704:        return "5704";
17493         case TG3_PHY_ID_BCM5705:        return "5705";
17494         case TG3_PHY_ID_BCM5750:        return "5750";
17495         case TG3_PHY_ID_BCM5752:        return "5752";
17496         case TG3_PHY_ID_BCM5714:        return "5714";
17497         case TG3_PHY_ID_BCM5780:        return "5780";
17498         case TG3_PHY_ID_BCM5755:        return "5755";
17499         case TG3_PHY_ID_BCM5787:        return "5787";
17500         case TG3_PHY_ID_BCM5784:        return "5784";
17501         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17502         case TG3_PHY_ID_BCM5906:        return "5906";
17503         case TG3_PHY_ID_BCM5761:        return "5761";
17504         case TG3_PHY_ID_BCM5718C:       return "5718C";
17505         case TG3_PHY_ID_BCM5718S:       return "5718S";
17506         case TG3_PHY_ID_BCM57765:       return "57765";
17507         case TG3_PHY_ID_BCM5719C:       return "5719C";
17508         case TG3_PHY_ID_BCM5720C:       return "5720C";
17509         case TG3_PHY_ID_BCM5762:        return "5762C";
17510         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17511         case 0:                 return "serdes";
17512         default:                return "unknown";
17513         }
17514 }
17515
17516 static char *tg3_bus_string(struct tg3 *tp, char *str)
17517 {
17518         if (tg3_flag(tp, PCI_EXPRESS)) {
17519                 strcpy(str, "PCI Express");
17520                 return str;
17521         } else if (tg3_flag(tp, PCIX_MODE)) {
17522                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17523
17524                 strcpy(str, "PCIX:");
17525
17526                 if ((clock_ctrl == 7) ||
17527                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17528                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17529                         strcat(str, "133MHz");
17530                 else if (clock_ctrl == 0)
17531                         strcat(str, "33MHz");
17532                 else if (clock_ctrl == 2)
17533                         strcat(str, "50MHz");
17534                 else if (clock_ctrl == 4)
17535                         strcat(str, "66MHz");
17536                 else if (clock_ctrl == 6)
17537                         strcat(str, "100MHz");
17538         } else {
17539                 strcpy(str, "PCI:");
17540                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17541                         strcat(str, "66MHz");
17542                 else
17543                         strcat(str, "33MHz");
17544         }
17545         if (tg3_flag(tp, PCI_32BIT))
17546                 strcat(str, ":32-bit");
17547         else
17548                 strcat(str, ":64-bit");
17549         return str;
17550 }
17551
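      /* Seed tp->coal with the default interrupt coalescing parameters;
       * ethtool -C can change them later.  The irq-time and statistics
       * fields are cleared for 5705 and later parts.
       */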
17552 static void tg3_init_coal(struct tg3 *tp)
17553 {
17554         struct ethtool_coalesce *ec = &tp->coal;
17555
17556         memset(ec, 0, sizeof(*ec));
17557         ec->cmd = ETHTOOL_GCOALESCE;
17558         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17559         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17560         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17561         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17562         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17563         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17564         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17565         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17566         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17567
17568         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17569                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17570                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17571                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17572                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17573                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17574         }
17575
17576         if (tg3_flag(tp, 5705_PLUS)) {
17577                 ec->rx_coalesce_usecs_irq = 0;
17578                 ec->tx_coalesce_usecs_irq = 0;
17579                 ec->stats_block_coalesce_usecs = 0;
17580         }
17581 }
17582
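      /* PCI probe entry point: enable and map the device, read the chip
       * invariants, size the DMA mask, test the DMA engine, and register
       * the net_device.
       */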
17583 static int tg3_init_one(struct pci_dev *pdev,
17584                                   const struct pci_device_id *ent)
17585 {
17586         struct net_device *dev;
17587         struct tg3 *tp;
17588         int i, err;
17589         u32 sndmbx, rcvmbx, intmbx;
17590         char str[40];
17591         u64 dma_mask, persist_dma_mask;
17592         netdev_features_t features = 0;
17593
17594         printk_once(KERN_INFO "%s\n", version);
17595
17596         err = pci_enable_device(pdev);
17597         if (err) {
17598                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17599                 return err;
17600         }
17601
17602         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17603         if (err) {
17604                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17605                 goto err_out_disable_pdev;
17606         }
17607
17608         pci_set_master(pdev);
17609
17610         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17611         if (!dev) {
17612                 err = -ENOMEM;
17613                 goto err_out_free_res;
17614         }
17615
17616         SET_NETDEV_DEV(dev, &pdev->dev);
17617
17618         tp = netdev_priv(dev);
17619         tp->pdev = pdev;
17620         tp->dev = dev;
17621         tp->rx_mode = TG3_DEF_RX_MODE;
17622         tp->tx_mode = TG3_DEF_TX_MODE;
17623         tp->irq_sync = 1;
17624         tp->pcierr_recovery = false;
17625
17626         if (tg3_debug > 0)
17627                 tp->msg_enable = tg3_debug;
17628         else
17629                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17630
17631         if (pdev_is_ssb_gige_core(pdev)) {
17632                 tg3_flag_set(tp, IS_SSB_CORE);
17633                 if (ssb_gige_must_flush_posted_writes(pdev))
17634                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17635                 if (ssb_gige_one_dma_at_once(pdev))
17636                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17637                 if (ssb_gige_have_roboswitch(pdev)) {
17638                         tg3_flag_set(tp, USE_PHYLIB);
17639                         tg3_flag_set(tp, ROBOSWITCH);
17640                 }
17641                 if (ssb_gige_is_rgmii(pdev))
17642                         tg3_flag_set(tp, RGMII_MODE);
17643         }
17644
17645         /* The word/byte swap controls here govern register access byte
17646          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17647          * setting below.
17648          */
17649         tp->misc_host_ctrl =
17650                 MISC_HOST_CTRL_MASK_PCI_INT |
17651                 MISC_HOST_CTRL_WORD_SWAP |
17652                 MISC_HOST_CTRL_INDIR_ACCESS |
17653                 MISC_HOST_CTRL_PCISTATE_RW;
17654
17655         /* The NONFRM (non-frame) byte/word swap controls take effect
17656          * on descriptor entries, anything which isn't packet data.
17657          *
17658          * The StrongARM chips on the board (one for tx, one for rx)
17659          * are running in big-endian mode.
17660          */
17661         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17662                         GRC_MODE_WSWAP_NONFRM_DATA);
17663 #ifdef __BIG_ENDIAN
17664         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17665 #endif
17666         spin_lock_init(&tp->lock);
17667         spin_lock_init(&tp->indirect_lock);
17668         INIT_WORK(&tp->reset_task, tg3_reset_task);
17669
17670         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17671         if (!tp->regs) {
17672                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17673                 err = -ENOMEM;
17674                 goto err_out_free_dev;
17675         }
17676
17677         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17678             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17681             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17682             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17683             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17684             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17685             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17686             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17687             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17688             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17689             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17690             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17691             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17692                 tg3_flag_set(tp, ENABLE_APE);
17693                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17694                 if (!tp->aperegs) {
17695                         dev_err(&pdev->dev,
17696                                 "Cannot map APE registers, aborting\n");
17697                         err = -ENOMEM;
17698                         goto err_out_iounmap;
17699                 }
17700         }
17701
17702         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17703         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17704
17705         dev->ethtool_ops = &tg3_ethtool_ops;
17706         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17707         dev->netdev_ops = &tg3_netdev_ops;
17708         dev->irq = pdev->irq;
17709
17710         err = tg3_get_invariants(tp, ent);
17711         if (err) {
17712                 dev_err(&pdev->dev,
17713                         "Problem fetching invariants of chip, aborting\n");
17714                 goto err_out_apeunmap;
17715         }
17716
17717         /* The EPB bridge inside 5714, 5715, and 5780 and any
17718          * device behind the EPB cannot support DMA addresses wider than 40 bits.
17719          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17720          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17721          * do DMA address check in tg3_start_xmit().
17722          */
17723         if (tg3_flag(tp, IS_5788))
17724                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17725         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17726                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17727 #ifdef CONFIG_HIGHMEM
17728                 dma_mask = DMA_BIT_MASK(64);
17729 #endif
17730         } else
17731                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17732
17733         /* Configure DMA attributes. */
17734         if (dma_mask > DMA_BIT_MASK(32)) {
17735                 err = pci_set_dma_mask(pdev, dma_mask);
17736                 if (!err) {
17737                         features |= NETIF_F_HIGHDMA;
17738                         err = pci_set_consistent_dma_mask(pdev,
17739                                                           persist_dma_mask);
17740                         if (err < 0) {
17741                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17742                                         "DMA for consistent allocations\n");
17743                                 goto err_out_apeunmap;
17744                         }
17745                 }
17746         }
17747         if (err || dma_mask == DMA_BIT_MASK(32)) {
17748                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17749                 if (err) {
17750                         dev_err(&pdev->dev,
17751                                 "No usable DMA configuration, aborting\n");
17752                         goto err_out_apeunmap;
17753                 }
17754         }
17755
17756         tg3_init_bufmgr_config(tp);
17757
17758         /* 5700 B0 chips do not support checksumming correctly due
17759          * to hardware bugs.
17760          */
17761         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17762                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17763
17764                 if (tg3_flag(tp, 5755_PLUS))
17765                         features |= NETIF_F_IPV6_CSUM;
17766         }
17767
17768         /* TSO is on by default on chips that support hardware TSO.
17769          * Firmware TSO on older chips gives lower performance, so it
17770          * is off by default, but can be enabled using ethtool.
17771          */
17772         if ((tg3_flag(tp, HW_TSO_1) ||
17773              tg3_flag(tp, HW_TSO_2) ||
17774              tg3_flag(tp, HW_TSO_3)) &&
17775             (features & NETIF_F_IP_CSUM))
17776                 features |= NETIF_F_TSO;
17777         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17778                 if (features & NETIF_F_IPV6_CSUM)
17779                         features |= NETIF_F_TSO6;
17780                 if (tg3_flag(tp, HW_TSO_3) ||
17781                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17782                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17783                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17784                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17785                     tg3_asic_rev(tp) == ASIC_REV_57780)
17786                         features |= NETIF_F_TSO_ECN;
17787         }
17788
17789         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17790                          NETIF_F_HW_VLAN_CTAG_RX;
17791         dev->vlan_features |= features;
17792
17793         /*
17794          * Add loopback capability only for a subset of devices that support
17795          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17796          * loopback for the remaining devices.
17797          */
17798         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17799             !tg3_flag(tp, CPMU_PRESENT))
17800                 /* Add the loopback capability */
17801                 features |= NETIF_F_LOOPBACK;
17802
17803         dev->hw_features |= features;
17804         dev->priv_flags |= IFF_UNICAST_FLT;
17805
17806         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17807         dev->min_mtu = TG3_MIN_MTU;
17808         dev->max_mtu = TG3_MAX_MTU(tp);
17809
17810         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17811             !tg3_flag(tp, TSO_CAPABLE) &&
17812             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17813                 tg3_flag_set(tp, MAX_RXPEND_64);
17814                 tp->rx_pending = 63;
17815         }
17816
17817         err = tg3_get_device_address(tp);
17818         if (err) {
17819                 dev_err(&pdev->dev,
17820                         "Could not obtain valid ethernet address, aborting\n");
17821                 goto err_out_apeunmap;
17822         }
17823
17824         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17825         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17826         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17827         for (i = 0; i < tp->irq_max; i++) {
17828                 struct tg3_napi *tnapi = &tp->napi[i];
17829
17830                 tnapi->tp = tp;
17831                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17832
17833                 tnapi->int_mbox = intmbx;
17834                 if (i <= 4)
17835                         intmbx += 0x8;
17836                 else
17837                         intmbx += 0x4;
17838
17839                 tnapi->consmbox = rcvmbx;
17840                 tnapi->prodmbox = sndmbx;
17841
17842                 if (i)
17843                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17844                 else
17845                         tnapi->coal_now = HOSTCC_MODE_NOW;
17846
17847                 if (!tg3_flag(tp, SUPPORT_MSIX))
17848                         break;
17849
17850                 /*
17851                  * If we support MSIX, we'll be using RSS.  If we're using
17852                  * RSS, the first vector only handles link interrupts and the
17853                  * remaining vectors handle rx and tx interrupts.  Reuse the
17854                  * mailbox values for the next iteration.  The values we set up
17855                  * above are still useful for the single vectored mode.
17856                  */
17857                 if (!i)
17858                         continue;
17859
17860                 rcvmbx += 0x8;
17861
17862                 if (sndmbx & 0x4)
17863                         sndmbx -= 0x4;
17864                 else
17865                         sndmbx += 0xc;
17866         }
17867
17868         /*
17869          * Reset the chip in case the UNDI or EFI driver did not shut it
17870          * down cleanly.  The DMA self test will enable WDMAC, and we would
17871          * otherwise see (spurious) pending DMA on the PCI bus at that point.
17872          */
17873         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17874             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17875                 tg3_full_lock(tp, 0);
17876                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17877                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17878                 tg3_full_unlock(tp);
17879         }
17880
17881         err = tg3_test_dma(tp);
17882         if (err) {
17883                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17884                 goto err_out_apeunmap;
17885         }
17886
17887         tg3_init_coal(tp);
17888
17889         pci_set_drvdata(pdev, dev);
17890
        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

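        /*
         * Save the PCI config space so that the error handlers can
         * restore it after a slot reset (see tg3_io_slot_reset()).
         */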
        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

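                /*
                 * Power-down preparation failed; restart the hardware so
                 * the interface stays usable, but still return the error.
                 */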
                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
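        /*
         * Ask tg3_restart_hw() to reset the PHY only if the link was not
         * deliberately kept up across the power-down.
         */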
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

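        /*
         * Fully power the NIC down (arming WoL if it is enabled) only
         * when the system is really powering off, not on a mere reboot.
         */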
        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

/**
 * tg3_io_error_detected - called when a PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* We probably don't have a netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;

        /*
         * No recovery is attempted for a permanent failure, so only flag
         * recovery in progress for a frozen (recoverable) error.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Make sure the reset task cannot run concurrently */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
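        /*
         * A permanent failure cannot be recovered from, so close the
         * device; otherwise disable it and let the PCI core reset the
         * slot (err was initialized to PCI_ERS_RESULT_NEED_RESET above).
         */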
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }
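
        /*
         * Re-enable bus mastering and restore the config space saved at
         * the end of probe; save it again so a later recovery can
         * restore it too.
         */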
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}

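/*
 * PCI error (AER) recovery flow: error_detected() quiesces the device,
 * slot_reset() re-enables and powers it back up after the bus reset,
 * and resume() restarts traffic once the core says it is safe.
 */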
static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

module_pci_driver(tg3_driver);