tg3: driver sleeps indefinitely when EEH errors exceed eeh_max_freezes
drivers/net/ethernet/broadcom/tg3.c
/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

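/* Illustrative usage sketch (not from the original source; the flag
 * names TG3_FLAG_JUMBO_CAPABLE and TG3_FLAG_TSO_CAPABLE come from
 * tg3.h):
 *
 *      if (tg3_flag(tp, JUMBO_CAPABLE))
 *              tg3_flag_set(tp, TSO_CAPABLE);
 *
 * expands via token pasting to test_bit(TG3_FLAG_JUMBO_CAPABLE,
 * (tp)->tg3_flags) and set_bit(TG3_FLAG_TSO_CAPABLE, (tp)->tg3_flags),
 * so call sites stay terse while the enum keeps the flags type-checked.
 */
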
#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
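
/* Worked example (illustrative): TG3_TX_RING_SIZE is a power of two,
 * so the AND-mask in NEXT_TX() is the cheap modulo the comment above
 * describes: NEXT_TX(511) == (512 & 511) == 0, wrapping the index back
 * to the start of the ring without a hardware divide.
 */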

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
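
/* Sketch (illustrative, simplified) of the receive-path decision this
 * threshold drives; the real logic lives in tg3_rx(), later in the file:
 *
 *      if (len < TG3_RX_COPY_THRESH(tp))
 *              copy the frame into a small freshly allocated skb and
 *              recycle the mapped producer-ring buffer in place;
 *      else
 *              unmap the buffer and pass it up to the stack as-is;
 *
 * The same length check doubles as the 5701 double-copy workaround
 * described above.
 */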

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
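
/* Usage sketch (illustrative): writes that toggle power or clocks go
 * through the flushing forms so the posted write is pushed out and the
 * settle time is honored, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * as tg3_switch_clocks() below does; plain tw32() suffices for
 * registers without such hazards.
 */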

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                /* fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
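
/* Pairing sketch (illustrative): callers bracket shared-resource access
 * with the lock/unlock pair and must tolerate -EBUSY, e.g.
 *
 *      if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *              return -EBUSY;
 *      ...touch APE shared memory...
 *      tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_event_lock() below is one such caller.
 */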

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Check if the heartbeat interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}
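
/* Illustrative call (the 5-second interval here is a hypothetical
 * value, not taken from this file): time_before() is wraparound-safe
 * on jiffies, so a periodic timer can simply do
 *
 *      tg3_send_ape_heartbeat(tp, msecs_to_jiffies(5000));
 *
 * and the APE heartbeat counter advances at most once per interval.
 */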

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}
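
/* Access-pattern sketch (illustrative; register and bit names are from
 * tg3.h): the DSP registers are indirect, so a read-modify-write goes
 * through the address/data pair above:
 *
 *      u32 val;
 *
 *      if (!tg3_phydsp_read(tp, MII_TG3_DSP_TAP1, &val))
 *              tg3_phydsp_write(tp, MII_TG3_DSP_TAP1,
 *                               val | MII_TG3_DSP_TAP1_AGCTGT_DFLT);
 */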
1300
1301 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1302 {
1303         int err;
1304
1305         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1306                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1307                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1308         if (!err)
1309                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1310
1311         return err;
1312 }
1313
1314 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1315 {
1316         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1317                 set |= MII_TG3_AUXCTL_MISC_WREN;
1318
1319         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1320 }
1321
1322 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1323 {
1324         u32 val;
1325         int err;
1326
1327         err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1328
1329         if (err)
1330                 return err;
1331
1332         if (enable)
1333                 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1334         else
1335                 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1336
1337         err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1338                                    val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1339
1340         return err;
1341 }
1342
1343 static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1344 {
1345         return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1346                             reg | val | MII_TG3_MISC_SHDW_WREN);
1347 }
1348
1349 static int tg3_bmcr_reset(struct tg3 *tp)
1350 {
1351         u32 phy_control;
1352         int limit, err;
1353
1354         /* OK, reset it, and poll the BMCR_RESET bit until it
1355          * clears or we time out.
1356          */
1357         phy_control = BMCR_RESET;
1358         err = tg3_writephy(tp, MII_BMCR, phy_control);
1359         if (err != 0)
1360                 return -EBUSY;
1361
1362         limit = 5000;
1363         while (limit--) {
1364                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1365                 if (err != 0)
1366                         return -EBUSY;
1367
1368                 if ((phy_control & BMCR_RESET) == 0) {
1369                         udelay(40);
1370                         break;
1371                 }
1372                 udelay(10);
1373         }
1374         if (limit < 0)
1375                 return -EBUSY;
1376
1377         return 0;
1378 }
1379
1380 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1381 {
1382         struct tg3 *tp = bp->priv;
1383         u32 val;
1384
1385         spin_lock_bh(&tp->lock);
1386
1387         if (__tg3_readphy(tp, mii_id, reg, &val))
1388                 val = -EIO;
1389
1390         spin_unlock_bh(&tp->lock);
1391
1392         return val;
1393 }
1394
1395 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1396 {
1397         struct tg3 *tp = bp->priv;
1398         u32 ret = 0;
1399
1400         spin_lock_bh(&tp->lock);
1401
1402         if (__tg3_writephy(tp, mii_id, reg, val))
1403                 ret = -EIO;
1404
1405         spin_unlock_bh(&tp->lock);
1406
1407         return ret;
1408 }
1409
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412         u32 val;
1413         struct phy_device *phydev;
1414
1415         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417         case PHY_ID_BCM50610:
1418         case PHY_ID_BCM50610M:
1419                 val = MAC_PHYCFG2_50610_LED_MODES;
1420                 break;
1421         case PHY_ID_BCMAC131:
1422                 val = MAC_PHYCFG2_AC131_LED_MODES;
1423                 break;
1424         case PHY_ID_RTL8211C:
1425                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426                 break;
1427         case PHY_ID_RTL8201E:
1428                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429                 break;
1430         default:
1431                 return;
1432         }
1433
1434         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435                 tw32(MAC_PHYCFG2, val);
1436
1437                 val = tr32(MAC_PHYCFG1);
1438                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441                 tw32(MAC_PHYCFG1, val);
1442
1443                 return;
1444         }
1445
1446         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448                        MAC_PHYCFG2_FMODE_MASK_MASK |
1449                        MAC_PHYCFG2_GMODE_MASK_MASK |
1450                        MAC_PHYCFG2_ACT_MASK_MASK   |
1451                        MAC_PHYCFG2_QUAL_MASK_MASK |
1452                        MAC_PHYCFG2_INBAND_ENABLE;
1453
1454         tw32(MAC_PHYCFG2, val);
1455
1456         val = tr32(MAC_PHYCFG1);
1457         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464         }
1465         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467         tw32(MAC_PHYCFG1, val);
1468
1469         val = tr32(MAC_EXT_RGMII_MODE);
1470         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471                  MAC_RGMII_MODE_RX_QUALITY |
1472                  MAC_RGMII_MODE_RX_ACTIVITY |
1473                  MAC_RGMII_MODE_RX_ENG_DET |
1474                  MAC_RGMII_MODE_TX_ENABLE |
1475                  MAC_RGMII_MODE_TX_LOWPWR |
1476                  MAC_RGMII_MODE_TX_RESET);
1477         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479                         val |= MAC_RGMII_MODE_RX_INT_B |
1480                                MAC_RGMII_MODE_RX_QUALITY |
1481                                MAC_RGMII_MODE_RX_ACTIVITY |
1482                                MAC_RGMII_MODE_RX_ENG_DET;
1483                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484                         val |= MAC_RGMII_MODE_TX_ENABLE |
1485                                MAC_RGMII_MODE_TX_LOWPWR |
1486                                MAC_RGMII_MODE_TX_RESET;
1487         }
1488         tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490
1491 static void tg3_mdio_start(struct tg3 *tp)
1492 {
1493         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494         tw32_f(MAC_MI_MODE, tp->mi_mode);
1495         udelay(80);
1496
1497         if (tg3_flag(tp, MDIOBUS_INITED) &&
1498             tg3_asic_rev(tp) == ASIC_REV_5785)
1499                 tg3_mdio_config_5785(tp);
1500 }
1501
1502 static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504         int i;
1505         u32 reg;
1506         struct phy_device *phydev;
1507
1508         if (tg3_flag(tp, 5717_PLUS)) {
1509                 u32 is_serdes;
1510
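             /* On 5717-class devices the PHY address follows the PCI
              * function number; serdes PHYs sit seven addresses higher.
              */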
1511                 tp->phy_addr = tp->pci_fn + 1;
1512
1513                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515                 else
1516                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1518                 if (is_serdes)
1519                         tp->phy_addr += 7;
1520         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521                 int addr;
1522
1523                 addr = ssb_gige_get_phyaddr(tp->pdev);
1524                 if (addr < 0)
1525                         return addr;
1526                 tp->phy_addr = addr;
1527         } else
1528                 tp->phy_addr = TG3_PHY_MII_ADDR;
1529
1530         tg3_mdio_start(tp);
1531
1532         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533                 return 0;
1534
1535         tp->mdio_bus = mdiobus_alloc();
1536         if (tp->mdio_bus == NULL)
1537                 return -ENOMEM;
1538
1539         tp->mdio_bus->name     = "tg3 mdio bus";
1540         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542         tp->mdio_bus->priv     = tp;
1543         tp->mdio_bus->parent   = &tp->pdev->dev;
1544         tp->mdio_bus->read     = &tg3_mdio_read;
1545         tp->mdio_bus->write    = &tg3_mdio_write;
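             /* Restrict bus probing to the address of our own PHY. */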
1546         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1547
1548         /* The bus registration will look for all the PHYs on the mdio bus.
1549          * Unfortunately, it does not ensure the PHY is powered up before
1550          * accessing the PHY ID registers.  A chip reset is the
1551          * quickest way to bring the device back to an operational state.
1552          */
1553         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554                 tg3_bmcr_reset(tp);
1555
1556         i = mdiobus_register(tp->mdio_bus);
1557         if (i) {
1558                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559                 mdiobus_free(tp->mdio_bus);
1560                 return i;
1561         }
1562
1563         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564
1565         if (!phydev || !phydev->drv) {
1566                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567                 mdiobus_unregister(tp->mdio_bus);
1568                 mdiobus_free(tp->mdio_bus);
1569                 return -ENODEV;
1570         }
1571
1572         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573         case PHY_ID_BCM57780:
1574                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576                 break;
1577         case PHY_ID_BCM50610:
1578         case PHY_ID_BCM50610M:
1579                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580                                      PHY_BRCM_RX_REFCLK_UNUSED |
1581                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1584                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1585                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1586                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1587                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1588                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1589                 /* fall through */
1590         case PHY_ID_RTL8211C:
1591                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1592                 break;
1593         case PHY_ID_RTL8201E:
1594         case PHY_ID_BCMAC131:
1595                 phydev->interface = PHY_INTERFACE_MODE_MII;
1596                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1597                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1598                 break;
1599         }
1600
1601         tg3_flag_set(tp, MDIOBUS_INITED);
1602
1603         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1604                 tg3_mdio_config_5785(tp);
1605
1606         return 0;
1607 }
1608
1609 static void tg3_mdio_fini(struct tg3 *tp)
1610 {
1611         if (tg3_flag(tp, MDIOBUS_INITED)) {
1612                 tg3_flag_clear(tp, MDIOBUS_INITED);
1613                 mdiobus_unregister(tp->mdio_bus);
1614                 mdiobus_free(tp->mdio_bus);
1615         }
1616 }
1617
1618 /* tp->lock is held. */
1619 static inline void tg3_generate_fw_event(struct tg3 *tp)
1620 {
1621         u32 val;
1622
1623         val = tr32(GRC_RX_CPU_EVENT);
1624         val |= GRC_RX_CPU_DRIVER_EVENT;
1625         tw32_f(GRC_RX_CPU_EVENT, val);
1626
1627         tp->last_event_jiffies = jiffies;
1628 }
1629
1630 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1631
1632 /* tp->lock is held. */
1633 static void tg3_wait_for_event_ack(struct tg3 *tp)
1634 {
1635         int i;
1636         unsigned int delay_cnt;
1637         long time_remain;
1638
1639         /* If enough time has passed, no wait is necessary. */
1640         time_remain = (long)(tp->last_event_jiffies + 1 +
1641                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1642                       (long)jiffies;
1643         if (time_remain < 0)
1644                 return;
1645
1646         /* Check if we can shorten the wait time. */
1647         delay_cnt = jiffies_to_usecs(time_remain);
1648         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1649                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1650         delay_cnt = (delay_cnt >> 3) + 1;
1651
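             /* Poll in 8 usec steps until the firmware acks (clears) the
              * previous event, or the PCI channel goes offline.
              */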
1652         for (i = 0; i < delay_cnt; i++) {
1653                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1654                         break;
1655                 if (pci_channel_offline(tp->pdev))
1656                         break;
1657
1658                 udelay(8);
1659         }
1660 }
1661
1662 /* tp->lock is held. */
1663 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1664 {
1665         u32 reg, val;
1666
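             /* Pack pairs of 16-bit MII registers into 32-bit words, the
              * control/advertisement register in the high half and the
              * matching status register in the low half.
              */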
1667         val = 0;
1668         if (!tg3_readphy(tp, MII_BMCR, &reg))
1669                 val = reg << 16;
1670         if (!tg3_readphy(tp, MII_BMSR, &reg))
1671                 val |= (reg & 0xffff);
1672         *data++ = val;
1673
1674         val = 0;
1675         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1676                 val = reg << 16;
1677         if (!tg3_readphy(tp, MII_LPA, &reg))
1678                 val |= (reg & 0xffff);
1679         *data++ = val;
1680
1681         val = 0;
1682         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1683                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1684                         val = reg << 16;
1685                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1686                         val |= (reg & 0xffff);
1687         }
1688         *data++ = val;
1689
1690         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1691                 val = reg << 16;
1692         else
1693                 val = 0;
1694         *data++ = val;
1695 }
1696
1697 /* tp->lock is held. */
1698 static void tg3_ump_link_report(struct tg3 *tp)
1699 {
1700         u32 data[4];
1701
1702         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1703                 return;
1704
1705         tg3_phy_gather_ump_data(tp, data);
1706
1707         tg3_wait_for_event_ack(tp);
1708
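             /* Hand the link state to the firmware: the command word, the
              * payload length, then the four data words gathered above.
              */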
1709         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1710         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1711         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1712         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1713         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1714         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1715
1716         tg3_generate_fw_event(tp);
1717 }
1718
1719 /* tp->lock is held. */
1720 static void tg3_stop_fw(struct tg3 *tp)
1721 {
1722         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1723                 /* Wait for RX cpu to ACK the previous event. */
1724                 tg3_wait_for_event_ack(tp);
1725
1726                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1727
1728                 tg3_generate_fw_event(tp);
1729
1730                 /* Wait for RX cpu to ACK this event. */
1731                 tg3_wait_for_event_ack(tp);
1732         }
1733 }
1734
1735 /* tp->lock is held. */
1736 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1737 {
1738         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1739                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1740
1741         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
1763
1764 /* tp->lock is held. */
1765 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1766 {
1767         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1768                 switch (kind) {
1769                 case RESET_KIND_INIT:
1770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1771                                       DRV_STATE_START_DONE);
1772                         break;
1773
1774                 case RESET_KIND_SHUTDOWN:
1775                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1776                                       DRV_STATE_UNLOAD_DONE);
1777                         break;
1778
1779                 default:
1780                         break;
1781                 }
1782         }
1783 }
1784
1785 /* tp->lock is held. */
1786 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1787 {
1788         if (tg3_flag(tp, ENABLE_ASF)) {
1789                 switch (kind) {
1790                 case RESET_KIND_INIT:
1791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1792                                       DRV_STATE_START);
1793                         break;
1794
1795                 case RESET_KIND_SHUTDOWN:
1796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1797                                       DRV_STATE_UNLOAD);
1798                         break;
1799
1800                 case RESET_KIND_SUSPEND:
1801                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1802                                       DRV_STATE_SUSPEND);
1803                         break;
1804
1805                 default:
1806                         break;
1807                 }
1808         }
1809 }
1810
1811 static int tg3_poll_fw(struct tg3 *tp)
1812 {
1813         int i;
1814         u32 val;
1815
1816         if (tg3_flag(tp, NO_FWARE_REPORTED))
1817                 return 0;
1818
1819         if (tg3_flag(tp, IS_SSB_CORE)) {
1820                 /* We don't use firmware. */
1821                 return 0;
1822         }
1823
1824         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1825                 /* Wait up to 20ms for init done. */
1826                 for (i = 0; i < 200; i++) {
1827                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1828                                 return 0;
1829                         if (pci_channel_offline(tp->pdev))
1830                                 return -ENODEV;
1831
1832                         udelay(100);
1833                 }
1834                 return -ENODEV;
1835         }
1836
1837         /* Wait for firmware initialization to complete.  The boot code
              * acknowledges by writing back the one's complement of the
              * magic value posted to the mailbox before the reset.
              */
1838         for (i = 0; i < 100000; i++) {
1839                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1840                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1841                         break;
1842                 if (pci_channel_offline(tp->pdev)) {
1843                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1844                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1845                                 netdev_info(tp->dev, "No firmware running\n");
1846                         }
1847
1848                         break;
1849                 }
1850
1851                 udelay(10);
1852         }
1853
1854         /* Chip might not be fitted with firmware.  Some Sun onboard
1855          * parts are configured like that.  So don't signal the timeout
1856          * of the above loop as an error, but do report the lack of
1857          * running firmware once.
1858          */
1859         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1860                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1861
1862                 netdev_info(tp->dev, "No firmware running\n");
1863         }
1864
1865         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1866                 /* The 57765 A0 needs a little more
1867                  * time to do some important work.
1868                  */
1869                 mdelay(10);
1870         }
1871
1872         return 0;
1873 }
1874
1875 static void tg3_link_report(struct tg3 *tp)
1876 {
1877         if (!netif_carrier_ok(tp->dev)) {
1878                 netif_info(tp, link, tp->dev, "Link is down\n");
1879                 tg3_ump_link_report(tp);
1880         } else if (netif_msg_link(tp)) {
1881                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1882                             (tp->link_config.active_speed == SPEED_1000 ?
1883                              1000 :
1884                              (tp->link_config.active_speed == SPEED_100 ?
1885                               100 : 10)),
1886                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1887                              "full" : "half"));
1888
1889                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1890                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1891                             "on" : "off",
1892                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1893                             "on" : "off");
1894
1895                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1896                         netdev_info(tp->dev, "EEE is %s\n",
1897                                     tp->setlpicnt ? "enabled" : "disabled");
1898
1899                 tg3_ump_link_report(tp);
1900         }
1901
1902         tp->link_up = netif_carrier_ok(tp->dev);
1903 }
1904
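/* Translate 1000BASE-T pause advertisement bits into the driver's
 * FLOW_CTRL_RX/FLOW_CTRL_TX view of flow control.
 */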
1905 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1906 {
1907         u32 flowctrl = 0;
1908
1909         if (adv & ADVERTISE_PAUSE_CAP) {
1910                 flowctrl |= FLOW_CTRL_RX;
1911                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1912                         flowctrl |= FLOW_CTRL_TX;
1913         } else if (adv & ADVERTISE_PAUSE_ASYM)
1914                 flowctrl |= FLOW_CTRL_TX;
1915
1916         return flowctrl;
1917 }
1918
1919 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1920 {
1921         u16 miireg;
1922
1923         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1924                 miireg = ADVERTISE_1000XPAUSE;
1925         else if (flow_ctrl & FLOW_CTRL_TX)
1926                 miireg = ADVERTISE_1000XPSE_ASYM;
1927         else if (flow_ctrl & FLOW_CTRL_RX)
1928                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1929         else
1930                 miireg = 0;
1931
1932         return miireg;
1933 }
1934
1935 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1936 {
1937         u32 flowctrl = 0;
1938
1939         if (adv & ADVERTISE_1000XPAUSE) {
1940                 flowctrl |= FLOW_CTRL_RX;
1941                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1942                         flowctrl |= FLOW_CTRL_TX;
1943         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1944                 flowctrl |= FLOW_CTRL_TX;
1945
1946         return flowctrl;
1947 }
1948
1949 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1950 {
1951         u8 cap = 0;
1952
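             /* Resolve pause per 802.3: a symmetric match enables flow
              * control in both directions; otherwise an asymmetric match
              * enables pause in one direction only, chosen by which side
              * advertised symmetric pause.
              */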
1953         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1954                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1955         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1956                 if (lcladv & ADVERTISE_1000XPAUSE)
1957                         cap = FLOW_CTRL_RX;
1958                 if (rmtadv & ADVERTISE_1000XPAUSE)
1959                         cap = FLOW_CTRL_TX;
1960         }
1961
1962         return cap;
1963 }
1964
1965 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1966 {
1967         u8 autoneg;
1968         u8 flowctrl = 0;
1969         u32 old_rx_mode = tp->rx_mode;
1970         u32 old_tx_mode = tp->tx_mode;
1971
1972         if (tg3_flag(tp, USE_PHYLIB))
1973                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1974         else
1975                 autoneg = tp->link_config.autoneg;
1976
1977         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1978                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1979                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1980                 else
1981                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1982         } else
1983                 flowctrl = tp->link_config.flowctrl;
1984
1985         tp->link_config.active_flowctrl = flowctrl;
1986
1987         if (flowctrl & FLOW_CTRL_RX)
1988                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1989         else
1990                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1991
1992         if (old_rx_mode != tp->rx_mode)
1993                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1994
1995         if (flowctrl & FLOW_CTRL_TX)
1996                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1997         else
1998                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1999
2000         if (old_tx_mode != tp->tx_mode)
2001                 tw32_f(MAC_TX_MODE, tp->tx_mode);
2002 }
2003
2004 static void tg3_adjust_link(struct net_device *dev)
2005 {
2006         u8 oldflowctrl, linkmesg = 0;
2007         u32 mac_mode, lcl_adv, rmt_adv;
2008         struct tg3 *tp = netdev_priv(dev);
2009         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2010
2011         spin_lock_bh(&tp->lock);
2012
2013         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2014                                     MAC_MODE_HALF_DUPLEX);
2015
2016         oldflowctrl = tp->link_config.active_flowctrl;
2017
2018         if (phydev->link) {
2019                 lcl_adv = 0;
2020                 rmt_adv = 0;
2021
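                     /* Derive the MAC port mode from the PHY speed; the
                      * 5785 MAC uses GMII only for gigabit links.
                      */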
2022                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2023                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2024                 else if (phydev->speed == SPEED_1000 ||
2025                          tg3_asic_rev(tp) != ASIC_REV_5785)
2026                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2027                 else
2028                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2029
2030                 if (phydev->duplex == DUPLEX_HALF)
2031                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2032                 else {
2033                         lcl_adv = mii_advertise_flowctrl(
2034                                   tp->link_config.flowctrl);
2035
2036                         if (phydev->pause)
2037                                 rmt_adv = LPA_PAUSE_CAP;
2038                         if (phydev->asym_pause)
2039                                 rmt_adv |= LPA_PAUSE_ASYM;
2040                 }
2041
2042                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2043         } else
2044                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2045
2046         if (mac_mode != tp->mac_mode) {
2047                 tp->mac_mode = mac_mode;
2048                 tw32_f(MAC_MODE, tp->mac_mode);
2049                 udelay(40);
2050         }
2051
2052         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2053                 if (phydev->speed == SPEED_10)
2054                         tw32(MAC_MI_STAT,
2055                              MAC_MI_STAT_10MBPS_MODE |
2056                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057                 else
2058                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2059         }
2060
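             /* Half-duplex gigabit needs the extended slot time used for
              * carrier extension; every other mode takes the standard
              * slot time.
              */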
2061         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2062                 tw32(MAC_TX_LENGTHS,
2063                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2064                       (6 << TX_LENGTHS_IPG_SHIFT) |
2065                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2066         else
2067                 tw32(MAC_TX_LENGTHS,
2068                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2069                       (6 << TX_LENGTHS_IPG_SHIFT) |
2070                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2071
2072         if (phydev->link != tp->old_link ||
2073             phydev->speed != tp->link_config.active_speed ||
2074             phydev->duplex != tp->link_config.active_duplex ||
2075             oldflowctrl != tp->link_config.active_flowctrl)
2076                 linkmesg = 1;
2077
2078         tp->old_link = phydev->link;
2079         tp->link_config.active_speed = phydev->speed;
2080         tp->link_config.active_duplex = phydev->duplex;
2081
2082         spin_unlock_bh(&tp->lock);
2083
2084         if (linkmesg)
2085                 tg3_link_report(tp);
2086 }
2087
2088 static int tg3_phy_init(struct tg3 *tp)
2089 {
2090         struct phy_device *phydev;
2091
2092         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2093                 return 0;
2094
2095         /* Bring the PHY back to a known state. */
2096         tg3_bmcr_reset(tp);
2097
2098         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2099
2100         /* Attach the MAC to the PHY. */
2101         phydev = phy_connect(tp->dev, phydev_name(phydev),
2102                              tg3_adjust_link, phydev->interface);
2103         if (IS_ERR(phydev)) {
2104                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2105                 return PTR_ERR(phydev);
2106         }
2107
2108         /* Mask with MAC supported features. */
2109         switch (phydev->interface) {
2110         case PHY_INTERFACE_MODE_GMII:
2111         case PHY_INTERFACE_MODE_RGMII:
2112                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2113                         phy_set_max_speed(phydev, SPEED_1000);
2114                         phy_support_asym_pause(phydev);
2115                         break;
2116                 }
2117                 /* fall through */
2118         case PHY_INTERFACE_MODE_MII:
2119                 phy_set_max_speed(phydev, SPEED_100);
2120                 phy_support_asym_pause(phydev);
2121                 break;
2122         default:
2123                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2124                 return -EINVAL;
2125         }
2126
2127         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128
2129         phy_attached_info(phydev);
2130
2131         return 0;
2132 }
2133
2134 static void tg3_phy_start(struct tg3 *tp)
2135 {
2136         struct phy_device *phydev;
2137
2138         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139                 return;
2140
2141         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2142
2143         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145                 phydev->speed = tp->link_config.speed;
2146                 phydev->duplex = tp->link_config.duplex;
2147                 phydev->autoneg = tp->link_config.autoneg;
2148                 ethtool_convert_legacy_u32_to_link_mode(
2149                         phydev->advertising, tp->link_config.advertising);
2150         }
2151
2152         phy_start(phydev);
2153
2154         phy_start_aneg(phydev);
2155 }
2156
2157 static void tg3_phy_stop(struct tg3 *tp)
2158 {
2159         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2160                 return;
2161
2162         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163 }
2164
2165 static void tg3_phy_fini(struct tg3 *tp)
2166 {
2167         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2168                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2169                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2170         }
2171 }
2172
2173 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2174 {
2175         int err;
2176         u32 val;
2177
2178         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2179                 return 0;
2180
2181         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2182                 /* Cannot do read-modify-write on 5401 */
2183                 err = tg3_phy_auxctl_write(tp,
2184                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2185                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2186                                            0x4c20);
2187                 goto done;
2188         }
2189
2190         err = tg3_phy_auxctl_read(tp,
2191                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2192         if (err)
2193                 return err;
2194
2195         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2196         err = tg3_phy_auxctl_write(tp,
2197                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2198
2199 done:
2200         return err;
2201 }
2202
2203 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2204 {
2205         u32 phytest;
2206
2207         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2208                 u32 phy;
2209
2210                 tg3_writephy(tp, MII_TG3_FET_TEST,
2211                              phytest | MII_TG3_FET_SHADOW_EN);
2212                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2213                         if (enable)
2214                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2215                         else
2216                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2217                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2218                 }
2219                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2220         }
2221 }
2222
2223 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2224 {
2225         u32 reg;
2226
2227         if (!tg3_flag(tp, 5705_PLUS) ||
2228             (tg3_flag(tp, 5717_PLUS) &&
2229              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2230                 return;
2231
2232         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2233                 tg3_phy_fet_toggle_apd(tp, enable);
2234                 return;
2235         }
2236
2237         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2238               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2239               MII_TG3_MISC_SHDW_SCR5_SDTL |
2240               MII_TG3_MISC_SHDW_SCR5_C125OE;
2241         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2242                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2243
2244         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2245
2247         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2248         if (enable)
2249                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2250
2251         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2252 }
2253
2254 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2255 {
2256         u32 phy;
2257
2258         if (!tg3_flag(tp, 5705_PLUS) ||
2259             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260                 return;
2261
2262         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263                 u32 ephy;
2264
2265                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2266                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2267
2268                         tg3_writephy(tp, MII_TG3_FET_TEST,
2269                                      ephy | MII_TG3_FET_SHADOW_EN);
2270                         if (!tg3_readphy(tp, reg, &phy)) {
2271                                 if (enable)
2272                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 else
2274                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275                                 tg3_writephy(tp, reg, phy);
2276                         }
2277                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2278                 }
2279         } else {
2280                 int ret;
2281
2282                 ret = tg3_phy_auxctl_read(tp,
2283                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284                 if (!ret) {
2285                         if (enable)
2286                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         else
2288                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289                         tg3_phy_auxctl_write(tp,
2290                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2291                 }
2292         }
2293 }
2294
2295 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2296 {
2297         int ret;
2298         u32 val;
2299
2300         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301                 return;
2302
2303         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2304         if (!ret)
2305                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2306                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 }
2308
2309 static void tg3_phy_apply_otp(struct tg3 *tp)
2310 {
2311         u32 otp, phy;
2312
2313         if (!tp->phy_otp)
2314                 return;
2315
2316         otp = tp->phy_otp;
2317
2318         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319                 return;
2320
2321         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2322         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2323         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2324
2325         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2326               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2327         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2328
2329         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2330         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2331         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2332
2333         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2334         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2335
2336         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2337         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2338
2339         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2340               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2341         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2342
2343         tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 }
2345
2346 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2347 {
2348         u32 val;
2349         struct ethtool_eee *dest = &tp->eee;
2350
2351         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2352                 return;
2353
2354         if (eee)
2355                 dest = eee;
2356
2357         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2358                 return;
2359
2360         /* Pull eee_active */
2361         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2362             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2363                 dest->eee_active = 1;
2364         } else
2365                 dest->eee_active = 0;
2366
2367         /* Pull lp advertised settings */
2368         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2369                 return;
2370         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372         /* Pull advertised and eee_enabled settings */
2373         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2374                 return;
2375         dest->eee_enabled = !!val;
2376         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2377
2378         /* Pull tx_lpi_enabled */
2379         val = tr32(TG3_CPMU_EEE_MODE);
2380         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2381
2382         /* Pull lpi timer value */
2383         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2384 }
2385
2386 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2387 {
2388         u32 val;
2389
2390         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2391                 return;
2392
2393         tp->setlpicnt = 0;
2394
2395         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2396             current_link_up &&
2397             tp->link_config.active_duplex == DUPLEX_FULL &&
2398             (tp->link_config.active_speed == SPEED_100 ||
2399              tp->link_config.active_speed == SPEED_1000)) {
2400                 u32 eeectl;
2401
2402                 if (tp->link_config.active_speed == SPEED_1000)
2403                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2404                 else
2405                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2406
2407                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2408
2409                 tg3_eee_pull_config(tp, NULL);
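                     /* The link partner negotiated EEE; arm a short
                      * countdown so LPI is enabled once the link settles.
                      */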
2410                 if (tp->eee.eee_active)
2411                         tp->setlpicnt = 2;
2412         }
2413
2414         if (!tp->setlpicnt) {
2415                 if (current_link_up &&
2416                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2417                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2418                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2419                 }
2420
2421                 val = tr32(TG3_CPMU_EEE_MODE);
2422                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2423         }
2424 }
2425
2426 static void tg3_phy_eee_enable(struct tg3 *tp)
2427 {
2428         u32 val;
2429
2430         if (tp->link_config.active_speed == SPEED_1000 &&
2431             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2432              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2433              tg3_flag(tp, 57765_CLASS)) &&
2434             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2435                 val = MII_TG3_DSP_TAP26_ALNOKO |
2436                       MII_TG3_DSP_TAP26_RMRXSTO;
2437                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2438                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2439         }
2440
2441         val = tr32(TG3_CPMU_EEE_MODE);
2442         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2443 }
2444
2445 static int tg3_wait_macro_done(struct tg3 *tp)
2446 {
2447         int limit = 100;
2448
2449         while (limit--) {
2450                 u32 tmp32;
2451
2452                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2453                         if ((tmp32 & 0x1000) == 0)
2454                                 break;
2455                 }
2456         }
2457         if (limit < 0)
2458                 return -EBUSY;
2459
2460         return 0;
2461 }
2462
2463 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2464 {
2465         static const u32 test_pat[4][6] = {
2466         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2467         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2468         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2469         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2470         };
2471         int chan;
2472
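             /* Write a known test pattern into each of the four DSP
              * channels and read it back to verify it; a macro timeout
              * additionally requests a fresh PHY reset for the retry.
              */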
2473         for (chan = 0; chan < 4; chan++) {
2474                 int i;
2475
2476                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2477                              (chan * 0x2000) | 0x0200);
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2479
2480                 for (i = 0; i < 6; i++)
2481                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2482                                      test_pat[chan][i]);
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2485                 if (tg3_wait_macro_done(tp)) {
2486                         *resetp = 1;
2487                         return -EBUSY;
2488                 }
2489
2490                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2491                              (chan * 0x2000) | 0x0200);
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2499                 if (tg3_wait_macro_done(tp)) {
2500                         *resetp = 1;
2501                         return -EBUSY;
2502                 }
2503
2504                 for (i = 0; i < 6; i += 2) {
2505                         u32 low, high;
2506
2507                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2508                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2509                             tg3_wait_macro_done(tp)) {
2510                                 *resetp = 1;
2511                                 return -EBUSY;
2512                         }
2513                         low &= 0x7fff;
2514                         high &= 0x000f;
2515                         if (low != test_pat[chan][i] ||
2516                             high != test_pat[chan][i+1]) {
2517                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2518                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2519                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2520
2521                                 return -EBUSY;
2522                         }
2523                 }
2524         }
2525
2526         return 0;
2527 }
2528
2529 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2530 {
2531         int chan;
2532
2533         for (chan = 0; chan < 4; chan++) {
2534                 int i;
2535
2536                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2537                              (chan * 0x2000) | 0x0200);
2538                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2539                 for (i = 0; i < 6; i++)
2540                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2541                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2542                 if (tg3_wait_macro_done(tp))
2543                         return -EBUSY;
2544         }
2545
2546         return 0;
2547 }
2548
2549 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2550 {
2551         u32 reg32, phy9_orig;
2552         int retries, do_phy_reset, err;
2553
2554         retries = 10;
2555         do_phy_reset = 1;
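             /* Retry the channel test up to 10 times, resetting the PHY
              * again whenever the test-pattern check requests it.
              */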
2556         do {
2557                 if (do_phy_reset) {
2558                         err = tg3_bmcr_reset(tp);
2559                         if (err)
2560                                 return err;
2561                         do_phy_reset = 0;
2562                 }
2563
2564                 /* Disable transmitter and interrupt.  */
2565                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2566                         continue;
2567
2568                 reg32 |= 0x3000;
2569                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2570
2571                 /* Set full-duplex, 1000 mbps.  */
2572                 tg3_writephy(tp, MII_BMCR,
2573                              BMCR_FULLDPLX | BMCR_SPEED1000);
2574
2575                 /* Set to master mode.  */
2576                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2577                         continue;
2578
2579                 tg3_writephy(tp, MII_CTRL1000,
2580                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2581
2582                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2583                 if (err)
2584                         return err;
2585
2586                 /* Block the PHY control access.  */
2587                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2588
2589                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2590                 if (!err)
2591                         break;
2592         } while (--retries);
2593
2594         err = tg3_phy_reset_chanpat(tp);
2595         if (err)
2596                 return err;
2597
2598         tg3_phydsp_write(tp, 0x8005, 0x0000);
2599
2600         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2601         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2602
2603         tg3_phy_toggle_auxctl_smdsp(tp, false);
2604
2605         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2606
2607         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2608         if (err)
2609                 return err;
2610
2611         reg32 &= ~0x3000;
2612         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2613
2614         return 0;
2615 }
2616
2617 static void tg3_carrier_off(struct tg3 *tp)
2618 {
2619         netif_carrier_off(tp->dev);
2620         tp->link_up = false;
2621 }
2622
2623 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2624 {
2625         if (tg3_flag(tp, ENABLE_ASF))
2626                 netdev_warn(tp->dev,
2627                             "Management side-band traffic will be interrupted during phy settings change\n");
2628 }
2629
2630 /* Reset the tigon3 PHY and reapply the chip-specific
2631  * workarounds that a reset wipes out.
2632  */
2633 static int tg3_phy_reset(struct tg3 *tp)
2634 {
2635         u32 val, cpmuctrl;
2636         int err;
2637
2638         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2639                 val = tr32(GRC_MISC_CFG);
2640                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2641                 udelay(40);
2642         }
2643         err  = tg3_readphy(tp, MII_BMSR, &val);
2644         err |= tg3_readphy(tp, MII_BMSR, &val);
2645         if (err != 0)
2646                 return -EBUSY;
2647
2648         if (netif_running(tp->dev) && tp->link_up) {
2649                 netif_carrier_off(tp->dev);
2650                 tg3_link_report(tp);
2651         }
2652
2653         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2654             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2655             tg3_asic_rev(tp) == ASIC_REV_5705) {
2656                 err = tg3_phy_reset_5703_4_5(tp);
2657                 if (err)
2658                         return err;
2659                 goto out;
2660         }
2661
2662         cpmuctrl = 0;
2663         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2664             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2665                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2666                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2667                         tw32(TG3_CPMU_CTRL,
2668                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2669         }
2670
2671         err = tg3_bmcr_reset(tp);
2672         if (err)
2673                 return err;
2674
2675         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2676                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2677                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2678
2679                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2680         }
2681
2682         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2683             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2684                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2685                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2686                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2687                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2688                         udelay(40);
2689                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2690                 }
2691         }
2692
2693         if (tg3_flag(tp, 5717_PLUS) &&
2694             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2695                 return 0;
2696
2697         tg3_phy_apply_otp(tp);
2698
2699         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2700                 tg3_phy_toggle_apd(tp, true);
2701         else
2702                 tg3_phy_toggle_apd(tp, false);
2703
2704 out:
2705         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2706             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2707                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2708                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2709                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2710         }
2711
2712         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2713                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715         }
2716
2717         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2718                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2719                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2720                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2721                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2722                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2723                 }
2724         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2725                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2726                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2727                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2728                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2729                                 tg3_writephy(tp, MII_TG3_TEST1,
2730                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2731                         } else
2732                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2733
2734                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2735                 }
2736         }
2737
2738         /* Set the Extended packet length bit (bit 14) on all chips
2739          * that support jumbo frames. */
2740         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2741                 /* Cannot do read-modify-write on 5401 */
2742                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2743         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2744                 /* Set bit 14 with read-modify-write to preserve other bits */
2745                 err = tg3_phy_auxctl_read(tp,
2746                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2747                 if (!err)
2748                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2749                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2750         }
2751
2752         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2753          * jumbo frames transmission.
2754          */
2755         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2756                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2757                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2758                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2759         }
2760
2761         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2762                 /* adjust output voltage */
2763                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2764         }
2765
2766         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2767                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2768
2769         tg3_phy_toggle_automdix(tp, true);
2770         tg3_phy_set_wirespeed(tp);
2771         return 0;
2772 }
2773
2774 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2775 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2776 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2777                                           TG3_GPIO_MSG_NEED_VAUX)
2778 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2779         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2780          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2781          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2782          (TG3_GPIO_MSG_DRVR_PRES << 12))
2783
2784 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2785         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2786          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2787          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2788          (TG3_GPIO_MSG_NEED_VAUX << 12))
2789
2790 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2791 {
2792         u32 status, shift;
2793
2794         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2795             tg3_asic_rev(tp) == ASIC_REV_5719)
2796                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2797         else
2798                 status = tr32(TG3_CPMU_DRV_STATUS);
2799
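             /* Each PCI function owns a 4-bit field in the shared status
              * word.
              */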
2800         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2801         status &= ~(TG3_GPIO_MSG_MASK << shift);
2802         status |= (newstat << shift);
2803
2804         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2805             tg3_asic_rev(tp) == ASIC_REV_5719)
2806                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2807         else
2808                 tw32(TG3_CPMU_DRV_STATUS, status);
2809
2810         return status >> TG3_APE_GPIO_MSG_SHIFT;
2811 }
2812
2813 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2814 {
2815         if (!tg3_flag(tp, IS_NIC))
2816                 return 0;
2817
2818         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2819             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2820             tg3_asic_rev(tp) == ASIC_REV_5720) {
2821                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2822                         return -EIO;
2823
2824                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2825
2826                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2827                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2828
2829                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2830         } else {
2831                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2832                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2833         }
2834
2835         return 0;
2836 }
2837
2838 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2839 {
2840         u32 grc_local_ctrl;
2841
2842         if (!tg3_flag(tp, IS_NIC) ||
2843             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2844             tg3_asic_rev(tp) == ASIC_REV_5701)
2845                 return;
2846
2847         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2848
2849         tw32_wait_f(GRC_LOCAL_CTRL,
2850                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2851                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2852
2853         tw32_wait_f(GRC_LOCAL_CTRL,
2854                     grc_local_ctrl,
2855                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2856
2857         tw32_wait_f(GRC_LOCAL_CTRL,
2858                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2859                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2860 }
2861
2862 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2863 {
2864         if (!tg3_flag(tp, IS_NIC))
2865                 return;
2866
2867         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2868             tg3_asic_rev(tp) == ASIC_REV_5701) {
2869                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2870                             (GRC_LCLCTRL_GPIO_OE0 |
2871                              GRC_LCLCTRL_GPIO_OE1 |
2872                              GRC_LCLCTRL_GPIO_OE2 |
2873                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2874                              GRC_LCLCTRL_GPIO_OUTPUT1),
2875                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2876         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2877                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2878                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2879                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2880                                      GRC_LCLCTRL_GPIO_OE1 |
2881                                      GRC_LCLCTRL_GPIO_OE2 |
2882                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2883                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2884                                      tp->grc_local_ctrl;
2885                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2886                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2887
2888                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2889                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2890                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2891
2892                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2893                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2894                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2895         } else {
2896                 u32 no_gpio2;
2897                 u32 grc_local_ctrl = 0;
2898
2899                 /* Workaround to prevent overdrawing current. */
2900                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2901                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2902                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2903                                     grc_local_ctrl,
2904                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2905                 }
2906
2907                 /* On 5753 and variants, GPIO2 cannot be used. */
2908                 no_gpio2 = tp->nic_sram_data_cfg &
2909                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2910
2911                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2912                                   GRC_LCLCTRL_GPIO_OE1 |
2913                                   GRC_LCLCTRL_GPIO_OE2 |
2914                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2915                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2916                 if (no_gpio2) {
2917                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2918                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2919                 }
2920                 tw32_wait_f(GRC_LOCAL_CTRL,
2921                             tp->grc_local_ctrl | grc_local_ctrl,
2922                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2925
2926                 tw32_wait_f(GRC_LOCAL_CTRL,
2927                             tp->grc_local_ctrl | grc_local_ctrl,
2928                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2929
2930                 if (!no_gpio2) {
2931                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2932                         tw32_wait_f(GRC_LOCAL_CTRL,
2933                                     tp->grc_local_ctrl | grc_local_ctrl,
2934                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2935                 }
2936         }
2937 }
2938
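/* Decide between Vmain and Vaux for 5717-class devices.  The APE GPIO
 * lock serializes the decision across PCI functions; each function
 * posts whether it needs Vaux via the TG3_GPIO_MSG bits, and the power
 * source is only actually switched once no other driver instance is
 * still present.
 */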
2939 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2940 {
2941         u32 msg = 0;
2942
2943         /* Serialize power state transitions */
2944         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2945                 return;
2946
2947         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2948                 msg = TG3_GPIO_MSG_NEED_VAUX;
2949
2950         msg = tg3_set_function_status(tp, msg);
2951
2952         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2953                 goto done;
2954
2955         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2956                 tg3_pwrsrc_switch_to_vaux(tp);
2957         else
2958                 tg3_pwrsrc_die_with_vmain(tp);
2959
2960 done:
2961         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2962 }
2963
2964 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2965 {
2966         bool need_vaux = false;
2967
2968         /* The GPIOs do something completely different on 57765. */
2969         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2970                 return;
2971
2972         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2973             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2974             tg3_asic_rev(tp) == ASIC_REV_5720) {
2975                 tg3_frob_aux_power_5717(tp, include_wol ?
2976                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2977                 return;
2978         }
2979
2980         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2981                 struct net_device *dev_peer;
2982
2983                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2984
2985                 /* remove_one() may have been run on the peer. */
2986                 if (dev_peer) {
2987                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2988
2989                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2990                                 return;
2991
2992                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2993                             tg3_flag(tp_peer, ENABLE_ASF))
2994                                 need_vaux = true;
2995                 }
2996         }
2997
2998         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2999             tg3_flag(tp, ENABLE_ASF))
3000                 need_vaux = true;
3001
3002         if (need_vaux)
3003                 tg3_pwrsrc_switch_to_vaux(tp);
3004         else
3005                 tg3_pwrsrc_die_with_vmain(tp);
3006 }
3007
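/* Returns nonzero when MAC_MODE_LINK_POLARITY should be set for the
 * given link speed on 5700-family devices.  The sense depends on the
 * LED mode and on whether a BCM5411 PHY is attached.
 */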
3008 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3009 {
3010         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3011                 return 1;
3012         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3013                 if (speed != SPEED_10)
3014                         return 1;
3015         } else if (speed == SPEED_10)
3016                 return 1;
3017
3018         return 0;
3019 }
3020
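/* Chips on which powering down the PHY is known to be unsafe.  The
 * serdes and PCI-function checks below narrow the bug to the affected
 * configurations.
 */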
3021 static bool tg3_phy_power_bug(struct tg3 *tp)
3022 {
3023         switch (tg3_asic_rev(tp)) {
3024         case ASIC_REV_5700:
3025         case ASIC_REV_5704:
3026                 return true;
3027         case ASIC_REV_5780:
3028                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3029                         return true;
3030                 return false;
3031         case ASIC_REV_5717:
3032                 if (!tp->pci_fn)
3033                         return true;
3034                 return false;
3035         case ASIC_REV_5719:
3036         case ASIC_REV_5720:
3037                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3038                     !tp->pci_fn)
3039                         return true;
3040                 return false;
3041         }
3042
3043         return false;
3044 }
3045
3046 static bool tg3_phy_led_bug(struct tg3 *tp)
3047 {
3048         switch (tg3_asic_rev(tp)) {
3049         case ASIC_REV_5719:
3050         case ASIC_REV_5720:
3051                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3052                     !tp->pci_fn)
3053                         return true;
3054                 return false;
3055         }
3056
3057         return false;
3058 }
3059
3060 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3061 {
3062         u32 val;
3063
3064         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3065                 return;
3066
3067         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3068                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3069                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3070                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3071
3072                         sg_dig_ctrl |=
3073                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3074                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3075                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3076                 }
3077                 return;
3078         }
3079
3080         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3081                 tg3_bmcr_reset(tp);
3082                 val = tr32(GRC_MISC_CFG);
3083                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3084                 udelay(40);
3085                 return;
3086         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3087                 u32 phytest;
3088                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3089                         u32 phy;
3090
3091                         tg3_writephy(tp, MII_ADVERTISE, 0);
3092                         tg3_writephy(tp, MII_BMCR,
3093                                      BMCR_ANENABLE | BMCR_ANRESTART);
3094
3095                         tg3_writephy(tp, MII_TG3_FET_TEST,
3096                                      phytest | MII_TG3_FET_SHADOW_EN);
3097                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3098                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3099                                 tg3_writephy(tp,
3100                                              MII_TG3_FET_SHDW_AUXMODE4,
3101                                              phy);
3102                         }
3103                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3104                 }
3105                 return;
3106         } else if (do_low_power) {
3107                 if (!tg3_phy_led_bug(tp))
3108                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3109                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3110
3111                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3112                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3113                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3114                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3115         }
3116
3117         /* The PHY must not be powered down on some chips because of
3118          * hardware bugs; see tg3_phy_power_bug() above.
3119          */
3120         if (tg3_phy_power_bug(tp))
3121                 return;
3122
3123         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3124             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3125                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3126                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3127                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3128                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3129         }
3130
3131         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3132 }
3133
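/* Acquire the hardware NVRAM arbitration semaphore.  The request is
 * posted via NVRAM_SWARB and the grant bit is polled for up to
 * 8000 * 20us (~160ms); on timeout the request is withdrawn and
 * -ENODEV is returned.  The lock nests via nvram_lock_cnt, so only the
 * outermost tg3_nvram_unlock() releases the semaphore.
 */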
3134 /* tp->lock is held. */
3135 static int tg3_nvram_lock(struct tg3 *tp)
3136 {
3137         if (tg3_flag(tp, NVRAM)) {
3138                 int i;
3139
3140                 if (tp->nvram_lock_cnt == 0) {
3141                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3142                         for (i = 0; i < 8000; i++) {
3143                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3144                                         break;
3145                                 udelay(20);
3146                         }
3147                         if (i == 8000) {
3148                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3149                                 return -ENODEV;
3150                         }
3151                 }
3152                 tp->nvram_lock_cnt++;
3153         }
3154         return 0;
3155 }
3156
3157 /* tp->lock is held. */
3158 static void tg3_nvram_unlock(struct tg3 *tp)
3159 {
3160         if (tg3_flag(tp, NVRAM)) {
3161                 if (tp->nvram_lock_cnt > 0)
3162                         tp->nvram_lock_cnt--;
3163                 if (tp->nvram_lock_cnt == 0)
3164                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3165         }
3166 }
3167
3168 /* tp->lock is held. */
3169 static void tg3_enable_nvram_access(struct tg3 *tp)
3170 {
3171         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3172                 u32 nvaccess = tr32(NVRAM_ACCESS);
3173
3174                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3175         }
3176 }
3177
3178 /* tp->lock is held. */
3179 static void tg3_disable_nvram_access(struct tg3 *tp)
3180 {
3181         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3182                 u32 nvaccess = tr32(NVRAM_ACCESS);
3183
3184                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3185         }
3186 }
3187
3188 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3189                                         u32 offset, u32 *val)
3190 {
3191         u32 tmp;
3192         int i;
3193
3194         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3195                 return -EINVAL;
3196
3197         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3198                                         EEPROM_ADDR_DEVID_MASK |
3199                                         EEPROM_ADDR_READ);
3200         tw32(GRC_EEPROM_ADDR,
3201              tmp |
3202              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3203              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3204               EEPROM_ADDR_ADDR_MASK) |
3205              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3206
3207         for (i = 0; i < 1000; i++) {
3208                 tmp = tr32(GRC_EEPROM_ADDR);
3209
3210                 if (tmp & EEPROM_ADDR_COMPLETE)
3211                         break;
3212                 msleep(1);
3213         }
3214         if (!(tmp & EEPROM_ADDR_COMPLETE))
3215                 return -EBUSY;
3216
3217         tmp = tr32(GRC_EEPROM_DATA);
3218
3219         /*
3220          * The data will always be in the opposite of the native
3221          * endian format.  Perform a blind byteswap to compensate.
3222          */
3223         *val = swab32(tmp);
3224
3225         return 0;
3226 }
3227
3228 #define NVRAM_CMD_TIMEOUT 10000
3229
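/* Kick a command into the NVRAM engine and poll for NVRAM_CMD_DONE.
 * With NVRAM_CMD_TIMEOUT polls of 10-40us each, this waits at most a
 * few hundred milliseconds before giving up with -EBUSY.
 */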
3230 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3231 {
3232         int i;
3233
3234         tw32(NVRAM_CMD, nvram_cmd);
3235         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3236                 usleep_range(10, 40);
3237                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3238                         udelay(10);
3239                         break;
3240                 }
3241         }
3242
3243         if (i == NVRAM_CMD_TIMEOUT)
3244                 return -EBUSY;
3245
3246         return 0;
3247 }
3248
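/* Atmel AT45DB0x1B-style flashes are addressed by page number and
 * offset-within-page rather than by a flat byte address.  The two
 * helpers below convert between the linear NVRAM offset used by the
 * driver and the chip's native page:offset form, using
 * tp->nvram_pagesize and ATMEL_AT45DB0X1B_PAGE_POS.
 */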
3249 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3250 {
3251         if (tg3_flag(tp, NVRAM) &&
3252             tg3_flag(tp, NVRAM_BUFFERED) &&
3253             tg3_flag(tp, FLASH) &&
3254             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3255             (tp->nvram_jedecnum == JEDEC_ATMEL))
3256
3257                 addr = ((addr / tp->nvram_pagesize) <<
3258                         ATMEL_AT45DB0X1B_PAGE_POS) +
3259                        (addr % tp->nvram_pagesize);
3260
3261         return addr;
3262 }
3263
3264 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3265 {
3266         if (tg3_flag(tp, NVRAM) &&
3267             tg3_flag(tp, NVRAM_BUFFERED) &&
3268             tg3_flag(tp, FLASH) &&
3269             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3270             (tp->nvram_jedecnum == JEDEC_ATMEL))
3271
3272                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3273                         tp->nvram_pagesize) +
3274                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3275
3276         return addr;
3277 }
3278
3279 /* NOTE: Data read in from NVRAM is byteswapped according to
3280  * the byteswapping settings for all other register accesses.
3281  * tg3 devices are BE devices, so on a BE machine, the data
3282  * returned will be exactly as it is seen in NVRAM.  On a LE
3283  * machine, the 32-bit value will be byteswapped.
3284  */
3285 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3286 {
3287         int ret;
3288
3289         if (!tg3_flag(tp, NVRAM))
3290                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3291
3292         offset = tg3_nvram_phys_addr(tp, offset);
3293
3294         if (offset > NVRAM_ADDR_MSK)
3295                 return -EINVAL;
3296
3297         ret = tg3_nvram_lock(tp);
3298         if (ret)
3299                 return ret;
3300
3301         tg3_enable_nvram_access(tp);
3302
3303         tw32(NVRAM_ADDR, offset);
3304         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3305                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3306
3307         if (ret == 0)
3308                 *val = tr32(NVRAM_RDDATA);
3309
3310         tg3_disable_nvram_access(tp);
3311
3312         tg3_nvram_unlock(tp);
3313
3314         return ret;
3315 }
3316
3317 /* Ensures NVRAM data is in bytestream format. */
3318 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3319 {
3320         u32 v;
3321         int res = tg3_nvram_read(tp, offset, &v);
3322         if (!res)
3323                 *val = cpu_to_be32(v);
3324         return res;
3325 }
3326
3327 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3328                                     u32 offset, u32 len, u8 *buf)
3329 {
3330         int i, j, rc = 0;
3331         u32 val;
3332
3333         for (i = 0; i < len; i += 4) {
3334                 u32 addr;
3335                 __be32 data;
3336
3337                 addr = offset + i;
3338
3339                 memcpy(&data, buf + i, 4);
3340
3341                 /*
3342                  * The SEEPROM interface expects the data to always be in the
3343                  * opposite of the native endian format.  We accomplish this
3344                  * by reversing all the operations that would have been
3345                  * performed on the data by a call to tg3_nvram_read_be32().
3346                  */
3347                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3348
3349                 val = tr32(GRC_EEPROM_ADDR);
3350                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3351
3352                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3353                         EEPROM_ADDR_READ);
3354                 tw32(GRC_EEPROM_ADDR, val |
3355                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3356                         (addr & EEPROM_ADDR_ADDR_MASK) |
3357                         EEPROM_ADDR_START |
3358                         EEPROM_ADDR_WRITE);
3359
3360                 for (j = 0; j < 1000; j++) {
3361                         val = tr32(GRC_EEPROM_ADDR);
3362
3363                         if (val & EEPROM_ADDR_COMPLETE)
3364                                 break;
3365                         msleep(1);
3366                 }
3367                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3368                         rc = -EBUSY;
3369                         break;
3370                 }
3371         }
3372
3373         return rc;
3374 }
3375
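/* Unbuffered flash parts can only be erased a full page at a time, so
 * each page touched by the write is read back in its entirety, merged
 * with the new data, erased, and then rewritten word by word (with a
 * write-enable command before both the erase and the program phases).
 */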
3376 /* offset and length are dword aligned */
3377 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3378                 u8 *buf)
3379 {
3380         int ret = 0;
3381         u32 pagesize = tp->nvram_pagesize;
3382         u32 pagemask = pagesize - 1;
3383         u32 nvram_cmd;
3384         u8 *tmp;
3385
3386         tmp = kmalloc(pagesize, GFP_KERNEL);
3387         if (tmp == NULL)
3388                 return -ENOMEM;
3389
3390         while (len) {
3391                 int j;
3392                 u32 phy_addr, page_off, size;
3393
3394                 phy_addr = offset & ~pagemask;
3395
3396                 for (j = 0; j < pagesize; j += 4) {
3397                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3398                                                   (__be32 *) (tmp + j));
3399                         if (ret)
3400                                 break;
3401                 }
3402                 if (ret)
3403                         break;
3404
3405                 page_off = offset & pagemask;
3406                 size = pagesize;
3407                 if (len < size)
3408                         size = len;
3409
3410                 len -= size;
3411
3412                 memcpy(tmp + page_off, buf, size);
3413
3414                 offset = offset + (pagesize - page_off);
3415
3416                 tg3_enable_nvram_access(tp);
3417
3418                 /*
3419                  * Before we can erase the flash page, we need
3420                  * to issue a special "write enable" command.
3421                  */
3422                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3423
3424                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3425                         break;
3426
3427                 /* Erase the target page */
3428                 tw32(NVRAM_ADDR, phy_addr);
3429
3430                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3431                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3432
3433                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434                         break;
3435
3436                 /* Issue another write enable to start the write. */
3437                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3438
3439                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3440                         break;
3441
3442                 for (j = 0; j < pagesize; j += 4) {
3443                         __be32 data;
3444
3445                         data = *((__be32 *) (tmp + j));
3446
3447                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3448
3449                         tw32(NVRAM_ADDR, phy_addr + j);
3450
3451                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3452                                 NVRAM_CMD_WR;
3453
3454                         if (j == 0)
3455                                 nvram_cmd |= NVRAM_CMD_FIRST;
3456                         else if (j == (pagesize - 4))
3457                                 nvram_cmd |= NVRAM_CMD_LAST;
3458
3459                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3460                         if (ret)
3461                                 break;
3462                 }
3463                 if (ret)
3464                         break;
3465         }
3466
3467         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468         tg3_nvram_exec_cmd(tp, nvram_cmd);
3469
3470         kfree(tmp);
3471
3472         return ret;
3473 }
3474
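/* Buffered flash parts (and plain EEPROMs) handle the erase internally,
 * so each dword can be programmed directly.  NVRAM_CMD_FIRST and
 * NVRAM_CMD_LAST bracket page boundaries and the final word of the
 * transfer.
 */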
3475 /* offset and length are dword aligned */
3476 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3477                 u8 *buf)
3478 {
3479         int i, ret = 0;
3480
3481         for (i = 0; i < len; i += 4, offset += 4) {
3482                 u32 page_off, phy_addr, nvram_cmd;
3483                 __be32 data;
3484
3485                 memcpy(&data, buf + i, 4);
3486                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3487
3488                 page_off = offset % tp->nvram_pagesize;
3489
3490                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3491
3492                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3493
3494                 if (page_off == 0 || i == 0)
3495                         nvram_cmd |= NVRAM_CMD_FIRST;
3496                 if (page_off == (tp->nvram_pagesize - 4))
3497                         nvram_cmd |= NVRAM_CMD_LAST;
3498
3499                 if (i == (len - 4))
3500                         nvram_cmd |= NVRAM_CMD_LAST;
3501
3502                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3503                     !tg3_flag(tp, FLASH) ||
3504                     !tg3_flag(tp, 57765_PLUS))
3505                         tw32(NVRAM_ADDR, phy_addr);
3506
3507                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3508                     !tg3_flag(tp, 5755_PLUS) &&
3509                     (tp->nvram_jedecnum == JEDEC_ST) &&
3510                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3511                         u32 cmd;
3512
3513                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3514                         ret = tg3_nvram_exec_cmd(tp, cmd);
3515                         if (ret)
3516                                 break;
3517                 }
3518                 if (!tg3_flag(tp, FLASH)) {
3519                         /* We always do complete word writes to eeprom. */
3520                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3521                 }
3522
3523                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3524                 if (ret)
3525                         break;
3526         }
3527         return ret;
3528 }
3529
3530 /* offset and length are dword aligned */
3531 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3532 {
3533         int ret;
3534
3535         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3536                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3537                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3538                 udelay(40);
3539         }
3540
3541         if (!tg3_flag(tp, NVRAM)) {
3542                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3543         } else {
3544                 u32 grc_mode;
3545
3546                 ret = tg3_nvram_lock(tp);
3547                 if (ret)
3548                         return ret;
3549
3550                 tg3_enable_nvram_access(tp);
3551                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3552                         tw32(NVRAM_WRITE1, 0x406);
3553
3554                 grc_mode = tr32(GRC_MODE);
3555                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3556
3557                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3558                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3559                                 buf);
3560                 } else {
3561                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3562                                 buf);
3563                 }
3564
3565                 grc_mode = tr32(GRC_MODE);
3566                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3567
3568                 tg3_disable_nvram_access(tp);
3569                 tg3_nvram_unlock(tp);
3570         }
3571
3572         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3573                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3574                 udelay(40);
3575         }
3576
3577         return ret;
3578 }
3579
3580 #define RX_CPU_SCRATCH_BASE     0x30000
3581 #define RX_CPU_SCRATCH_SIZE     0x04000
3582 #define TX_CPU_SCRATCH_BASE     0x34000
3583 #define TX_CPU_SCRATCH_SIZE     0x04000
3584
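/* Halt one of the on-chip RX/TX CPUs by repeatedly writing
 * CPU_MODE_HALT until it reads back.  The pci_channel_offline() check
 * lets the loop bail out early if the device has dropped off the bus
 * (e.g. during an EEH error on powerpc) instead of spinning through
 * all 10000 iterations against a dead device.
 */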
3585 /* tp->lock is held. */
3586 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3587 {
3588         int i;
3589         const int iters = 10000;
3590
3591         for (i = 0; i < iters; i++) {
3592                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3593                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3594                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3595                         break;
3596                 if (pci_channel_offline(tp->pdev))
3597                         return -EBUSY;
3598         }
3599
3600         return (i == iters) ? -EBUSY : 0;
3601 }
3602
3603 /* tp->lock is held. */
3604 static int tg3_rxcpu_pause(struct tg3 *tp)
3605 {
3606         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3607
3608         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3609         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3610         udelay(10);
3611
3612         return rc;
3613 }
3614
3615 /* tp->lock is held. */
3616 static int tg3_txcpu_pause(struct tg3 *tp)
3617 {
3618         return tg3_pause_cpu(tp, TX_CPU_BASE);
3619 }
3620
3621 /* tp->lock is held. */
3622 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3623 {
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626 }
3627
3628 /* tp->lock is held. */
3629 static void tg3_rxcpu_resume(struct tg3 *tp)
3630 {
3631         tg3_resume_cpu(tp, RX_CPU_BASE);
3632 }
3633
3634 /* tp->lock is held. */
3635 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3636 {
3637         int rc;
3638
3639         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3640
3641         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3642                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3643
3644                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3645                 return 0;
3646         }
3647         if (cpu_base == RX_CPU_BASE) {
3648                 rc = tg3_rxcpu_pause(tp);
3649         } else {
3650                 /*
3651                  * There is only an Rx CPU for the 5750 derivative in the
3652                  * BCM4785.
3653                  */
3654                 if (tg3_flag(tp, IS_SSB_CORE))
3655                         return 0;
3656
3657                 rc = tg3_txcpu_pause(tp);
3658         }
3659
3660         if (rc) {
3661                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3662                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3663                 return -ENODEV;
3664         }
3665
3666         /* Clear firmware's nvram arbitration. */
3667         if (tg3_flag(tp, NVRAM))
3668                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3669         return 0;
3670 }
3671
3672 static int tg3_fw_data_len(struct tg3 *tp,
3673                            const struct tg3_firmware_hdr *fw_hdr)
3674 {
3675         int fw_len;
3676
3677         /* Non-fragmented firmware has one firmware header followed by a
3678          * contiguous chunk of data to be written. The length field in that
3679          * header is not the length of the data to be written but the
3680          * complete length of the bss. The data length is instead derived
3681          * from tp->fw->size minus the headers.
3682          *
3683          * Fragmented firmware has a main header followed by multiple
3684          * fragments. Each fragment is identical to non-fragmented firmware:
3685          * a firmware header followed by a contiguous chunk of data. In
3686          * the main header, the length field is unused and set to 0xffffffff.
3687          * In each fragment header, the length is the entire size of that
3688          * fragment, i.e. fragment data plus header length. The data length
3689          * is therefore the header's length field minus TG3_FW_HDR_LEN.
3690          */
3691         if (tp->fw_len == 0xffffffff)
3692                 fw_len = be32_to_cpu(fw_hdr->len);
3693         else
3694                 fw_len = tp->fw->size;
3695
3696         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3697 }
3698
3699 /* tp->lock is held. */
3700 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3701                                  u32 cpu_scratch_base, int cpu_scratch_size,
3702                                  const struct tg3_firmware_hdr *fw_hdr)
3703 {
3704         int err, i;
3705         void (*write_op)(struct tg3 *, u32, u32);
3706         int total_len = tp->fw->size;
3707
3708         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3709                 netdev_err(tp->dev,
3710                            "%s: Trying to load TX cpu firmware on a 5705 or later chip\n",
3711                            __func__);
3712                 return -EINVAL;
3713         }
3714
3715         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3716                 write_op = tg3_write_mem;
3717         else
3718                 write_op = tg3_write_indirect_reg32;
3719
3720         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3721                 /* It is possible that bootcode is still loading at this point.
3722                  * Get the nvram lock first before halting the cpu.
3723                  */
3724                 int lock_err = tg3_nvram_lock(tp);
3725                 err = tg3_halt_cpu(tp, cpu_base);
3726                 if (!lock_err)
3727                         tg3_nvram_unlock(tp);
3728                 if (err)
3729                         goto out;
3730
3731                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3732                         write_op(tp, cpu_scratch_base + i, 0);
3733                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3734                 tw32(cpu_base + CPU_MODE,
3735                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3736         } else {
3737                 /* Subtract additional main header for fragmented firmware and
3738                  * advance to the first fragment
3739                  */
3740                 total_len -= TG3_FW_HDR_LEN;
3741                 fw_hdr++;
3742         }
3743
3744         do {
3745                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3746                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3747                         write_op(tp, cpu_scratch_base +
3748                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3749                                      (i * sizeof(u32)),
3750                                  be32_to_cpu(fw_data[i]));
3751
3752                 total_len -= be32_to_cpu(fw_hdr->len);
3753
3754                 /* Advance to next fragment */
3755                 fw_hdr = (struct tg3_firmware_hdr *)
3756                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3757         } while (total_len > 0);
3758
3759         err = 0;
3760
3761 out:
3762         return err;
3763 }
3764
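/* Point a halted CPU's program counter at the new firmware entry point.
 * The write is verified by reading the PC back, re-halting and retrying
 * up to five times before giving up with -EBUSY.
 */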
3765 /* tp->lock is held. */
3766 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3767 {
3768         int i;
3769         const int iters = 5;
3770
3771         tw32(cpu_base + CPU_STATE, 0xffffffff);
3772         tw32_f(cpu_base + CPU_PC, pc);
3773
3774         for (i = 0; i < iters; i++) {
3775                 if (tr32(cpu_base + CPU_PC) == pc)
3776                         break;
3777                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3778                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3779                 tw32_f(cpu_base + CPU_PC, pc);
3780                 udelay(1000);
3781         }
3782
3783         return (i == iters) ? -EBUSY : 0;
3784 }
3785
3786 /* tp->lock is held. */
3787 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3788 {
3789         const struct tg3_firmware_hdr *fw_hdr;
3790         int err;
3791
3792         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3793
3794         /* The firmware blob starts with version numbers, followed by the
3795            start address and length. The length field holds the complete
3796            length: end_address_of_bss - start_address_of_text. The
3797            remainder is the blob to be loaded contiguously from the
3798            start address. */
3799
3800         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3801                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3807                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3808                                     fw_hdr);
3809         if (err)
3810                 return err;
3811
3812         /* Now startup only the RX cpu. */
3813         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3814                                        be32_to_cpu(fw_hdr->base_addr));
3815         if (err) {
3816                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3817                            "should be %08x\n", __func__,
3818                            tr32(RX_CPU_BASE + CPU_PC),
3819                                 be32_to_cpu(fw_hdr->base_addr));
3820                 return -ENODEV;
3821         }
3822
3823         tg3_rxcpu_resume(tp);
3824
3825         return 0;
3826 }
3827
3828 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3829 {
3830         const int iters = 1000;
3831         int i;
3832         u32 val;
3833
3834         /* Wait for boot code to complete initialization and enter service
3835          * loop. It is then safe to download service patches.
3836          */
3837         for (i = 0; i < iters; i++) {
3838                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3839                         break;
3840
3841                 udelay(10);
3842         }
3843
3844         if (i == iters) {
3845                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3846                 return -EBUSY;
3847         }
3848
3849         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3850         if (val & 0xff) {
3851                 netdev_warn(tp->dev,
3852                             "Other patches exist. Not downloading EEE patch\n");
3853                 return -EEXIST;
3854         }
3855
3856         return 0;
3857 }
3858
3859 /* tp->lock is held. */
3860 static void tg3_load_57766_firmware(struct tg3 *tp)
3861 {
3862         struct tg3_firmware_hdr *fw_hdr;
3863
3864         if (!tg3_flag(tp, NO_NVRAM))
3865                 return;
3866
3867         if (tg3_validate_rxcpu_state(tp))
3868                 return;
3869
3870         if (!tp->fw)
3871                 return;
3872
3873         /* This firmware blob has a different format from older firmware
3874          * releases, as described below. The main difference is that the
3875          * data is fragmented and written to non-contiguous locations.
3876          *
3877          * In the beginning we have a firmware header identical to other
3878          * firmware which consists of version, base addr and length. The length
3879          * here is unused and set to 0xffffffff.
3880          *
3881          * This is followed by a series of firmware fragments, each of
3882          * which is individually identical to older firmware images, i.e.
3883          * a firmware header followed by the data for that fragment. The
3884          * version field of each fragment header is unused.
3885          */
3886
3887         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3888         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3889                 return;
3890
3891         if (tg3_rxcpu_pause(tp))
3892                 return;
3893
3894         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3895         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3896
3897         tg3_rxcpu_resume(tp);
3898 }
3899
3900 /* tp->lock is held. */
3901 static int tg3_load_tso_firmware(struct tg3 *tp)
3902 {
3903         const struct tg3_firmware_hdr *fw_hdr;
3904         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3905         int err;
3906
3907         if (!tg3_flag(tp, FW_TSO))
3908                 return 0;
3909
3910         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3911
3912         /* The firmware blob starts with version numbers, followed by the
3913            start address and length. The length field holds the complete
3914            length: end_address_of_bss - start_address_of_text. The
3915            remainder is the blob to be loaded contiguously from the
3916            start address. */
3917
3918         cpu_scratch_size = tp->fw_len;
3919
3920         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3921                 cpu_base = RX_CPU_BASE;
3922                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3923         } else {
3924                 cpu_base = TX_CPU_BASE;
3925                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3926                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3927         }
3928
3929         err = tg3_load_firmware_cpu(tp, cpu_base,
3930                                     cpu_scratch_base, cpu_scratch_size,
3931                                     fw_hdr);
3932         if (err)
3933                 return err;
3934
3935         /* Now startup the cpu. */
3936         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3937                                        be32_to_cpu(fw_hdr->base_addr));
3938         if (err) {
3939                 netdev_err(tp->dev,
3940                            "%s fails to set CPU PC, is %08x should be %08x\n",
3941                            __func__, tr32(cpu_base + CPU_PC),
3942                            be32_to_cpu(fw_hdr->base_addr));
3943                 return -ENODEV;
3944         }
3945
3946         tg3_resume_cpu(tp, cpu_base);
3947         return 0;
3948 }
3949
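/* Program one hardware MAC-address slot.  The two high-order octets go
 * into the _HIGH register and the remaining four into _LOW; slots 0-3
 * live in MAC_ADDR_*, slots 4 and up in the extended MAC_EXTADDR_*
 * register bank.
 */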
3950 /* tp->lock is held. */
3951 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3952 {
3953         u32 addr_high, addr_low;
3954
3955         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3956         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3957                     (mac_addr[4] <<  8) | mac_addr[5]);
3958
3959         if (index < 4) {
3960                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3961                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3962         } else {
3963                 index -= 4;
3964                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3965                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3966         }
3967 }
3968
3969 /* tp->lock is held. */
3970 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3971 {
3972         u32 addr_high;
3973         int i;
3974
3975         for (i = 0; i < 4; i++) {
3976                 if (i == 1 && skip_mac_1)
3977                         continue;
3978                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979         }
3980
3981         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3982             tg3_asic_rev(tp) == ASIC_REV_5704) {
3983                 for (i = 4; i < 16; i++)
3984                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3985         }
3986
3987         addr_high = (tp->dev->dev_addr[0] +
3988                      tp->dev->dev_addr[1] +
3989                      tp->dev->dev_addr[2] +
3990                      tp->dev->dev_addr[3] +
3991                      tp->dev->dev_addr[4] +
3992                      tp->dev->dev_addr[5]) &
3993                 TX_BACKOFF_SEED_MASK;
3994         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3995 }
3996
3997 static void tg3_enable_register_access(struct tg3 *tp)
3998 {
3999         /*
4000          * Make sure register accesses (indirect or otherwise) will function
4001          * correctly.
4002          */
4003         pci_write_config_dword(tp->pdev,
4004                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4005 }
4006
4007 static int tg3_power_up(struct tg3 *tp)
4008 {
4009         int err;
4010
4011         tg3_enable_register_access(tp);
4012
4013         err = pci_set_power_state(tp->pdev, PCI_D0);
4014         if (!err) {
4015                 /* Switch out of Vaux if it is a NIC */
4016                 tg3_pwrsrc_switch_to_vmain(tp);
4017         } else {
4018                 netdev_err(tp->dev, "Transition to D0 failed\n");
4019         }
4020
4021         return err;
4022 }
4023
4024 static int tg3_setup_phy(struct tg3 *, bool);
4025
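/* Prepare the chip for a D3/power-down transition: mask PCI interrupts,
 * reprogram the PHY for the configured wake-up link modes, arm
 * magic-packet reception in the MAC when wake-up is requested, gate the
 * chip-specific clocks, power down the PHY when nothing needs to wake
 * us, and finally notify the firmware/APE of the shutdown.
 */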
4026 static int tg3_power_down_prepare(struct tg3 *tp)
4027 {
4028         u32 misc_host_ctrl;
4029         bool device_should_wake, do_low_power;
4030
4031         tg3_enable_register_access(tp);
4032
4033         /* Restore the CLKREQ setting. */
4034         if (tg3_flag(tp, CLKREQ_BUG))
4035                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4036                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4037
4038         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4039         tw32(TG3PCI_MISC_HOST_CTRL,
4040              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4041
4042         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4043                              tg3_flag(tp, WOL_ENABLE);
4044
4045         if (tg3_flag(tp, USE_PHYLIB)) {
4046                 do_low_power = false;
4047                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4048                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4049                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4050                         struct phy_device *phydev;
4051                         u32 phyid;
4052
4053                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4054
4055                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4056
4057                         tp->link_config.speed = phydev->speed;
4058                         tp->link_config.duplex = phydev->duplex;
4059                         tp->link_config.autoneg = phydev->autoneg;
4060                         ethtool_convert_link_mode_to_legacy_u32(
4061                                 &tp->link_config.advertising,
4062                                 phydev->advertising);
4063
4064                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4065                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4066                                          advertising);
4067                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4068                                          advertising);
4069                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4070                                          advertising);
4071
4072                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4073                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4074                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4075                                                          advertising);
4076                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4077                                                          advertising);
4078                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4079                                                          advertising);
4080                                 } else {
4081                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4082                                                          advertising);
4083                                 }
4084                         }
4085
4086                         linkmode_copy(phydev->advertising, advertising);
4087                         phy_start_aneg(phydev);
4088
4089                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4090                         if (phyid != PHY_ID_BCMAC131) {
4091                                 phyid &= PHY_BCM_OUI_MASK;
4092                                 if (phyid == PHY_BCM_OUI_1 ||
4093                                     phyid == PHY_BCM_OUI_2 ||
4094                                     phyid == PHY_BCM_OUI_3)
4095                                         do_low_power = true;
4096                         }
4097                 }
4098         } else {
4099                 do_low_power = true;
4100
4101                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4102                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4103
4104                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4105                         tg3_setup_phy(tp, false);
4106         }
4107
4108         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4109                 u32 val;
4110
4111                 val = tr32(GRC_VCPU_EXT_CTRL);
4112                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4113         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4114                 int i;
4115                 u32 val;
4116
4117                 for (i = 0; i < 200; i++) {
4118                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4119                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4120                                 break;
4121                         msleep(1);
4122                 }
4123         }
4124         if (tg3_flag(tp, WOL_CAP))
4125                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4126                                                      WOL_DRV_STATE_SHUTDOWN |
4127                                                      WOL_DRV_WOL |
4128                                                      WOL_SET_MAGIC_PKT);
4129
4130         if (device_should_wake) {
4131                 u32 mac_mode;
4132
4133                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4134                         if (do_low_power &&
4135                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4136                                 tg3_phy_auxctl_write(tp,
4137                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4138                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4139                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4140                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4141                                 udelay(40);
4142                         }
4143
4144                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4145                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4146                         else if (tp->phy_flags &
4147                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4148                                 if (tp->link_config.active_speed == SPEED_1000)
4149                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4150                                 else
4151                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4152                         } else
4153                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4154
4155                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4156                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4157                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4158                                              SPEED_100 : SPEED_10;
4159                                 if (tg3_5700_link_polarity(tp, speed))
4160                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4161                                 else
4162                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4163                         }
4164                 } else {
4165                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4166                 }
4167
4168                 if (!tg3_flag(tp, 5750_PLUS))
4169                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4170
4171                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4172                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4173                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4174                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4175
4176                 if (tg3_flag(tp, ENABLE_APE))
4177                         mac_mode |= MAC_MODE_APE_TX_EN |
4178                                     MAC_MODE_APE_RX_EN |
4179                                     MAC_MODE_TDE_ENABLE;
4180
4181                 tw32_f(MAC_MODE, mac_mode);
4182                 udelay(100);
4183
4184                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4185                 udelay(10);
4186         }
4187
4188         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4189             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4190              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4191                 u32 base_val;
4192
4193                 base_val = tp->pci_clock_ctrl;
4194                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4195                              CLOCK_CTRL_TXCLK_DISABLE);
4196
4197                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4198                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4199         } else if (tg3_flag(tp, 5780_CLASS) ||
4200                    tg3_flag(tp, CPMU_PRESENT) ||
4201                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4202                 /* do nothing */
4203         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4204                 u32 newbits1, newbits2;
4205
4206                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4207                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4208                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4209                                     CLOCK_CTRL_TXCLK_DISABLE |
4210                                     CLOCK_CTRL_ALTCLK);
4211                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212                 } else if (tg3_flag(tp, 5705_PLUS)) {
4213                         newbits1 = CLOCK_CTRL_625_CORE;
4214                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4215                 } else {
4216                         newbits1 = CLOCK_CTRL_ALTCLK;
4217                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4218                 }
4219
4220                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4221                             40);
4222
4223                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4224                             40);
4225
4226                 if (!tg3_flag(tp, 5705_PLUS)) {
4227                         u32 newbits3;
4228
4229                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4230                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4231                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4232                                             CLOCK_CTRL_TXCLK_DISABLE |
4233                                             CLOCK_CTRL_44MHZ_CORE);
4234                         } else {
4235                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4236                         }
4237
4238                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4239                                     tp->pci_clock_ctrl | newbits3, 40);
4240                 }
4241         }
4242
4243         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
4244                 tg3_power_down_phy(tp, do_low_power);
4245
4246         tg3_frob_aux_power(tp, true);
4247
4248         /* Workaround for unstable PLL clock */
4249         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4250             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4251              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4252                 u32 val = tr32(0x7d00);
4253
4254                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4255                 tw32(0x7d00, val);
4256                 if (!tg3_flag(tp, ENABLE_ASF)) {
4257                         int err;
4258
4259                         err = tg3_nvram_lock(tp);
4260                         tg3_halt_cpu(tp, RX_CPU_BASE);
4261                         if (!err)
4262                                 tg3_nvram_unlock(tp);
4263                 }
4264         }
4265
4266         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4267
4268         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4269
4270         return 0;
4271 }
4272
4273 static void tg3_power_down(struct tg3 *tp)
4274 {
4275         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4276         pci_set_power_state(tp->pdev, PCI_D3hot);
4277 }
4278
4279 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4280 {
4281         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4282         case MII_TG3_AUX_STAT_10HALF:
4283                 *speed = SPEED_10;
4284                 *duplex = DUPLEX_HALF;
4285                 break;
4286
4287         case MII_TG3_AUX_STAT_10FULL:
4288                 *speed = SPEED_10;
4289                 *duplex = DUPLEX_FULL;
4290                 break;
4291
4292         case MII_TG3_AUX_STAT_100HALF:
4293                 *speed = SPEED_100;
4294                 *duplex = DUPLEX_HALF;
4295                 break;
4296
4297         case MII_TG3_AUX_STAT_100FULL:
4298                 *speed = SPEED_100;
4299                 *duplex = DUPLEX_FULL;
4300                 break;
4301
4302         case MII_TG3_AUX_STAT_1000HALF:
4303                 *speed = SPEED_1000;
4304                 *duplex = DUPLEX_HALF;
4305                 break;
4306
4307         case MII_TG3_AUX_STAT_1000FULL:
4308                 *speed = SPEED_1000;
4309                 *duplex = DUPLEX_FULL;
4310                 break;
4311
4312         default:
4313                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4314                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4315                                  SPEED_10;
4316                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4317                                   DUPLEX_HALF;
4318                         break;
4319                 }
4320                 *speed = SPEED_UNKNOWN;
4321                 *duplex = DUPLEX_UNKNOWN;
4322                 break;
4323         }
4324 }
4325
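/* Program the PHY's autonegotiation advertisements: the 10/100
 * advertisement register (including flow control), the 1000BASE-T
 * control register (forcing the master role on early 5701 revisions),
 * and, when the PHY is EEE capable, the EEE advertisement via the
 * clause-45 AN MMD plus the chip-specific DSP fixups.
 */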
4326 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4327 {
4328         int err = 0;
4329         u32 val, new_adv;
4330
4331         new_adv = ADVERTISE_CSMA;
4332         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4333         new_adv |= mii_advertise_flowctrl(flowctrl);
4334
4335         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4336         if (err)
4337                 goto done;
4338
4339         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4340                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4341
4342                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4343                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4344                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4345
4346                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4347                 if (err)
4348                         goto done;
4349         }
4350
4351         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4352                 goto done;
4353
4354         tw32(TG3_CPMU_EEE_MODE,
4355              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4356
4357         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4358         if (!err) {
4359                 u32 err2;
4360
4361                 val = 0;
4362                 /* Advertise 100-BaseTX EEE ability */
4363                 if (advertise & ADVERTISED_100baseT_Full)
4364                         val |= MDIO_AN_EEE_ADV_100TX;
4365                 /* Advertise 1000-BaseT EEE ability */
4366                 if (advertise & ADVERTISED_1000baseT_Full)
4367                         val |= MDIO_AN_EEE_ADV_1000T;
4368
4369                 if (!tp->eee.eee_enabled) {
4370                         val = 0;
4371                         tp->eee.advertised = 0;
4372                 } else {
4373                         tp->eee.advertised = advertise &
4374                                              (ADVERTISED_100baseT_Full |
4375                                               ADVERTISED_1000baseT_Full);
4376                 }
4377
4378                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4379                 if (err)
4380                         val = 0;
4381
4382                 switch (tg3_asic_rev(tp)) {
4383                 case ASIC_REV_5717:
4384                 case ASIC_REV_57765:
4385                 case ASIC_REV_57766:
4386                 case ASIC_REV_5719:
4387                         /* If we advertised any EEE link modes above... */
4388                         if (val)
4389                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4390                                       MII_TG3_DSP_TAP26_RMRXSTO |
4391                                       MII_TG3_DSP_TAP26_OPCSINPT;
4392                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4393                         /* Fall through */
4394                 case ASIC_REV_5720:
4395                 case ASIC_REV_5762:
4396                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4397                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4398                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4399                 }
4400
4401                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4402                 if (!err)
4403                         err = err2;
4404         }
4405
4406 done:
4407         return err;
4408 }
4409
4410 static void tg3_phy_copper_begin(struct tg3 *tp)
4411 {
4412         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4413             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4414                 u32 adv, fc;
4415
4416                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4417                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4418                         adv = ADVERTISED_10baseT_Half |
4419                               ADVERTISED_10baseT_Full;
4420                         if (tg3_flag(tp, WOL_SPEED_100MB))
4421                                 adv |= ADVERTISED_100baseT_Half |
4422                                        ADVERTISED_100baseT_Full;
4423                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4424                                 if (!(tp->phy_flags &
4425                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4426                                         adv |= ADVERTISED_1000baseT_Half;
4427                                 adv |= ADVERTISED_1000baseT_Full;
4428                         }
4429
4430                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4431                 } else {
4432                         adv = tp->link_config.advertising;
4433                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4434                                 adv &= ~(ADVERTISED_1000baseT_Half |
4435                                          ADVERTISED_1000baseT_Full);
4436
4437                         fc = tp->link_config.flowctrl;
4438                 }
4439
4440                 tg3_phy_autoneg_cfg(tp, adv, fc);
4441
4442                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4443                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4444                         /* Normally during power down we want to autonegotiate
4445                          * the lowest possible speed for WOL. However, to avoid
4446                          * link flap, we leave it untouched.
4447                          */
4448                         return;
4449                 }
4450
4451                 tg3_writephy(tp, MII_BMCR,
4452                              BMCR_ANENABLE | BMCR_ANRESTART);
4453         } else {
4454                 int i;
4455                 u32 bmcr, orig_bmcr;
4456
4457                 tp->link_config.active_speed = tp->link_config.speed;
4458                 tp->link_config.active_duplex = tp->link_config.duplex;
4459
4460                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4461                         /* With autoneg disabled, the 5714/5715 family
4462                          * only links up when the advertisement register
4463                          * has the configured speed enabled.
4464                          */
4465                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4466                 }
4467
4468                 bmcr = 0;
4469                 switch (tp->link_config.speed) {
4470                 default:
4471                 case SPEED_10:
4472                         break;
4473
4474                 case SPEED_100:
4475                         bmcr |= BMCR_SPEED100;
4476                         break;
4477
4478                 case SPEED_1000:
4479                         bmcr |= BMCR_SPEED1000;
4480                         break;
4481                 }
4482
4483                 if (tp->link_config.duplex == DUPLEX_FULL)
4484                         bmcr |= BMCR_FULLDPLX;
4485
4486                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4487                     (bmcr != orig_bmcr)) {
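                        /* Force the link down by putting the PHY in
                         * loopback, wait up to ~15 ms for it to report
                         * link loss, then program the new forced mode.
                         */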
4488                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4489                         for (i = 0; i < 1500; i++) {
4490                                 u32 tmp;
4491
4492                                 udelay(10);
4493                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4494                                     tg3_readphy(tp, MII_BMSR, &tmp))
4495                                         continue;
4496                                 if (!(tmp & BMSR_LSTATUS)) {
4497                                         udelay(40);
4498                                         break;
4499                                 }
4500                         }
4501                         tg3_writephy(tp, MII_BMCR, bmcr);
4502                         udelay(40);
4503                 }
4504         }
4505 }
4506
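/* Read the PHY's current configuration back into tp->link_config so
 * that the driver's notion of autoneg/speed/duplex/flow control
 * matches whatever is actually programmed in the PHY (e.g. by boot
 * firmware).
 */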
4507 static int tg3_phy_pull_config(struct tg3 *tp)
4508 {
4509         int err;
4510         u32 val;
4511
4512         err = tg3_readphy(tp, MII_BMCR, &val);
4513         if (err)
4514                 goto done;
4515
4516         if (!(val & BMCR_ANENABLE)) {
4517                 tp->link_config.autoneg = AUTONEG_DISABLE;
4518                 tp->link_config.advertising = 0;
4519                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4520
4521                 err = -EIO;
4522
4523                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4524                 case 0:
4525                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526                                 goto done;
4527
4528                         tp->link_config.speed = SPEED_10;
4529                         break;
4530                 case BMCR_SPEED100:
4531                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4532                                 goto done;
4533
4534                         tp->link_config.speed = SPEED_100;
4535                         break;
4536                 case BMCR_SPEED1000:
4537                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4538                                 tp->link_config.speed = SPEED_1000;
4539                                 break;
4540                         }
4541                         /* Fall through */
4542                 default:
4543                         goto done;
4544                 }
4545
4546                 if (val & BMCR_FULLDPLX)
4547                         tp->link_config.duplex = DUPLEX_FULL;
4548                 else
4549                         tp->link_config.duplex = DUPLEX_HALF;
4550
4551                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4552
4553                 err = 0;
4554                 goto done;
4555         }
4556
4557         tp->link_config.autoneg = AUTONEG_ENABLE;
4558         tp->link_config.advertising = ADVERTISED_Autoneg;
4559         tg3_flag_set(tp, PAUSE_AUTONEG);
4560
4561         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4562                 u32 adv;
4563
4564                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4565                 if (err)
4566                         goto done;
4567
4568                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4569                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4570
4571                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4572         } else {
4573                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4574         }
4575
4576         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4577                 u32 adv;
4578
4579                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4580                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4581                         if (err)
4582                                 goto done;
4583
4584                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4585                 } else {
4586                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4587                         if (err)
4588                                 goto done;
4589
4590                         adv = tg3_decode_flowctrl_1000X(val);
4591                         tp->link_config.flowctrl = adv;
4592
4593                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4594                         adv = mii_adv_to_ethtool_adv_x(val);
4595                 }
4596
4597                 tp->link_config.advertising |= adv;
4598         }
4599
4600 done:
4601         return err;
4602 }
4603
4604 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4605 {
4606         int err;
4607
4608         /* Turn off tap power management and set the extended
4609          * packet length bit. */
4610         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4611
4612         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4613         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4614         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4615         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4616         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4617
4618         udelay(40);
4619
4620         return err;
4621 }
4622
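/* Check whether the EEE configuration read back from the PHY still
 * matches the settings cached in tp->eee.
 */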
4623 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4624 {
4625         struct ethtool_eee eee;
4626
4627         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4628                 return true;
4629
4630         tg3_eee_pull_config(tp, &eee);
4631
4632         if (tp->eee.eee_enabled) {
4633                 if (tp->eee.advertised != eee.advertised ||
4634                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4635                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4636                         return false;
4637         } else {
4638                 /* EEE is disabled but we're advertising */
4639                 if (eee.advertised)
4640                         return false;
4641         }
4642
4643         return true;
4644 }
4645
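/* Verify that the MII advertisement registers still contain exactly
 * what we intended to advertise; any mismatch means autoneg must be
 * restarted with fresh values.
 */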
4646 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4647 {
4648         u32 advmsk, tgtadv, advertising;
4649
4650         advertising = tp->link_config.advertising;
4651         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4652
4653         advmsk = ADVERTISE_ALL;
4654         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4655                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4656                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4657         }
4658
4659         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4660                 return false;
4661
4662         if ((*lcladv & advmsk) != tgtadv)
4663                 return false;
4664
4665         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4666                 u32 tg3_ctrl;
4667
4668                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4669
4670                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4671                         return false;
4672
4673                 if (tgtadv &&
4674                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4675                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4676                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4677                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4678                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4679                 } else {
4680                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4681                 }
4682
4683                 if (tg3_ctrl != tgtadv)
4684                         return false;
4685         }
4686
4687         return true;
4688 }
4689
4690 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4691 {
4692         u32 lpeth = 0;
4693
4694         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4695                 u32 val;
4696
4697                 if (tg3_readphy(tp, MII_STAT1000, &val))
4698                         return false;
4699
4700                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4701         }
4702
4703         if (tg3_readphy(tp, MII_LPA, rmtadv))
4704                 return false;
4705
4706         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4707         tp->link_config.rmt_adv = lpeth;
4708
4709         return true;
4710 }
4711
4712 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4713 {
4714         if (curr_link_up != tp->link_up) {
4715                 if (curr_link_up) {
4716                         netif_carrier_on(tp->dev);
4717                 } else {
4718                         netif_carrier_off(tp->dev);
4719                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4720                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4721                 }
4722
4723                 tg3_link_report(tp);
4724                 return true;
4725         }
4726
4727         return false;
4728 }
4729
4730 static void tg3_clear_mac_status(struct tg3 *tp)
4731 {
4732         tw32(MAC_EVENT, 0);
4733
4734         tw32_f(MAC_STATUS,
4735                MAC_STATUS_SYNC_CHANGED |
4736                MAC_STATUS_CFG_CHANGED |
4737                MAC_STATUS_MI_COMPLETION |
4738                MAC_STATUS_LNKSTATE_CHANGED);
4739         udelay(40);
4740 }
4741
4742 static void tg3_setup_eee(struct tg3 *tp)
4743 {
4744         u32 val;
4745
4746         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4747               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4748         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4749                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4750
4751         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4752
4753         tw32_f(TG3_CPMU_EEE_CTRL,
4754                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4755
4756         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4757               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4758               TG3_CPMU_EEEMD_LPI_IN_RX |
4759               TG3_CPMU_EEEMD_EEE_ENABLE;
4760
4761         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4762                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4763
4764         if (tg3_flag(tp, ENABLE_APE))
4765                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4766
4767         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4768
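        /* The low 16 bits of debounce timer 1 carry the tx_lpi_timer
         * value supplied through the ethtool EEE interface.
         */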
4769         tw32_f(TG3_CPMU_EEE_DBTMR1,
4770                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4771                (tp->eee.tx_lpi_timer & 0xffff));
4772
4773         tw32_f(TG3_CPMU_EEE_DBTMR2,
4774                TG3_CPMU_DBTMR2_APE_TX_2047US |
4775                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4776 }
4777
4778 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4779 {
4780         bool current_link_up;
4781         u32 bmsr, val;
4782         u32 lcl_adv, rmt_adv;
4783         u32 current_speed;
4784         u8 current_duplex;
4785         int i, err;
4786
4787         tg3_clear_mac_status(tp);
4788
4789         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4790                 tw32_f(MAC_MI_MODE,
4791                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4792                 udelay(80);
4793         }
4794
4795         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4796
4797         /* Some third-party PHYs need to be reset on link going
4798          * down.
4799          */
4800         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4801              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4802              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4803             tp->link_up) {
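                /* BMSR latches link-down events, so read it twice:
                 * the first read clears any stale latched state, the
                 * second returns the current link status.
                 */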
4804                 tg3_readphy(tp, MII_BMSR, &bmsr);
4805                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4806                     !(bmsr & BMSR_LSTATUS))
4807                         force_reset = true;
4808         }
4809         if (force_reset)
4810                 tg3_phy_reset(tp);
4811
4812         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4813                 tg3_readphy(tp, MII_BMSR, &bmsr);
4814                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4815                     !tg3_flag(tp, INIT_COMPLETE))
4816                         bmsr = 0;
4817
4818                 if (!(bmsr & BMSR_LSTATUS)) {
4819                         err = tg3_init_5401phy_dsp(tp);
4820                         if (err)
4821                                 return err;
4822
4823                         tg3_readphy(tp, MII_BMSR, &bmsr);
4824                         for (i = 0; i < 1000; i++) {
4825                                 udelay(10);
4826                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4827                                     (bmsr & BMSR_LSTATUS)) {
4828                                         udelay(40);
4829                                         break;
4830                                 }
4831                         }
4832
4833                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4834                             TG3_PHY_REV_BCM5401_B0 &&
4835                             !(bmsr & BMSR_LSTATUS) &&
4836                             tp->link_config.active_speed == SPEED_1000) {
4837                                 err = tg3_phy_reset(tp);
4838                                 if (!err)
4839                                         err = tg3_init_5401phy_dsp(tp);
4840                                 if (err)
4841                                         return err;
4842                         }
4843                 }
4844         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4845                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4846                 /* 5701 {A0,B0} CRC bug workaround */
4847                 tg3_writephy(tp, 0x15, 0x0a75);
4848                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4849                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4850                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4851         }
4852
4853         /* Clear pending interrupts... */
4854         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4855         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4856
4857         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4858                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4859         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4860                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4861
4862         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4863             tg3_asic_rev(tp) == ASIC_REV_5701) {
4864                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4865                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4866                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4867                 else
4868                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4869         }
4870
4871         current_link_up = false;
4872         current_speed = SPEED_UNKNOWN;
4873         current_duplex = DUPLEX_UNKNOWN;
4874         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4875         tp->link_config.rmt_adv = 0;
4876
4877         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4878                 err = tg3_phy_auxctl_read(tp,
4879                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4880                                           &val);
4881                 if (!err && !(val & (1 << 10))) {
4882                         tg3_phy_auxctl_write(tp,
4883                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4884                                              val | (1 << 10));
4885                         goto relink;
4886                 }
4887         }
4888
4889         bmsr = 0;
4890         for (i = 0; i < 100; i++) {
4891                 tg3_readphy(tp, MII_BMSR, &bmsr);
4892                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4893                     (bmsr & BMSR_LSTATUS))
4894                         break;
4895                 udelay(40);
4896         }
4897
4898         if (bmsr & BMSR_LSTATUS) {
4899                 u32 aux_stat, bmcr;
4900
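                /* Wait up to ~20 ms for the PHY to report a valid
                 * (non-zero) aux status word.
                 */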
4901                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4902                 for (i = 0; i < 2000; i++) {
4903                         udelay(10);
4904                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4905                             aux_stat)
4906                                 break;
4907                 }
4908
4909                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4910                                              &current_speed,
4911                                              &current_duplex);
4912
4913                 bmcr = 0;
4914                 for (i = 0; i < 200; i++) {
4915                         tg3_readphy(tp, MII_BMCR, &bmcr);
4916                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4917                                 continue;
4918                         if (bmcr && bmcr != 0x7fff)
4919                                 break;
4920                         udelay(10);
4921                 }
4922
4923                 lcl_adv = 0;
4924                 rmt_adv = 0;
4925
4926                 tp->link_config.active_speed = current_speed;
4927                 tp->link_config.active_duplex = current_duplex;
4928
4929                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4930                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4931
4932                         if ((bmcr & BMCR_ANENABLE) &&
4933                             eee_config_ok &&
4934                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4935                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4936                                 current_link_up = true;
4937
4938                         /* Changes to the EEE settings take effect only
4939                          * after a PHY reset.  If we skipped a reset due to
4940                          * Link Flap Avoidance being enabled, do it now.
4941                          */
4942                         if (!eee_config_ok &&
4943                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4944                             !force_reset) {
4945                                 tg3_setup_eee(tp);
4946                                 tg3_phy_reset(tp);
4947                         }
4948                 } else {
4949                         if (!(bmcr & BMCR_ANENABLE) &&
4950                             tp->link_config.speed == current_speed &&
4951                             tp->link_config.duplex == current_duplex) {
4952                                 current_link_up = true;
4953                         }
4954                 }
4955
4956                 if (current_link_up &&
4957                     tp->link_config.active_duplex == DUPLEX_FULL) {
4958                         u32 reg, bit;
4959
4960                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4961                                 reg = MII_TG3_FET_GEN_STAT;
4962                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4963                         } else {
4964                                 reg = MII_TG3_EXT_STAT;
4965                                 bit = MII_TG3_EXT_STAT_MDIX;
4966                         }
4967
4968                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4969                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4970
4971                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4972                 }
4973         }
4974
4975 relink:
4976         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4977                 tg3_phy_copper_begin(tp);
4978
4979                 if (tg3_flag(tp, ROBOSWITCH)) {
4980                         current_link_up = true;
4981                         /* FIXME: when a BCM5325 switch is used, use 100 Mbit/s */
4982                         current_speed = SPEED_1000;
4983                         current_duplex = DUPLEX_FULL;
4984                         tp->link_config.active_speed = current_speed;
4985                         tp->link_config.active_duplex = current_duplex;
4986                 }
4987
4988                 tg3_readphy(tp, MII_BMSR, &bmsr);
4989                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4990                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4991                         current_link_up = true;
4992         }
4993
4994         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4995         if (current_link_up) {
4996                 if (tp->link_config.active_speed == SPEED_100 ||
4997                     tp->link_config.active_speed == SPEED_10)
4998                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4999                 else
5000                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5001         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
5002                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5003         else
5004                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5005
5006         /* For the 5750 core in the BCM4785 chip to work properly in
5007          * RGMII mode, the LED Control Register must be set up.
5008          */
5009         if (tg3_flag(tp, RGMII_MODE)) {
5010                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5011                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5012
5013                 if (tp->link_config.active_speed == SPEED_10)
5014                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5015                 else if (tp->link_config.active_speed == SPEED_100)
5016                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5017                                      LED_CTRL_100MBPS_ON);
5018                 else if (tp->link_config.active_speed == SPEED_1000)
5019                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5020                                      LED_CTRL_1000MBPS_ON);
5021
5022                 tw32(MAC_LED_CTRL, led_ctrl);
5023                 udelay(40);
5024         }
5025
5026         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5027         if (tp->link_config.active_duplex == DUPLEX_HALF)
5028                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5029
5030         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5031                 if (current_link_up &&
5032                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5033                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5034                 else
5035                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5036         }
5037
5038         /* ??? Without this setting Netgear GA302T PHY does not
5039          * ??? send/receive packets...
5040          */
5041         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5042             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5043                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5044                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5045                 udelay(80);
5046         }
5047
5048         tw32_f(MAC_MODE, tp->mac_mode);
5049         udelay(40);
5050
5051         tg3_phy_eee_adjust(tp, current_link_up);
5052
5053         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5054                 /* Link state is polled via timer; no link-change MAC event needed. */
5055                 tw32_f(MAC_EVENT, 0);
5056         } else {
5057                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5058         }
5059         udelay(40);
5060
5061         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5062             current_link_up &&
5063             tp->link_config.active_speed == SPEED_1000 &&
5064             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5065                 udelay(120);
5066                 tw32_f(MAC_STATUS,
5067                      (MAC_STATUS_SYNC_CHANGED |
5068                       MAC_STATUS_CFG_CHANGED));
5069                 udelay(40);
5070                 tg3_write_mem(tp,
5071                               NIC_SRAM_FIRMWARE_MBOX,
5072                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5073         }
5074
5075         /* Prevent send BD corruption: keep CLKREQ disabled at 10/100. */
5076         if (tg3_flag(tp, CLKREQ_BUG)) {
5077                 if (tp->link_config.active_speed == SPEED_100 ||
5078                     tp->link_config.active_speed == SPEED_10)
5079                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5080                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5081                 else
5082                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5083                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5084         }
5085
5086         tg3_test_and_report_link_chg(tp, current_link_up);
5087
5088         return 0;
5089 }
5090
5091 struct tg3_fiber_aneginfo {
5092         int state;
5093 #define ANEG_STATE_UNKNOWN              0
5094 #define ANEG_STATE_AN_ENABLE            1
5095 #define ANEG_STATE_RESTART_INIT         2
5096 #define ANEG_STATE_RESTART              3
5097 #define ANEG_STATE_DISABLE_LINK_OK      4
5098 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5099 #define ANEG_STATE_ABILITY_DETECT       6
5100 #define ANEG_STATE_ACK_DETECT_INIT      7
5101 #define ANEG_STATE_ACK_DETECT           8
5102 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5103 #define ANEG_STATE_COMPLETE_ACK         10
5104 #define ANEG_STATE_IDLE_DETECT_INIT     11
5105 #define ANEG_STATE_IDLE_DETECT          12
5106 #define ANEG_STATE_LINK_OK              13
5107 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5108 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5109
5110         u32 flags;
5111 #define MR_AN_ENABLE            0x00000001
5112 #define MR_RESTART_AN           0x00000002
5113 #define MR_AN_COMPLETE          0x00000004
5114 #define MR_PAGE_RX              0x00000008
5115 #define MR_NP_LOADED            0x00000010
5116 #define MR_TOGGLE_TX            0x00000020
5117 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5118 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5119 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5120 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5121 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5122 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5123 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5124 #define MR_TOGGLE_RX            0x00002000
5125 #define MR_NP_RX                0x00004000
5126
5127 #define MR_LINK_OK              0x80000000
5128
5129         unsigned long link_time, cur_time;
5130
5131         u32 ability_match_cfg;
5132         int ability_match_count;
5133
5134         char ability_match, idle_match, ack_match;
5135
5136         u32 txconfig, rxconfig;
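/* Bit definitions for the tx/rx config words exchanged during
 * 1000BASE-X auto-negotiation.
 */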
5137 #define ANEG_CFG_NP             0x00000080
5138 #define ANEG_CFG_ACK            0x00000040
5139 #define ANEG_CFG_RF2            0x00000020
5140 #define ANEG_CFG_RF1            0x00000010
5141 #define ANEG_CFG_PS2            0x00000001
5142 #define ANEG_CFG_PS1            0x00008000
5143 #define ANEG_CFG_HD             0x00004000
5144 #define ANEG_CFG_FD             0x00002000
5145 #define ANEG_CFG_INVAL          0x00001f06
5146
5147 };
5148 #define ANEG_OK         0
5149 #define ANEG_DONE       1
5150 #define ANEG_TIMER_ENAB 2
5151 #define ANEG_FAILED     -1
5152
5153 #define ANEG_STATE_SETTLE_TIME  10000
5154
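/* Software implementation of the 1000BASE-X auto-negotiation
 * arbitration state machine.  fiber_autoneg() drives it by calling
 * this function once per tick (roughly every microsecond) until it
 * returns ANEG_DONE or ANEG_FAILED.
 */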
5155 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5156                                    struct tg3_fiber_aneginfo *ap)
5157 {
5158         u16 flowctrl;
5159         unsigned long delta;
5160         u32 rx_cfg_reg;
5161         int ret;
5162
5163         if (ap->state == ANEG_STATE_UNKNOWN) {
5164                 ap->rxconfig = 0;
5165                 ap->link_time = 0;
5166                 ap->cur_time = 0;
5167                 ap->ability_match_cfg = 0;
5168                 ap->ability_match_count = 0;
5169                 ap->ability_match = 0;
5170                 ap->idle_match = 0;
5171                 ap->ack_match = 0;
5172         }
5173         ap->cur_time++;
5174
5175         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5176                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5177
5178                 if (rx_cfg_reg != ap->ability_match_cfg) {
5179                         ap->ability_match_cfg = rx_cfg_reg;
5180                         ap->ability_match = 0;
5181                         ap->ability_match_count = 0;
5182                 } else {
5183                         if (++ap->ability_match_count > 1) {
5184                                 ap->ability_match = 1;
5185                                 ap->ability_match_cfg = rx_cfg_reg;
5186                         }
5187                 }
5188                 if (rx_cfg_reg & ANEG_CFG_ACK)
5189                         ap->ack_match = 1;
5190                 else
5191                         ap->ack_match = 0;
5192
5193                 ap->idle_match = 0;
5194         } else {
5195                 ap->idle_match = 1;
5196                 ap->ability_match_cfg = 0;
5197                 ap->ability_match_count = 0;
5198                 ap->ability_match = 0;
5199                 ap->ack_match = 0;
5200
5201                 rx_cfg_reg = 0;
5202         }
5203
5204         ap->rxconfig = rx_cfg_reg;
5205         ret = ANEG_OK;
5206
5207         switch (ap->state) {
5208         case ANEG_STATE_UNKNOWN:
5209                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5210                         ap->state = ANEG_STATE_AN_ENABLE;
5211
5212                 /* fall through */
5213         case ANEG_STATE_AN_ENABLE:
5214                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5215                 if (ap->flags & MR_AN_ENABLE) {
5216                         ap->link_time = 0;
5217                         ap->cur_time = 0;
5218                         ap->ability_match_cfg = 0;
5219                         ap->ability_match_count = 0;
5220                         ap->ability_match = 0;
5221                         ap->idle_match = 0;
5222                         ap->ack_match = 0;
5223
5224                         ap->state = ANEG_STATE_RESTART_INIT;
5225                 } else {
5226                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5227                 }
5228                 break;
5229
5230         case ANEG_STATE_RESTART_INIT:
5231                 ap->link_time = ap->cur_time;
5232                 ap->flags &= ~(MR_NP_LOADED);
5233                 ap->txconfig = 0;
5234                 tw32(MAC_TX_AUTO_NEG, 0);
5235                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5236                 tw32_f(MAC_MODE, tp->mac_mode);
5237                 udelay(40);
5238
5239                 ret = ANEG_TIMER_ENAB;
5240                 ap->state = ANEG_STATE_RESTART;
5241
5242                 /* fall through */
5243         case ANEG_STATE_RESTART:
5244                 delta = ap->cur_time - ap->link_time;
5245                 if (delta > ANEG_STATE_SETTLE_TIME)
5246                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5247                 else
5248                         ret = ANEG_TIMER_ENAB;
5249                 break;
5250
5251         case ANEG_STATE_DISABLE_LINK_OK:
5252                 ret = ANEG_DONE;
5253                 break;
5254
5255         case ANEG_STATE_ABILITY_DETECT_INIT:
5256                 ap->flags &= ~(MR_TOGGLE_TX);
5257                 ap->txconfig = ANEG_CFG_FD;
5258                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5259                 if (flowctrl & ADVERTISE_1000XPAUSE)
5260                         ap->txconfig |= ANEG_CFG_PS1;
5261                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5262                         ap->txconfig |= ANEG_CFG_PS2;
5263                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5264                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5265                 tw32_f(MAC_MODE, tp->mac_mode);
5266                 udelay(40);
5267
5268                 ap->state = ANEG_STATE_ABILITY_DETECT;
5269                 break;
5270
5271         case ANEG_STATE_ABILITY_DETECT:
5272                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5273                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5274                 break;
5275
5276         case ANEG_STATE_ACK_DETECT_INIT:
5277                 ap->txconfig |= ANEG_CFG_ACK;
5278                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5279                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5280                 tw32_f(MAC_MODE, tp->mac_mode);
5281                 udelay(40);
5282
5283                 ap->state = ANEG_STATE_ACK_DETECT;
5284
5285                 /* fall through */
5286         case ANEG_STATE_ACK_DETECT:
5287                 if (ap->ack_match != 0) {
5288                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5289                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5290                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5291                         } else {
5292                                 ap->state = ANEG_STATE_AN_ENABLE;
5293                         }
5294                 } else if (ap->ability_match != 0 &&
5295                            ap->rxconfig == 0) {
5296                         ap->state = ANEG_STATE_AN_ENABLE;
5297                 }
5298                 break;
5299
5300         case ANEG_STATE_COMPLETE_ACK_INIT:
5301                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5302                         ret = ANEG_FAILED;
5303                         break;
5304                 }
5305                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5306                                MR_LP_ADV_HALF_DUPLEX |
5307                                MR_LP_ADV_SYM_PAUSE |
5308                                MR_LP_ADV_ASYM_PAUSE |
5309                                MR_LP_ADV_REMOTE_FAULT1 |
5310                                MR_LP_ADV_REMOTE_FAULT2 |
5311                                MR_LP_ADV_NEXT_PAGE |
5312                                MR_TOGGLE_RX |
5313                                MR_NP_RX);
5314                 if (ap->rxconfig & ANEG_CFG_FD)
5315                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5316                 if (ap->rxconfig & ANEG_CFG_HD)
5317                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5318                 if (ap->rxconfig & ANEG_CFG_PS1)
5319                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5320                 if (ap->rxconfig & ANEG_CFG_PS2)
5321                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5322                 if (ap->rxconfig & ANEG_CFG_RF1)
5323                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5324                 if (ap->rxconfig & ANEG_CFG_RF2)
5325                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5326                 if (ap->rxconfig & ANEG_CFG_NP)
5327                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5328
5329                 ap->link_time = ap->cur_time;
5330
5331                 ap->flags ^= (MR_TOGGLE_TX);
5332                 if (ap->rxconfig & 0x0008)
5333                         ap->flags |= MR_TOGGLE_RX;
5334                 if (ap->rxconfig & ANEG_CFG_NP)
5335                         ap->flags |= MR_NP_RX;
5336                 ap->flags |= MR_PAGE_RX;
5337
5338                 ap->state = ANEG_STATE_COMPLETE_ACK;
5339                 ret = ANEG_TIMER_ENAB;
5340                 break;
5341
5342         case ANEG_STATE_COMPLETE_ACK:
5343                 if (ap->ability_match != 0 &&
5344                     ap->rxconfig == 0) {
5345                         ap->state = ANEG_STATE_AN_ENABLE;
5346                         break;
5347                 }
5348                 delta = ap->cur_time - ap->link_time;
5349                 if (delta > ANEG_STATE_SETTLE_TIME) {
5350                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5351                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5352                         } else {
5353                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5354                                     !(ap->flags & MR_NP_RX)) {
5355                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5356                                 } else {
5357                                         ret = ANEG_FAILED;
5358                                 }
5359                         }
5360                 }
5361                 break;
5362
5363         case ANEG_STATE_IDLE_DETECT_INIT:
5364                 ap->link_time = ap->cur_time;
5365                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5366                 tw32_f(MAC_MODE, tp->mac_mode);
5367                 udelay(40);
5368
5369                 ap->state = ANEG_STATE_IDLE_DETECT;
5370                 ret = ANEG_TIMER_ENAB;
5371                 break;
5372
5373         case ANEG_STATE_IDLE_DETECT:
5374                 if (ap->ability_match != 0 &&
5375                     ap->rxconfig == 0) {
5376                         ap->state = ANEG_STATE_AN_ENABLE;
5377                         break;
5378                 }
5379                 delta = ap->cur_time - ap->link_time;
5380                 if (delta > ANEG_STATE_SETTLE_TIME) {
5381                         /* XXX another gem from the Broadcom driver :( */
5382                         ap->state = ANEG_STATE_LINK_OK;
5383                 }
5384                 break;
5385
5386         case ANEG_STATE_LINK_OK:
5387                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5388                 ret = ANEG_DONE;
5389                 break;
5390
5391         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5392                 /* ??? unimplemented */
5393                 break;
5394
5395         case ANEG_STATE_NEXT_PAGE_WAIT:
5396                 /* ??? unimplemented */
5397                 break;
5398
5399         default:
5400                 ret = ANEG_FAILED;
5401                 break;
5402         }
5403
5404         return ret;
5405 }
5406
5407 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5408 {
5409         int res = 0;
5410         struct tg3_fiber_aneginfo aninfo;
5411         int status = ANEG_FAILED;
5412         unsigned int tick;
5413         u32 tmp;
5414
5415         tw32_f(MAC_TX_AUTO_NEG, 0);
5416
5417         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5418         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5419         udelay(40);
5420
5421         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5422         udelay(40);
5423
5424         memset(&aninfo, 0, sizeof(aninfo));
5425         aninfo.flags |= MR_AN_ENABLE;
5426         aninfo.state = ANEG_STATE_UNKNOWN;
5427         aninfo.cur_time = 0;
5428         tick = 0;
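        /* Run the autoneg state machine for at most ~195 ms. */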
5429         while (++tick < 195000) {
5430                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5431                 if (status == ANEG_DONE || status == ANEG_FAILED)
5432                         break;
5433
5434                 udelay(1);
5435         }
5436
5437         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5438         tw32_f(MAC_MODE, tp->mac_mode);
5439         udelay(40);
5440
5441         *txflags = aninfo.txconfig;
5442         *rxflags = aninfo.flags;
5443
5444         if (status == ANEG_DONE &&
5445             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5446                              MR_LP_ADV_FULL_DUPLEX)))
5447                 res = 1;
5448
5449         return res;
5450 }
5451
5452 static void tg3_init_bcm8002(struct tg3 *tp)
5453 {
5454         u32 mac_status = tr32(MAC_STATUS);
5455         int i;
5456
5457         /* Reset when initializing for the first time or when we have a link. */
5458         if (tg3_flag(tp, INIT_COMPLETE) &&
5459             !(mac_status & MAC_STATUS_PCS_SYNCED))
5460                 return;
5461
5462         /* Set PLL lock range. */
5463         tg3_writephy(tp, 0x16, 0x8007);
5464
5465         /* SW reset */
5466         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5467
5468         /* Wait for reset to complete. */
5469         /* XXX schedule_timeout() ... */
5470         for (i = 0; i < 500; i++)
5471                 udelay(10);
5472
5473         /* Config mode; select PMA/Ch 1 regs. */
5474         tg3_writephy(tp, 0x10, 0x8411);
5475
5476         /* Enable auto-lock and comdet, select txclk for tx. */
5477         tg3_writephy(tp, 0x11, 0x0a10);
5478
5479         tg3_writephy(tp, 0x18, 0x00a0);
5480         tg3_writephy(tp, 0x16, 0x41ff);
5481
5482         /* Assert and deassert POR. */
5483         tg3_writephy(tp, 0x13, 0x0400);
5484         udelay(40);
5485         tg3_writephy(tp, 0x13, 0x0000);
5486
5487         tg3_writephy(tp, 0x11, 0x0a50);
5488         udelay(40);
5489         tg3_writephy(tp, 0x11, 0x0a10);
5490
5491         /* Wait for signal to stabilize */
5492         /* XXX schedule_timeout() ... */
5493         for (i = 0; i < 15000; i++)
5494                 udelay(10);
5495
5496         /* Deselect the channel register so we can read the PHYID
5497          * later.
5498          */
5499         tg3_writephy(tp, 0x10, 0x8011);
5500 }
5501
5502 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5503 {
5504         u16 flowctrl;
5505         bool current_link_up;
5506         u32 sg_dig_ctrl, sg_dig_status;
5507         u32 serdes_cfg, expected_sg_dig_ctrl;
5508         int workaround, port_a;
5509
5510         serdes_cfg = 0;
5511         expected_sg_dig_ctrl = 0;
5512         workaround = 0;
5513         port_a = 1;
5514         current_link_up = false;
5515
5516         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5517             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5518                 workaround = 1;
5519                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5520                         port_a = 0;
5521
5522                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5523                 /* preserve bits 20-23 for voltage regulator */
5524                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5525         }
5526
5527         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5528
5529         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5530                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5531                         if (workaround) {
5532                                 u32 val = serdes_cfg;
5533
5534                                 if (port_a)
5535                                         val |= 0xc010000;
5536                                 else
5537                                         val |= 0x4010000;
5538                                 tw32_f(MAC_SERDES_CFG, val);
5539                         }
5540
5541                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5542                 }
5543                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5544                         tg3_setup_flow_control(tp, 0, 0);
5545                         current_link_up = true;
5546                 }
5547                 goto out;
5548         }
5549
5550         /* Want auto-negotiation.  */
5551         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5552
5553         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5554         if (flowctrl & ADVERTISE_1000XPAUSE)
5555                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5556         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5557                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5558
5559         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5560                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5561                     tp->serdes_counter &&
5562                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5563                                     MAC_STATUS_RCVD_CFG)) ==
5564                      MAC_STATUS_PCS_SYNCED)) {
5565                         tp->serdes_counter--;
5566                         current_link_up = true;
5567                         goto out;
5568                 }
5569 restart_autoneg:
5570                 if (workaround)
5571                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5572                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5573                 udelay(5);
5574                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5575
5576                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5577                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5578         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5579                                  MAC_STATUS_SIGNAL_DET)) {
5580                 sg_dig_status = tr32(SG_DIG_STATUS);
5581                 mac_status = tr32(MAC_STATUS);
5582
5583                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5584                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5585                         u32 local_adv = 0, remote_adv = 0;
5586
5587                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5588                                 local_adv |= ADVERTISE_1000XPAUSE;
5589                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5590                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5591
5592                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5593                                 remote_adv |= LPA_1000XPAUSE;
5594                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5595                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5596
5597                         tp->link_config.rmt_adv =
5598                                            mii_adv_to_ethtool_adv_x(remote_adv);
5599
5600                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5601                         current_link_up = true;
5602                         tp->serdes_counter = 0;
5603                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5604                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5605                         if (tp->serdes_counter)
5606                                 tp->serdes_counter--;
5607                         else {
5608                                 if (workaround) {
5609                                         u32 val = serdes_cfg;
5610
5611                                         if (port_a)
5612                                                 val |= 0xc010000;
5613                                         else
5614                                                 val |= 0x4010000;
5615
5616                                         tw32_f(MAC_SERDES_CFG, val);
5617                                 }
5618
5619                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5620                                 udelay(40);
5621
5622                                 /* Link parallel detection: link is up
5623                                  * only if we have PCS_SYNC and are not
5624                                  * receiving config code words. */
5625                                 mac_status = tr32(MAC_STATUS);
5626                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5627                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5628                                         tg3_setup_flow_control(tp, 0, 0);
5629                                         current_link_up = true;
5630                                         tp->phy_flags |=
5631                                                 TG3_PHYFLG_PARALLEL_DETECT;
5632                                         tp->serdes_counter =
5633                                                 SERDES_PARALLEL_DET_TIMEOUT;
5634                                 } else
5635                                         goto restart_autoneg;
5636                         }
5637                 }
5638         } else {
5639                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5640                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5641         }
5642
5643 out:
5644         return current_link_up;
5645 }
5646
5647 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5648 {
5649         bool current_link_up = false;
5650
5651         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5652                 goto out;
5653
5654         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5655                 u32 txflags, rxflags;
5656                 int i;
5657
5658                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5659                         u32 local_adv = 0, remote_adv = 0;
5660
5661                         if (txflags & ANEG_CFG_PS1)
5662                                 local_adv |= ADVERTISE_1000XPAUSE;
5663                         if (txflags & ANEG_CFG_PS2)
5664                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5665
5666                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5667                                 remote_adv |= LPA_1000XPAUSE;
5668                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5669                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5670
5671                         tp->link_config.rmt_adv =
5672                                            mii_adv_to_ethtool_adv_x(remote_adv);
5673
5674                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5675
5676                         current_link_up = true;
5677                 }
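                /* Give the MAC up to ~1.8 ms to ack the sync/config
                 * change status bits.
                 */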
5678                 for (i = 0; i < 30; i++) {
5679                         udelay(20);
5680                         tw32_f(MAC_STATUS,
5681                                (MAC_STATUS_SYNC_CHANGED |
5682                                 MAC_STATUS_CFG_CHANGED));
5683                         udelay(40);
5684                         if ((tr32(MAC_STATUS) &
5685                              (MAC_STATUS_SYNC_CHANGED |
5686                               MAC_STATUS_CFG_CHANGED)) == 0)
5687                                 break;
5688                 }
5689
5690                 mac_status = tr32(MAC_STATUS);
5691                 if (!current_link_up &&
5692                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5693                     !(mac_status & MAC_STATUS_RCVD_CFG))
5694                         current_link_up = true;
5695         } else {
5696                 tg3_setup_flow_control(tp, 0, 0);
5697
5698                 /* Forcing 1000FD link up. */
5699                 current_link_up = true;
5700
5701                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5702                 udelay(40);
5703
5704                 tw32_f(MAC_MODE, tp->mac_mode);
5705                 udelay(40);
5706         }
5707
5708 out:
5709         return current_link_up;
5710 }
5711
5712 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5713 {
5714         u32 orig_pause_cfg;
5715         u32 orig_active_speed;
5716         u8 orig_active_duplex;
5717         u32 mac_status;
5718         bool current_link_up;
5719         int i;
5720
5721         orig_pause_cfg = tp->link_config.active_flowctrl;
5722         orig_active_speed = tp->link_config.active_speed;
5723         orig_active_duplex = tp->link_config.active_duplex;
5724
5725         if (!tg3_flag(tp, HW_AUTONEG) &&
5726             tp->link_up &&
5727             tg3_flag(tp, INIT_COMPLETE)) {
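                /* If the link is already up and the MAC reports a
                 * stable, synced link with no pending config changes,
                 * there is nothing to renegotiate; just ack the
                 * change bits and return.
                 */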
5728                 mac_status = tr32(MAC_STATUS);
5729                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5730                                MAC_STATUS_SIGNAL_DET |
5731                                MAC_STATUS_CFG_CHANGED |
5732                                MAC_STATUS_RCVD_CFG);
5733                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5734                                    MAC_STATUS_SIGNAL_DET)) {
5735                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5736                                             MAC_STATUS_CFG_CHANGED));
5737                         return 0;
5738                 }
5739         }
5740
5741         tw32_f(MAC_TX_AUTO_NEG, 0);
5742
5743         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5744         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5745         tw32_f(MAC_MODE, tp->mac_mode);
5746         udelay(40);
5747
5748         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5749                 tg3_init_bcm8002(tp);
5750
5751         /* Enable link change events even while polling the serdes. */
5752         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5753         udelay(40);
5754
5755         current_link_up = false;
5756         tp->link_config.rmt_adv = 0;
5757         mac_status = tr32(MAC_STATUS);
5758
5759         if (tg3_flag(tp, HW_AUTONEG))
5760                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5761         else
5762                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5763
5764         tp->napi[0].hw_status->status =
5765                 (SD_STATUS_UPDATED |
5766                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5767
5768         for (i = 0; i < 100; i++) {
5769                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5770                                     MAC_STATUS_CFG_CHANGED));
5771                 udelay(5);
5772                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5773                                          MAC_STATUS_CFG_CHANGED |
5774                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5775                         break;
5776         }
5777
5778         mac_status = tr32(MAC_STATUS);
5779         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5780                 current_link_up = false;
5781                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5782                     tp->serdes_counter == 0) {
5783                         tw32_f(MAC_MODE, (tp->mac_mode |
5784                                           MAC_MODE_SEND_CONFIGS));
5785                         udelay(1);
5786                         tw32_f(MAC_MODE, tp->mac_mode);
5787                 }
5788         }
5789
5790         if (current_link_up) {
5791                 tp->link_config.active_speed = SPEED_1000;
5792                 tp->link_config.active_duplex = DUPLEX_FULL;
5793                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794                                     LED_CTRL_LNKLED_OVERRIDE |
5795                                     LED_CTRL_1000MBPS_ON));
5796         } else {
5797                 tp->link_config.active_speed = SPEED_UNKNOWN;
5798                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5799                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5800                                     LED_CTRL_LNKLED_OVERRIDE |
5801                                     LED_CTRL_TRAFFIC_OVERRIDE));
5802         }
5803
5804         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5805                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5806                 if (orig_pause_cfg != now_pause_cfg ||
5807                     orig_active_speed != tp->link_config.active_speed ||
5808                     orig_active_duplex != tp->link_config.active_duplex)
5809                         tg3_link_report(tp);
5810         }
5811
5812         return 0;
5813 }
5814
5815 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5816 {
5817         int err = 0;
5818         u32 bmsr, bmcr;
5819         u32 current_speed = SPEED_UNKNOWN;
5820         u8 current_duplex = DUPLEX_UNKNOWN;
5821         bool current_link_up = false;
5822         u32 local_adv, remote_adv, sgsr;
5823
5824         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5825              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5826              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5827              (sgsr & SERDES_TG3_SGMII_MODE)) {
5828
5829                 if (force_reset)
5830                         tg3_phy_reset(tp);
5831
5832                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5833
5834                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5835                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5836                 } else {
5837                         current_link_up = true;
5838                         if (sgsr & SERDES_TG3_SPEED_1000) {
5839                                 current_speed = SPEED_1000;
5840                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5841                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5842                                 current_speed = SPEED_100;
5843                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5844                         } else {
5845                                 current_speed = SPEED_10;
5846                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5847                         }
5848
5849                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5850                                 current_duplex = DUPLEX_FULL;
5851                         else
5852                                 current_duplex = DUPLEX_HALF;
5853                 }
5854
5855                 tw32_f(MAC_MODE, tp->mac_mode);
5856                 udelay(40);
5857
5858                 tg3_clear_mac_status(tp);
5859
5860                 goto fiber_setup_done;
5861         }
5862
5863         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5864         tw32_f(MAC_MODE, tp->mac_mode);
5865         udelay(40);
5866
5867         tg3_clear_mac_status(tp);
5868
5869         if (force_reset)
5870                 tg3_phy_reset(tp);
5871
5872         tp->link_config.rmt_adv = 0;
5873
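        /* BMSR link status is latched-low; read it twice so the second
         * read reflects the current link state (standard MII behavior).
         */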
5874         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5875         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5876         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5877                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5878                         bmsr |= BMSR_LSTATUS;
5879                 else
5880                         bmsr &= ~BMSR_LSTATUS;
5881         }
5882
5883         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5884
5885         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5886             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5887                 /* do nothing, just check for link up at the end */
5888         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5889                 u32 adv, newadv;
5890
5891                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5892                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5893                                  ADVERTISE_1000XPAUSE |
5894                                  ADVERTISE_1000XPSE_ASYM |
5895                                  ADVERTISE_SLCT);
5896
5897                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5898                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5899
5900                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5901                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5902                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5903                         tg3_writephy(tp, MII_BMCR, bmcr);
5904
5905                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5906                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5907                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5908
5909                         return err;
5910                 }
5911         } else {
5912                 u32 new_bmcr;
5913
5914                 bmcr &= ~BMCR_SPEED1000;
5915                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5916
5917                 if (tp->link_config.duplex == DUPLEX_FULL)
5918                         new_bmcr |= BMCR_FULLDPLX;
5919
5920                 if (new_bmcr != bmcr) {
5921                         /* BMCR_SPEED1000 is a reserved bit that needs
5922                          * to be set on write.
5923                          */
5924                         new_bmcr |= BMCR_SPEED1000;
5925
5926                         /* Force a linkdown */
5927                         if (tp->link_up) {
5928                                 u32 adv;
5929
5930                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5931                                 adv &= ~(ADVERTISE_1000XFULL |
5932                                          ADVERTISE_1000XHALF |
5933                                          ADVERTISE_SLCT);
5934                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5935                                 tg3_writephy(tp, MII_BMCR, bmcr |
5936                                                            BMCR_ANRESTART |
5937                                                            BMCR_ANENABLE);
5938                                 udelay(10);
5939                                 tg3_carrier_off(tp);
5940                         }
5941                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5942                         bmcr = new_bmcr;
5943                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5944                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5945                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5946                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5947                                         bmsr |= BMSR_LSTATUS;
5948                                 else
5949                                         bmsr &= ~BMSR_LSTATUS;
5950                         }
5951                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5952                 }
5953         }
5954
5955         if (bmsr & BMSR_LSTATUS) {
5956                 current_speed = SPEED_1000;
5957                 current_link_up = true;
5958                 if (bmcr & BMCR_FULLDPLX)
5959                         current_duplex = DUPLEX_FULL;
5960                 else
5961                         current_duplex = DUPLEX_HALF;
5962
5963                 local_adv = 0;
5964                 remote_adv = 0;
5965
5966                 if (bmcr & BMCR_ANENABLE) {
5967                         u32 common;
5968
5969                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5970                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5971                         common = local_adv & remote_adv;
5972                         if (common & (ADVERTISE_1000XHALF |
5973                                       ADVERTISE_1000XFULL)) {
5974                                 if (common & ADVERTISE_1000XFULL)
5975                                         current_duplex = DUPLEX_FULL;
5976                                 else
5977                                         current_duplex = DUPLEX_HALF;
5978
5979                                 tp->link_config.rmt_adv =
5980                                            mii_adv_to_ethtool_adv_x(remote_adv);
5981                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5982                                 /* Link is up via parallel detect */
5983                         } else {
5984                                 current_link_up = false;
5985                         }
5986                 }
5987         }
5988
5989 fiber_setup_done:
5990         if (current_link_up && current_duplex == DUPLEX_FULL)
5991                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5992
5993         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5994         if (tp->link_config.active_duplex == DUPLEX_HALF)
5995                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5996
5997         tw32_f(MAC_MODE, tp->mac_mode);
5998         udelay(40);
5999
6000         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
6001
6002         tp->link_config.active_speed = current_speed;
6003         tp->link_config.active_duplex = current_duplex;
6004
6005         tg3_test_and_report_link_chg(tp, current_link_up);
6006         return err;
6007 }
6008
6009 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6010 {
6011         if (tp->serdes_counter) {
6012                 /* Give autoneg time to complete. */
6013                 tp->serdes_counter--;
6014                 return;
6015         }
6016
6017         if (!tp->link_up &&
6018             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6019                 u32 bmcr;
6020
6021                 tg3_readphy(tp, MII_BMCR, &bmcr);
6022                 if (bmcr & BMCR_ANENABLE) {
6023                         u32 phy1, phy2;
6024
6025                         /* Select shadow register 0x1f */
6026                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6027                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6028
6029                         /* Select expansion interrupt status register */
6030                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6031                                          MII_TG3_DSP_EXP1_INT_STAT);
6032                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6033                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6034
6035                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6036                                 /* We have signal detect and are not
6037                                  * receiving config code words, so the
6038                                  * link is up via parallel detection.
6039                                  */
6040
6041                                 bmcr &= ~BMCR_ANENABLE;
6042                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6043                                 tg3_writephy(tp, MII_BMCR, bmcr);
6044                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6045                         }
6046                 }
6047         } else if (tp->link_up &&
6048                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6049                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6050                 u32 phy2;
6051
6052                 /* Select expansion interrupt status register */
6053                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6054                                  MII_TG3_DSP_EXP1_INT_STAT);
6055                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6056                 if (phy2 & 0x20) {
6057                         u32 bmcr;
6058
6059                         /* Config code words received, turn on autoneg. */
6060                         tg3_readphy(tp, MII_BMCR, &bmcr);
6061                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6062
6063                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6064
6065                 }
6066         }
6067 }
6068
6069 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6070 {
6071         u32 val;
6072         int err;
6073
6074         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6075                 err = tg3_setup_fiber_phy(tp, force_reset);
6076         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6077                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6078         else
6079                 err = tg3_setup_copper_phy(tp, force_reset);
6080
6081         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6082                 u32 scale;
6083
6084                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6085                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6086                         scale = 65;
6087                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6088                         scale = 6;
6089                 else
6090                         scale = 12;
6091
6092                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6093                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6094                 tw32(GRC_MISC_CFG, val);
6095         }
6096
6097         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6098               (6 << TX_LENGTHS_IPG_SHIFT);
6099         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6100             tg3_asic_rev(tp) == ASIC_REV_5762)
6101                 val |= tr32(MAC_TX_LENGTHS) &
6102                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6103                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6104
6105         if (tp->link_config.active_speed == SPEED_1000 &&
6106             tp->link_config.active_duplex == DUPLEX_HALF)
6107                 tw32(MAC_TX_LENGTHS, val |
6108                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6109         else
6110                 tw32(MAC_TX_LENGTHS, val |
6111                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6112
6113         if (!tg3_flag(tp, 5705_PLUS)) {
6114                 if (tp->link_up) {
6115                         tw32(HOSTCC_STAT_COAL_TICKS,
6116                              tp->coal.stats_block_coalesce_usecs);
6117                 } else {
6118                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6119                 }
6120         }
6121
6122         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6123                 val = tr32(PCIE_PWR_MGMT_THRESH);
6124                 if (!tp->link_up)
6125                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6126                               tp->pwrmgmt_thresh;
6127                 else
6128                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6129                 tw32(PCIE_PWR_MGMT_THRESH, val);
6130         }
6131
6132         return err;
6133 }
6134
6135 /* tp->lock must be held */
6136 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6137 {
6138         u64 stamp;
6139
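        /* Bracket the LSB read with system timestamps: that access samples
         * the hardware counter, so it bounds the PHC reading for
         * PTP_SYS_OFFSET_EXTENDED (this assumes the LSB read latches a
         * coherent MSB, as the read order here implies).
         */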
6140         ptp_read_system_prets(sts);
6141         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6142         ptp_read_system_postts(sts);
6143         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6144
6145         return stamp;
6146 }
6147
6148 /* tp->lock must be held */
6149 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6150 {
6151         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6152
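        /* Halt the reference clock while the two 32-bit halves are
         * rewritten so no reader can observe a torn LSB/MSB pair, then
         * resume.
         */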
6153         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6154         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6155         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6156         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6157 }
6158
6159 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6160 static inline void tg3_full_unlock(struct tg3 *tp);
6161 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6162 {
6163         struct tg3 *tp = netdev_priv(dev);
6164
6165         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6166                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6167                                 SOF_TIMESTAMPING_SOFTWARE;
6168
6169         if (tg3_flag(tp, PTP_CAPABLE)) {
6170                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6171                                         SOF_TIMESTAMPING_RX_HARDWARE |
6172                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6173         }
6174
6175         if (tp->ptp_clock)
6176                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6177         else
6178                 info->phc_index = -1;
6179
6180         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6181
6182         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6183                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6184                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6185                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6186         return 0;
6187 }
6188
6189 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6190 {
6191         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6192         bool neg_adj = false;
6193         u32 correction = 0;
6194
6195         if (ppb < 0) {
6196                 neg_adj = true;
6197                 ppb = -ppb;
6198         }
6199
6200         /* Frequency adjustment is performed in hardware with a 24-bit
6201          * accumulator and a programmable correction value. On each clock
6202          * cycle, the correction value is added to the accumulator, and
6203          * when it overflows, the time counter is incremented/decremented.
6204          *
6205          * So the conversion from ppb to the correction value is
6206          *              ppb * (1 << 24) / 1000000000
6207          */
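        /* Worked example (illustrative figures, not from the source): one
         * LSB of the correction value corresponds to 10^9 / 2^24 ~= 59.6
         * ppb, so a request of ppb = 1000 yields correction =
         * 1000 * 16777216 / 10^9 = 16, which the hardware realizes as
         * roughly 954 ppb.
         */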
6208         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6209                      TG3_EAV_REF_CLK_CORRECT_MASK;
6210
6211         tg3_full_lock(tp, 0);
6212
6213         if (correction)
6214                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6215                      TG3_EAV_REF_CLK_CORRECT_EN |
6216                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6217         else
6218                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6219
6220         tg3_full_unlock(tp);
6221
6222         return 0;
6223 }
6224
6225 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6226 {
6227         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228
6229         tg3_full_lock(tp, 0);
6230         tp->ptp_adjust += delta;
6231         tg3_full_unlock(tp);
6232
6233         return 0;
6234 }
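
/* Note on the design above: adjtime only updates the software offset
 * (tp->ptp_adjust); gettimex and the rx/tx timestamp paths add this
 * offset back in, so the hardware counter itself is rewritten only by
 * settime and resume.
 */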
6235
6236 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6237                             struct ptp_system_timestamp *sts)
6238 {
6239         u64 ns;
6240         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6241
6242         tg3_full_lock(tp, 0);
6243         ns = tg3_refclk_read(tp, sts);
6244         ns += tp->ptp_adjust;
6245         tg3_full_unlock(tp);
6246
6247         *ts = ns_to_timespec64(ns);
6248
6249         return 0;
6250 }
6251
6252 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6253                            const struct timespec64 *ts)
6254 {
6255         u64 ns;
6256         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6257
6258         ns = timespec64_to_ns(ts);
6259
6260         tg3_full_lock(tp, 0);
6261         tg3_refclk_write(tp, ns);
6262         tp->ptp_adjust = 0;
6263         tg3_full_unlock(tp);
6264
6265         return 0;
6266 }
6267
6268 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6269                           struct ptp_clock_request *rq, int on)
6270 {
6271         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6272         u32 clock_ctl;
6273         int rval = 0;
6274
6275         switch (rq->type) {
6276         case PTP_CLK_REQ_PEROUT:
6277                 /* Reject requests with unsupported flags */
6278                 if (rq->perout.flags)
6279                         return -EOPNOTSUPP;
6280
6281                 if (rq->perout.index != 0)
6282                         return -EINVAL;
6283
6284                 tg3_full_lock(tp, 0);
6285                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6286                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6287
6288                 if (on) {
6289                         u64 nsec;
6290
6291                         nsec = rq->perout.start.sec * 1000000000ULL +
6292                                rq->perout.start.nsec;
6293
6294                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6295                                 netdev_warn(tp->dev,
6296                                             "Device supports only a one-shot timesync output; period must be 0\n");
6297                                 rval = -EINVAL;
6298                                 goto err_out;
6299                         }
6300
6301                         if (nsec & (1ULL << 63)) {
6302                                 netdev_warn(tp->dev,
6303                                             "Start value (nsec) exceeds the limit; start must fit in 63 bits\n");
6304                                 rval = -EINVAL;
6305                                 goto err_out;
6306                         }
6307
6308                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6309                         tw32(TG3_EAV_WATCHDOG0_MSB,
6310                              TG3_EAV_WATCHDOG0_EN |
6311                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6312
6313                         tw32(TG3_EAV_REF_CLCK_CTL,
6314                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6315                 } else {
6316                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6317                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6318                 }
6319
6320 err_out:
6321                 tg3_full_unlock(tp);
6322                 return rval;
6323
6324         default:
6325                 break;
6326         }
6327
6328         return -EOPNOTSUPP;
6329 }
6330
6331 static const struct ptp_clock_info tg3_ptp_caps = {
6332         .owner          = THIS_MODULE,
6333         .name           = "tg3 clock",
6334         .max_adj        = 250000000,
6335         .n_alarm        = 0,
6336         .n_ext_ts       = 0,
6337         .n_per_out      = 1,
6338         .n_pins         = 0,
6339         .pps            = 0,
6340         .adjfreq        = tg3_ptp_adjfreq,
6341         .adjtime        = tg3_ptp_adjtime,
6342         .gettimex64     = tg3_ptp_gettimex,
6343         .settime64      = tg3_ptp_settime,
6344         .enable         = tg3_ptp_enable,
6345 };
6346
6347 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6348                                      struct skb_shared_hwtstamps *timestamp)
6349 {
6350         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6351         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6352                                            tp->ptp_adjust);
6353 }
6354
6355 /* tp->lock must be held */
6356 static void tg3_ptp_init(struct tg3 *tp)
6357 {
6358         if (!tg3_flag(tp, PTP_CAPABLE))
6359                 return;
6360
6361         /* Initialize the hardware clock to the system time. */
6362         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6363         tp->ptp_adjust = 0;
6364         tp->ptp_info = tg3_ptp_caps;
6365 }
6366
6367 /* tp->lock must be held */
6368 static void tg3_ptp_resume(struct tg3 *tp)
6369 {
6370         if (!tg3_flag(tp, PTP_CAPABLE))
6371                 return;
6372
6373         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6374         tp->ptp_adjust = 0;
6375 }
6376
6377 static void tg3_ptp_fini(struct tg3 *tp)
6378 {
6379         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6380                 return;
6381
6382         ptp_clock_unregister(tp->ptp_clock);
6383         tp->ptp_clock = NULL;
6384         tp->ptp_adjust = 0;
6385 }
6386
6387 static inline int tg3_irq_sync(struct tg3 *tp)
6388 {
6389         return tp->irq_sync;
6390 }
6391
6392 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6393 {
6394         int i;
6395
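        /* Bias dst by the register offset so each value lands at the same
         * offset it occupies in the register file; tg3_dump_state() can
         * then print the buffer with self-describing register addresses.
         */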
6396         dst = (u32 *)((u8 *)dst + off);
6397         for (i = 0; i < len; i += sizeof(u32))
6398                 *dst++ = tr32(off + i);
6399 }
6400
6401 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6402 {
6403         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6404         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6405         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6406         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6407         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6408         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6409         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6410         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6412         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6413         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6414         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6415         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6416         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6417         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6418         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6419         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6420         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6421         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6422
6423         if (tg3_flag(tp, SUPPORT_MSIX))
6424                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6425
6426         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6427         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6428         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6429         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6430         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6431         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6432         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6433         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6434
6435         if (!tg3_flag(tp, 5705_PLUS)) {
6436                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6437                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6438                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6439         }
6440
6441         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6442         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6443         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6444         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6445         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6446
6447         if (tg3_flag(tp, NVRAM))
6448                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6449 }
6450
6451 static void tg3_dump_state(struct tg3 *tp)
6452 {
6453         int i;
6454         u32 *regs;
6455
6456         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6457         if (!regs)
6458                 return;
6459
6460         if (tg3_flag(tp, PCI_EXPRESS)) {
6461                 /* Read up to but not including private PCI registers */
6462                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6463                         regs[i / sizeof(u32)] = tr32(i);
6464         } else
6465                 tg3_dump_legacy_regs(tp, regs);
6466
6467         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6468                 if (!regs[i + 0] && !regs[i + 1] &&
6469                     !regs[i + 2] && !regs[i + 3])
6470                         continue;
6471
6472                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6473                            i * 4,
6474                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6475         }
6476
6477         kfree(regs);
6478
6479         for (i = 0; i < tp->irq_cnt; i++) {
6480                 struct tg3_napi *tnapi = &tp->napi[i];
6481
6482                 /* SW status block */
6483                 netdev_err(tp->dev,
6484                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6485                            i,
6486                            tnapi->hw_status->status,
6487                            tnapi->hw_status->status_tag,
6488                            tnapi->hw_status->rx_jumbo_consumer,
6489                            tnapi->hw_status->rx_consumer,
6490                            tnapi->hw_status->rx_mini_consumer,
6491                            tnapi->hw_status->idx[0].rx_producer,
6492                            tnapi->hw_status->idx[0].tx_consumer);
6493
6494                 netdev_err(tp->dev,
6495                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6496                            i,
6497                            tnapi->last_tag, tnapi->last_irq_tag,
6498                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6499                            tnapi->rx_rcb_ptr,
6500                            tnapi->prodring.rx_std_prod_idx,
6501                            tnapi->prodring.rx_std_cons_idx,
6502                            tnapi->prodring.rx_jmb_prod_idx,
6503                            tnapi->prodring.rx_jmb_cons_idx);
6504         }
6505 }
6506
6507 /* This is called whenever we suspect that the system chipset is re-
6508  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6509  * is bogus tx completions. We try to recover by setting the
6510  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6511  * in the workqueue.
6512  */
6513 static void tg3_tx_recover(struct tg3 *tp)
6514 {
6515         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6516                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6517
6518         netdev_warn(tp->dev,
6519                     "The system may be re-ordering memory-mapped I/O "
6520                     "cycles to the network device, attempting to recover. "
6521                     "Please report the problem to the driver maintainer "
6522                     "and include system chipset information.\n");
6523
6524         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6525 }
6526
6527 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6528 {
6529         /* Tell compiler to fetch tx indices from memory. */
6530         barrier();
6531         return tnapi->tx_pending -
6532                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6533 }
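
/* Worked example of the ring arithmetic above (illustrative values,
 * assuming TG3_TX_RING_SIZE == 512): with tx_prod = 5 and tx_cons = 510,
 * the unsigned subtraction wraps, giving (5 - 510) & 511 = 7 descriptors
 * in flight, so tx_pending - 7 descriptors remain available.
 */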
6534
6535 /* Tigon3 never reports partial packet sends.  So we do not
6536  * need special logic to handle SKBs that have not had all
6537  * of their frags sent yet, like SunGEM does.
6538  */
6539 static void tg3_tx(struct tg3_napi *tnapi)
6540 {
6541         struct tg3 *tp = tnapi->tp;
6542         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6543         u32 sw_idx = tnapi->tx_cons;
6544         struct netdev_queue *txq;
6545         int index = tnapi - tp->napi;
6546         unsigned int pkts_compl = 0, bytes_compl = 0;
6547
6548         if (tg3_flag(tp, ENABLE_TSS))
6549                 index--;
6550
6551         txq = netdev_get_tx_queue(tp->dev, index);
6552
6553         while (sw_idx != hw_idx) {
6554                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6555                 struct sk_buff *skb = ri->skb;
6556                 int i, tx_bug = 0;
6557
6558                 if (unlikely(skb == NULL)) {
6559                         tg3_tx_recover(tp);
6560                         return;
6561                 }
6562
6563                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6564                         struct skb_shared_hwtstamps timestamp;
6565                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6566                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6567
6568                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6569
6570                         skb_tstamp_tx(skb, &timestamp);
6571                 }
6572
6573                 pci_unmap_single(tp->pdev,
6574                                  dma_unmap_addr(ri, mapping),
6575                                  skb_headlen(skb),
6576                                  PCI_DMA_TODEVICE);
6577
6578                 ri->skb = NULL;
6579
6580                 while (ri->fragmented) {
6581                         ri->fragmented = false;
6582                         sw_idx = NEXT_TX(sw_idx);
6583                         ri = &tnapi->tx_buffers[sw_idx];
6584                 }
6585
6586                 sw_idx = NEXT_TX(sw_idx);
6587
6588                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6589                         ri = &tnapi->tx_buffers[sw_idx];
6590                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6591                                 tx_bug = 1;
6592
6593                         pci_unmap_page(tp->pdev,
6594                                        dma_unmap_addr(ri, mapping),
6595                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6596                                        PCI_DMA_TODEVICE);
6597
6598                         while (ri->fragmented) {
6599                                 ri->fragmented = false;
6600                                 sw_idx = NEXT_TX(sw_idx);
6601                                 ri = &tnapi->tx_buffers[sw_idx];
6602                         }
6603
6604                         sw_idx = NEXT_TX(sw_idx);
6605                 }
6606
6607                 pkts_compl++;
6608                 bytes_compl += skb->len;
6609
6610                 dev_consume_skb_any(skb);
6611
6612                 if (unlikely(tx_bug)) {
6613                         tg3_tx_recover(tp);
6614                         return;
6615                 }
6616         }
6617
6618         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6619
6620         tnapi->tx_cons = sw_idx;
6621
6622         /* Need to make the tx_cons update visible to tg3_start_xmit()
6623          * before checking for netif_queue_stopped().  Without the
6624          * memory barrier, there is a small possibility that tg3_start_xmit()
6625          * will miss it and cause the queue to be stopped forever.
6626          */
6627         smp_mb();
6628
6629         if (unlikely(netif_tx_queue_stopped(txq) &&
6630                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6631                 __netif_tx_lock(txq, smp_processor_id());
6632                 if (netif_tx_queue_stopped(txq) &&
6633                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6634                         netif_tx_wake_queue(txq);
6635                 __netif_tx_unlock(txq);
6636         }
6637 }
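
/* Minimal sketch of the barrier pairing assumed above (the
 * tg3_start_xmit() side is shown schematically, not verbatim):
 *
 *      producer (start_xmit)             consumer (tg3_tx)
 *      ---------------------             -----------------
 *      netif_tx_stop_queue(txq);         tnapi->tx_cons = sw_idx;
 *      smp_mb();                         smp_mb();
 *      if (avail > wakeup thresh)        if (stopped && avail > thresh)
 *              wake queue;                       wake queue;
 *
 * Each side publishes its update before re-checking the other side's
 * state, so at least one of them observes the final values and the
 * queue cannot remain stopped forever.
 */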
6638
6639 static void tg3_frag_free(bool is_frag, void *data)
6640 {
6641         if (is_frag)
6642                 skb_free_frag(data);
6643         else
6644                 kfree(data);
6645 }
6646
6647 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6648 {
6649         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6650                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6651
6652         if (!ri->data)
6653                 return;
6654
6655         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6656                          map_sz, PCI_DMA_FROMDEVICE);
6657         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6658         ri->data = NULL;
6659 }
6660
6661
6662 /* Returns size of skb allocated or < 0 on error.
6663  *
6664  * We only need to fill in the address because the other members
6665  * of the RX descriptor are invariant; see tg3_init_rings.
6666  *
6667  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6668  * posting buffers we only dirty the first cache line of the RX
6669  * descriptor (containing the address), whereas for the RX status
6670  * buffers the cpu only reads the last cache line of the RX descriptor
6671  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6672  */
6673 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6674                              u32 opaque_key, u32 dest_idx_unmasked,
6675                              unsigned int *frag_size)
6676 {
6677         struct tg3_rx_buffer_desc *desc;
6678         struct ring_info *map;
6679         u8 *data;
6680         dma_addr_t mapping;
6681         int skb_size, data_size, dest_idx;
6682
6683         switch (opaque_key) {
6684         case RXD_OPAQUE_RING_STD:
6685                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6686                 desc = &tpr->rx_std[dest_idx];
6687                 map = &tpr->rx_std_buffers[dest_idx];
6688                 data_size = tp->rx_pkt_map_sz;
6689                 break;
6690
6691         case RXD_OPAQUE_RING_JUMBO:
6692                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6693                 desc = &tpr->rx_jmb[dest_idx].std;
6694                 map = &tpr->rx_jmb_buffers[dest_idx];
6695                 data_size = TG3_RX_JMB_MAP_SZ;
6696                 break;
6697
6698         default:
6699                 return -EINVAL;
6700         }
6701
6702         /* Do not overwrite any of the map or rp information
6703          * until we are sure we can commit to a new buffer.
6704          *
6705          * Callers depend upon this behavior and assume that
6706          * we leave everything unchanged if we fail.
6707          */
6708         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6709                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
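
        /* Resulting buffer layout (schematic):
         *
         *   [ TG3_RX_OFFSET headroom | data_size packet area | skb_shared_info ]
         *
         * build_skb() in tg3_rx() reuses this layout, which is why even
         * frag-sized allocations reserve room for struct skb_shared_info.
         */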
6710         if (skb_size <= PAGE_SIZE) {
6711                 data = napi_alloc_frag(skb_size);
6712                 *frag_size = skb_size;
6713         } else {
6714                 data = kmalloc(skb_size, GFP_ATOMIC);
6715                 *frag_size = 0;
6716         }
6717         if (!data)
6718                 return -ENOMEM;
6719
6720         mapping = pci_map_single(tp->pdev,
6721                                  data + TG3_RX_OFFSET(tp),
6722                                  data_size,
6723                                  PCI_DMA_FROMDEVICE);
6724         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6725                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6726                 return -EIO;
6727         }
6728
6729         map->data = data;
6730         dma_unmap_addr_set(map, mapping, mapping);
6731
6732         desc->addr_hi = ((u64)mapping >> 32);
6733         desc->addr_lo = ((u64)mapping & 0xffffffff);
6734
6735         return data_size;
6736 }
6737
6738 /* We only need to copy over the address because the other
6739  * members of the RX descriptor are invariant.  See notes above
6740  * tg3_alloc_rx_data for full details.
6741  */
6742 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6743                            struct tg3_rx_prodring_set *dpr,
6744                            u32 opaque_key, int src_idx,
6745                            u32 dest_idx_unmasked)
6746 {
6747         struct tg3 *tp = tnapi->tp;
6748         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6749         struct ring_info *src_map, *dest_map;
6750         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6751         int dest_idx;
6752
6753         switch (opaque_key) {
6754         case RXD_OPAQUE_RING_STD:
6755                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6756                 dest_desc = &dpr->rx_std[dest_idx];
6757                 dest_map = &dpr->rx_std_buffers[dest_idx];
6758                 src_desc = &spr->rx_std[src_idx];
6759                 src_map = &spr->rx_std_buffers[src_idx];
6760                 break;
6761
6762         case RXD_OPAQUE_RING_JUMBO:
6763                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6764                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6765                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6766                 src_desc = &spr->rx_jmb[src_idx].std;
6767                 src_map = &spr->rx_jmb_buffers[src_idx];
6768                 break;
6769
6770         default:
6771                 return;
6772         }
6773
6774         dest_map->data = src_map->data;
6775         dma_unmap_addr_set(dest_map, mapping,
6776                            dma_unmap_addr(src_map, mapping));
6777         dest_desc->addr_hi = src_desc->addr_hi;
6778         dest_desc->addr_lo = src_desc->addr_lo;
6779
6780         /* Ensure that the update to the skb happens after the physical
6781          * addresses have been transferred to the new BD location.
6782          */
6783         smp_wmb();
6784
6785         src_map->data = NULL;
6786 }
6787
6788 /* The RX ring scheme is composed of multiple rings which post fresh
6789  * buffers to the chip, and one special ring the chip uses to report
6790  * status back to the host.
6791  *
6792  * The special ring reports the status of received packets to the
6793  * host.  The chip does not write into the original descriptor the
6794  * RX buffer was obtained from.  The chip simply takes the original
6795  * descriptor as provided by the host, updates the status and length
6796  * field, then writes this into the next status ring entry.
6797  *
6798  * Each ring the host uses to post buffers to the chip is described
6799  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6800  * it is first placed into on-chip RAM.  When the packet's length
6801  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
6802  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6803  * which is within the range of the new packet's length is chosen.
6804  *
6805  * The "separate ring for rx status" scheme may sound queer, but it makes
6806  * sense from a cache coherency perspective.  If only the host writes
6807  * to the buffer post rings, and only the chip writes to the rx status
6808  * rings, then cache lines never move beyond shared-modified state.
6809  * If both the host and chip were to write into the same ring, cache line
6810  * eviction could occur since both entities want it in an exclusive state.
6811  */
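
/* Schematic of the flow described above (illustrative, not from the
 * source):
 *
 *   host                                  chip
 *   ----                                  ----
 *   post buffer to std/jumbo ring
 *                                         DMA packet into that buffer
 *                                         copy descriptor, fill in
 *                                         status/length, write it to the
 *                                         next return-ring entry
 *   read return-ring entry, locate the
 *   original buffer via the opaque
 *   cookie, then repost or replace it
 */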
6812 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6813 {
6814         struct tg3 *tp = tnapi->tp;
6815         u32 work_mask, rx_std_posted = 0;
6816         u32 std_prod_idx, jmb_prod_idx;
6817         u32 sw_idx = tnapi->rx_rcb_ptr;
6818         u16 hw_idx;
6819         int received;
6820         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6821
6822         hw_idx = *(tnapi->rx_rcb_prod_idx);
6823         /*
6824          * We need to order the read of hw_idx and the read of
6825          * the opaque cookie.
6826          */
6827         rmb();
6828         work_mask = 0;
6829         received = 0;
6830         std_prod_idx = tpr->rx_std_prod_idx;
6831         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6832         while (sw_idx != hw_idx && budget > 0) {
6833                 struct ring_info *ri;
6834                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6835                 unsigned int len;
6836                 struct sk_buff *skb;
6837                 dma_addr_t dma_addr;
6838                 u32 opaque_key, desc_idx, *post_ptr;
6839                 u8 *data;
6840                 u64 tstamp = 0;
6841
6842                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6843                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6844                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6845                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6846                         dma_addr = dma_unmap_addr(ri, mapping);
6847                         data = ri->data;
6848                         post_ptr = &std_prod_idx;
6849                         rx_std_posted++;
6850                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6851                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6852                         dma_addr = dma_unmap_addr(ri, mapping);
6853                         data = ri->data;
6854                         post_ptr = &jmb_prod_idx;
6855                 } else
6856                         goto next_pkt_nopost;
6857
6858                 work_mask |= opaque_key;
6859
6860                 if (desc->err_vlan & RXD_ERR_MASK) {
6861                 drop_it:
6862                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6863                                        desc_idx, *post_ptr);
6864                 drop_it_no_recycle:
6865                         /* Other statistics are tracked by the card. */
6866                         tp->rx_dropped++;
6867                         goto next_pkt;
6868                 }
6869
6870                 prefetch(data + TG3_RX_OFFSET(tp));
6871                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6872                       ETH_FCS_LEN;
6873
6874                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6875                      RXD_FLAG_PTPSTAT_PTPV1 ||
6876                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6877                      RXD_FLAG_PTPSTAT_PTPV2) {
6878                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6879                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6880                 }
6881
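
                /* Copy-break strategy: large frames hand the DMA buffer to
                 * the stack via build_skb() and a fresh buffer is posted;
                 * small frames are copied into a new skb and the DMA buffer
                 * is recycled back onto the producer ring.
                 */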
6882                 if (len > TG3_RX_COPY_THRESH(tp)) {
6883                         int skb_size;
6884                         unsigned int frag_size;
6885
6886                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6887                                                     *post_ptr, &frag_size);
6888                         if (skb_size < 0)
6889                                 goto drop_it;
6890
6891                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6892                                          PCI_DMA_FROMDEVICE);
6893
6894                         /* Ensure that the update to the data happens
6895                          * after the usage of the old DMA mapping.
6896                          */
6897                         smp_wmb();
6898
6899                         ri->data = NULL;
6900
6901                         skb = build_skb(data, frag_size);
6902                         if (!skb) {
6903                                 tg3_frag_free(frag_size != 0, data);
6904                                 goto drop_it_no_recycle;
6905                         }
6906                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6907                 } else {
6908                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6909                                        desc_idx, *post_ptr);
6910
6911                         skb = netdev_alloc_skb(tp->dev,
6912                                                len + TG3_RAW_IP_ALIGN);
6913                         if (skb == NULL)
6914                                 goto drop_it_no_recycle;
6915
6916                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6917                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6918                         memcpy(skb->data,
6919                                data + TG3_RX_OFFSET(tp),
6920                                len);
6921                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6922                 }
6923
6924                 skb_put(skb, len);
6925                 if (tstamp)
6926                         tg3_hwclock_to_timestamp(tp, tstamp,
6927                                                  skb_hwtstamps(skb));
6928
6929                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6930                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6931                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6932                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6933                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6934                 else
6935                         skb_checksum_none_assert(skb);
6936
6937                 skb->protocol = eth_type_trans(skb, tp->dev);
6938
6939                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6940                     skb->protocol != htons(ETH_P_8021Q) &&
6941                     skb->protocol != htons(ETH_P_8021AD)) {
6942                         dev_kfree_skb_any(skb);
6943                         goto drop_it_no_recycle;
6944                 }
6945
6946                 if (desc->type_flags & RXD_FLAG_VLAN &&
6947                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6948                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6949                                                desc->err_vlan & RXD_VLAN_MASK);
6950
6951                 napi_gro_receive(&tnapi->napi, skb);
6952
6953                 received++;
6954                 budget--;
6955
6956 next_pkt:
6957                 (*post_ptr)++;
6958
6959                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6960                         tpr->rx_std_prod_idx = std_prod_idx &
6961                                                tp->rx_std_ring_mask;
6962                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6963                                      tpr->rx_std_prod_idx);
6964                         work_mask &= ~RXD_OPAQUE_RING_STD;
6965                         rx_std_posted = 0;
6966                 }
6967 next_pkt_nopost:
6968                 sw_idx++;
6969                 sw_idx &= tp->rx_ret_ring_mask;
6970
6971                 /* Refresh hw_idx to see if there is new work */
6972                 if (sw_idx == hw_idx) {
6973                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6974                         rmb();
6975                 }
6976         }
6977
6978         /* ACK the status ring. */
6979         tnapi->rx_rcb_ptr = sw_idx;
6980         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6981
6982         /* Refill RX ring(s). */
6983         if (!tg3_flag(tp, ENABLE_RSS)) {
6984                 /* Sync BD data before updating mailbox */
6985                 wmb();
6986
6987                 if (work_mask & RXD_OPAQUE_RING_STD) {
6988                         tpr->rx_std_prod_idx = std_prod_idx &
6989                                                tp->rx_std_ring_mask;
6990                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6991                                      tpr->rx_std_prod_idx);
6992                 }
6993                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6994                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6995                                                tp->rx_jmb_ring_mask;
6996                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6997                                      tpr->rx_jmb_prod_idx);
6998                 }
6999         } else if (work_mask) {
7000                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
7001                  * updated before the producer indices can be updated.
7002                  */
7003                 smp_wmb();
7004
7005                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7006                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7007
7008                 if (tnapi != &tp->napi[1]) {
7009                         tp->rx_refill = true;
7010                         napi_schedule(&tp->napi[1].napi);
7011                 }
7012         }
7013
7014         return received;
7015 }
7016
7017 static void tg3_poll_link(struct tg3 *tp)
7018 {
7019         /* handle link change and other phy events */
7020         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7021                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7022
7023                 if (sblk->status & SD_STATUS_LINK_CHG) {
7024                         sblk->status = SD_STATUS_UPDATED |
7025                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7026                         spin_lock(&tp->lock);
7027                         if (tg3_flag(tp, USE_PHYLIB)) {
7028                                 tw32_f(MAC_STATUS,
7029                                      (MAC_STATUS_SYNC_CHANGED |
7030                                       MAC_STATUS_CFG_CHANGED |
7031                                       MAC_STATUS_MI_COMPLETION |
7032                                       MAC_STATUS_LNKSTATE_CHANGED));
7033                                 udelay(40);
7034                         } else
7035                                 tg3_setup_phy(tp, false);
7036                         spin_unlock(&tp->lock);
7037                 }
7038         }
7039 }
7040
7041 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7042                                 struct tg3_rx_prodring_set *dpr,
7043                                 struct tg3_rx_prodring_set *spr)
7044 {
7045         u32 si, di, cpycnt, src_prod_idx;
7046         int i, err = 0;
7047
7048         while (1) {
7049                 src_prod_idx = spr->rx_std_prod_idx;
7050
7051                 /* Make sure updates to the rx_std_buffers[] entries and the
7052                  * standard producer index are seen in the correct order.
7053                  */
7054                 smp_rmb();
7055
7056                 if (spr->rx_std_cons_idx == src_prod_idx)
7057                         break;
7058
7059                 if (spr->rx_std_cons_idx < src_prod_idx)
7060                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7061                 else
7062                         cpycnt = tp->rx_std_ring_mask + 1 -
7063                                  spr->rx_std_cons_idx;
7064
7065                 cpycnt = min(cpycnt,
7066                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7067
7068                 si = spr->rx_std_cons_idx;
7069                 di = dpr->rx_std_prod_idx;
7070
7071                 for (i = di; i < di + cpycnt; i++) {
7072                         if (dpr->rx_std_buffers[i].data) {
7073                                 cpycnt = i - di;
7074                                 err = -ENOSPC;
7075                                 break;
7076                         }
7077                 }
7078
7079                 if (!cpycnt)
7080                         break;
7081
7082                 /* Ensure that updates to the rx_std_buffers ring and the
7083                  * shadowed hardware producer ring from tg3_recycle_skb() are
7084                  * ordered correctly WRT the skb check above.
7085                  */
7086                 smp_rmb();
7087
7088                 memcpy(&dpr->rx_std_buffers[di],
7089                        &spr->rx_std_buffers[si],
7090                        cpycnt * sizeof(struct ring_info));
7091
7092                 for (i = 0; i < cpycnt; i++, di++, si++) {
7093                         struct tg3_rx_buffer_desc *sbd, *dbd;
7094                         sbd = &spr->rx_std[si];
7095                         dbd = &dpr->rx_std[di];
7096                         dbd->addr_hi = sbd->addr_hi;
7097                         dbd->addr_lo = sbd->addr_lo;
7098                 }
7099
7100                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7101                                        tp->rx_std_ring_mask;
7102                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7103                                        tp->rx_std_ring_mask;
7104         }
7105
7106         while (1) {
7107                 src_prod_idx = spr->rx_jmb_prod_idx;
7108
7109                 /* Make sure updates to the rx_jmb_buffers[] entries and
7110                  * the jumbo producer index are seen in the correct order.
7111                  */
7112                 smp_rmb();
7113
7114                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7115                         break;
7116
7117                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7118                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7119                 else
7120                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7121                                  spr->rx_jmb_cons_idx;
7122
7123                 cpycnt = min(cpycnt,
7124                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7125
7126                 si = spr->rx_jmb_cons_idx;
7127                 di = dpr->rx_jmb_prod_idx;
7128
7129                 for (i = di; i < di + cpycnt; i++) {
7130                         if (dpr->rx_jmb_buffers[i].data) {
7131                                 cpycnt = i - di;
7132                                 err = -ENOSPC;
7133                                 break;
7134                         }
7135                 }
7136
7137                 if (!cpycnt)
7138                         break;
7139
7140                 /* Ensure that updates to the rx_jmb_buffers ring and the
7141                  * shadowed hardware producer ring from tg3_recycle_skb() are
7142                  * ordered correctly WRT the skb check above.
7143                  */
7144                 smp_rmb();
7145
7146                 memcpy(&dpr->rx_jmb_buffers[di],
7147                        &spr->rx_jmb_buffers[si],
7148                        cpycnt * sizeof(struct ring_info));
7149
7150                 for (i = 0; i < cpycnt; i++, di++, si++) {
7151                         struct tg3_rx_buffer_desc *sbd, *dbd;
7152                         sbd = &spr->rx_jmb[si].std;
7153                         dbd = &dpr->rx_jmb[di].std;
7154                         dbd->addr_hi = sbd->addr_hi;
7155                         dbd->addr_lo = sbd->addr_lo;
7156                 }
7157
7158                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7159                                        tp->rx_jmb_ring_mask;
7160                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7161                                        tp->rx_jmb_ring_mask;
7162         }
7163
7164         return err;
7165 }
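
/* Worked example of the cpycnt clamping above (assumed numbers): with a
 * 512-entry standard ring (mask 511), source cons_idx = 500 and source
 * prod_idx = 20 (wrapped), the first pass copies 512 - 500 = 12 entries
 * up to the end of the source ring.  If the destination prod_idx is 508,
 * the min() clamps that to 512 - 508 = 4 entries, and the rest is picked
 * up on the next loop iteration.  The per-slot .data check shrinks
 * cpycnt further if the destination already holds unconsumed buffers,
 * returning -ENOSPC so the caller kicks the coalescing engine.
 */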
7166
7167 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7168 {
7169         struct tg3 *tp = tnapi->tp;
7170
7171         /* run TX completion thread */
7172         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7173                 tg3_tx(tnapi);
7174                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7175                         return work_done;
7176         }
7177
7178         if (!tnapi->rx_rcb_prod_idx)
7179                 return work_done;
7180
7181         /* run RX thread, within the bounds set by NAPI.
7182          * All RX "locking" is done by ensuring outside
7183          * code synchronizes with tg3->napi.poll()
7184          */
7185         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7186                 work_done += tg3_rx(tnapi, budget - work_done);
7187
7188         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7189                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7190                 int i, err = 0;
7191                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7192                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7193
7194                 tp->rx_refill = false;
7195                 for (i = 1; i <= tp->rxq_cnt; i++)
7196                         err |= tg3_rx_prodring_xfer(tp, dpr,
7197                                                     &tp->napi[i].prodring);
7198
7199                 wmb();
7200
7201                 if (std_prod_idx != dpr->rx_std_prod_idx)
7202                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7203                                      dpr->rx_std_prod_idx);
7204
7205                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7206                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7207                                      dpr->rx_jmb_prod_idx);
7208
7209                 if (err)
7210                         tw32_f(HOSTCC_MODE, tp->coal_now);
7211         }
7212
7213         return work_done;
7214 }
7215
7216 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7217 {
7218         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7219                 schedule_work(&tp->reset_task);
7220 }
7221
7222 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7223 {
7224         cancel_work_sync(&tp->reset_task);
7225         tg3_flag_clear(tp, RESET_TASK_PENDING);
7226         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7227 }
7228
7229 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7230 {
7231         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7232         struct tg3 *tp = tnapi->tp;
7233         int work_done = 0;
7234         struct tg3_hw_status *sblk = tnapi->hw_status;
7235
7236         while (1) {
7237                 work_done = tg3_poll_work(tnapi, work_done, budget);
7238
7239                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7240                         goto tx_recovery;
7241
7242                 if (unlikely(work_done >= budget))
7243                         break;
7244
7245                 /* tnapi->last_tag is used when re-enabling interrupts below
7246                  * to tell the hw how much work has been processed,
7247                  * so we must read it before checking for more work.
7248                  */
7249                 tnapi->last_tag = sblk->status_tag;
7250                 tnapi->last_irq_tag = tnapi->last_tag;
7251                 rmb();
7252
7253                 /* check for RX/TX work to do */
7254                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7255                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7256
7257                         /* This test is not race-free, but it reduces
7258                          * the number of interrupts by looping again.
7259                          */
7260                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7261                                 continue;
7262
7263                         napi_complete_done(napi, work_done);
7264                         /* Reenable interrupts. */
7265                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7266
7267                         /* This test is synchronized by napi_schedule()
7268                          * and napi_complete() to close the race condition.
7269                          */
7270                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7271                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7272                                                   HOSTCC_MODE_ENABLE |
7273                                                   tnapi->coal_now);
7274                         }
7275                         break;
7276                 }
7277         }
7278
7279         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7280         return work_done;
7281
7282 tx_recovery:
7283         /* work_done is guaranteed to be less than budget. */
7284         napi_complete(napi);
7285         tg3_reset_task_schedule(tp);
7286         return work_done;
7287 }
7288
7289 static void tg3_process_error(struct tg3 *tp)
7290 {
7291         u32 val;
7292         bool real_error = false;
7293
7294         if (tg3_flag(tp, ERROR_PROCESSED))
7295                 return;
7296
7297         /* Check Flow Attention register */
7298         val = tr32(HOSTCC_FLOW_ATTN);
7299         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7300                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7301                 real_error = true;
7302         }
7303
7304         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7305                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7306                 real_error = true;
7307         }
7308
7309         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7310                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7311                 real_error = true;
7312         }
7313
7314         if (!real_error)
7315                 return;
7316
7317         tg3_dump_state(tp);
7318
7319         tg3_flag_set(tp, ERROR_PROCESSED);
7320         tg3_reset_task_schedule(tp);
7321 }
7322
7323 static int tg3_poll(struct napi_struct *napi, int budget)
7324 {
7325         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7326         struct tg3 *tp = tnapi->tp;
7327         int work_done = 0;
7328         struct tg3_hw_status *sblk = tnapi->hw_status;
7329
7330         while (1) {
7331                 if (sblk->status & SD_STATUS_ERROR)
7332                         tg3_process_error(tp);
7333
7334                 tg3_poll_link(tp);
7335
7336                 work_done = tg3_poll_work(tnapi, work_done, budget);
7337
7338                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7339                         goto tx_recovery;
7340
7341                 if (unlikely(work_done >= budget))
7342                         break;
7343
7344                 if (tg3_flag(tp, TAGGED_STATUS)) {
7345                         /* tnapi->last_tag is used in tg3_int_reenable() below
7346                          * to tell the hw how much work has been processed,
7347                          * so we must read it before checking for more work.
7348                          */
7349                         tnapi->last_tag = sblk->status_tag;
7350                         tnapi->last_irq_tag = tnapi->last_tag;
7351                         rmb();
7352                 } else
7353                         sblk->status &= ~SD_STATUS_UPDATED;
7354
7355                 if (likely(!tg3_has_work(tnapi))) {
7356                         napi_complete_done(napi, work_done);
7357                         tg3_int_reenable(tnapi);
7358                         break;
7359                 }
7360         }
7361
7362         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7363         return work_done;
7364
7365 tx_recovery:
7366         /* work_done is guaranteed to be less than budget. */
7367         napi_complete(napi);
7368         tg3_reset_task_schedule(tp);
7369         return work_done;
7370 }
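
/* Both pollers above follow the standard NAPI contract: consume at most
 * "budget" packets, return the full budget to stay scheduled, or call
 * napi_complete_done() and re-enable the device interrupt once idle.
 * Reduced to a sketch with hypothetical my_* helpers (not tg3 symbols):
 */
#if 0	/* illustrative only */
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int work_done = my_clean_rings(priv, budget);

	if (work_done < budget && napi_complete_done(napi, work_done))
		my_enable_irq(priv);	/* only once NAPI is really done */

	return work_done;
}
#endif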
7371
7372 static void tg3_napi_disable(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         for (i = tp->irq_cnt - 1; i >= 0; i--)
7377                 napi_disable(&tp->napi[i].napi);
7378 }
7379
7380 static void tg3_napi_enable(struct tg3 *tp)
7381 {
7382         int i;
7383
7384         for (i = 0; i < tp->irq_cnt; i++)
7385                 napi_enable(&tp->napi[i].napi);
7386 }
7387
7388 static void tg3_napi_init(struct tg3 *tp)
7389 {
7390         int i;
7391
7392         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7393         for (i = 1; i < tp->irq_cnt; i++)
7394                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7395 }
7396
7397 static void tg3_napi_fini(struct tg3 *tp)
7398 {
7399         int i;
7400
7401         for (i = 0; i < tp->irq_cnt; i++)
7402                 netif_napi_del(&tp->napi[i].napi);
7403 }
7404
7405 static inline void tg3_netif_stop(struct tg3 *tp)
7406 {
7407         netif_trans_update(tp->dev);    /* prevent tx timeout */
7408         tg3_napi_disable(tp);
7409         netif_carrier_off(tp->dev);
7410         netif_tx_disable(tp->dev);
7411 }
7412
7413 /* tp->lock must be held */
7414 static inline void tg3_netif_start(struct tg3 *tp)
7415 {
7416         tg3_ptp_resume(tp);
7417
7418         /* NOTE: unconditional netif_tx_wake_all_queues is only
7419          * appropriate so long as all callers are assured to
7420          * have free tx slots (such as after tg3_init_hw)
7421          */
7422         netif_tx_wake_all_queues(tp->dev);
7423
7424         if (tp->link_up)
7425                 netif_carrier_on(tp->dev);
7426
7427         tg3_napi_enable(tp);
7428         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7429         tg3_enable_ints(tp);
7430 }
7431
7432 static void tg3_irq_quiesce(struct tg3 *tp)
7433         __releases(tp->lock)
7434         __acquires(tp->lock)
7435 {
7436         int i;
7437
7438         BUG_ON(tp->irq_sync);
7439
7440         tp->irq_sync = 1;
7441         smp_mb();
7442
7443         spin_unlock_bh(&tp->lock);
7444
7445         for (i = 0; i < tp->irq_cnt; i++)
7446                 synchronize_irq(tp->napi[i].irq_vec);
7447
7448         spin_lock_bh(&tp->lock);
7449 }
7450
7451 /* Fully shut down all tg3 driver activity elsewhere in the system.
7452  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
7453  * Most of the time, this is not necessary except when
7454  * shutting down the device.
7455  */
7456 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7457 {
7458         spin_lock_bh(&tp->lock);
7459         if (irq_sync)
7460                 tg3_irq_quiesce(tp);
7461 }
7462
7463 static inline void tg3_full_unlock(struct tg3 *tp)
7464 {
7465         spin_unlock_bh(&tp->lock);
7466 }
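
/* The quiesce handshake in tg3_irq_quiesce() relies on every ISR testing
 * tp->irq_sync (via tg3_irq_sync() in the handlers below) before it
 * schedules NAPI; the smp_mb() makes the flag visible before
 * synchronize_irq() waits out handlers already in flight.  Sketch of the
 * ISR side of such a handshake (hypothetical my_* names):
 */
#if 0	/* illustrative only */
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* Paired with the smp_mb() after setting irq_sync: once the
	 * quiesce has begun, drop the event instead of scheduling NAPI.
	 */
	if (READ_ONCE(priv->irq_sync))
		return IRQ_HANDLED;

	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}
#endif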
7467
7468 /* One-shot MSI handler - Chip automatically disables interrupt
7469  * after sending MSI so driver doesn't have to do it.
7470  */
7471 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7472 {
7473         struct tg3_napi *tnapi = dev_id;
7474         struct tg3 *tp = tnapi->tp;
7475
7476         prefetch(tnapi->hw_status);
7477         if (tnapi->rx_rcb)
7478                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479
7480         if (likely(!tg3_irq_sync(tp)))
7481                 napi_schedule(&tnapi->napi);
7482
7483         return IRQ_HANDLED;
7484 }
7485
7486 /* MSI ISR - No need to check for interrupt sharing and no need to
7487  * flush status block and interrupt mailbox. PCI ordering rules
7488  * guarantee that MSI will arrive after the status block.
7489  */
7490 static irqreturn_t tg3_msi(int irq, void *dev_id)
7491 {
7492         struct tg3_napi *tnapi = dev_id;
7493         struct tg3 *tp = tnapi->tp;
7494
7495         prefetch(tnapi->hw_status);
7496         if (tnapi->rx_rcb)
7497                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7498         /*
7499          * Writing any value to intr-mbox-0 clears PCI INTA# and
7500          * chip-internal interrupt pending events.
7501          * Writing non-zero to intr-mbox-0 additionally tells the
7502          * NIC to stop sending us irqs, engaging "in-intr-handler"
7503          * event coalescing.
7504          */
7505         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7506         if (likely(!tg3_irq_sync(tp)))
7507                 napi_schedule(&tnapi->napi);
7508
7509         return IRQ_RETVAL(1);
7510 }
7511
7512 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7513 {
7514         struct tg3_napi *tnapi = dev_id;
7515         struct tg3 *tp = tnapi->tp;
7516         struct tg3_hw_status *sblk = tnapi->hw_status;
7517         unsigned int handled = 1;
7518
7519         /* In INTx mode, it is possible for the interrupt to arrive at
7520          * the CPU before the status block that was posted prior to it.
7521          * Reading the PCI State register will confirm whether the
7522          * interrupt is ours and will flush the status block.
7523          */
7524         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7525                 if (tg3_flag(tp, CHIP_RESETTING) ||
7526                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7527                         handled = 0;
7528                         goto out;
7529                 }
7530         }
7531
7532         /*
7533          * Writing any value to intr-mbox-0 clears PCI INTA# and
7534          * chip-internal interrupt pending events.
7535          * Writing non-zero to intr-mbox-0 additionally tells the
7536          * NIC to stop sending us irqs, engaging "in-intr-handler"
7537          * event coalescing.
7538          *
7539          * Flush the mailbox to de-assert the IRQ immediately to prevent
7540          * spurious interrupts.  The flush impacts performance but
7541          * excessive spurious interrupts can be worse in some cases.
7542          */
7543         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7544         if (tg3_irq_sync(tp))
7545                 goto out;
7546         sblk->status &= ~SD_STATUS_UPDATED;
7547         if (likely(tg3_has_work(tnapi))) {
7548                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7549                 napi_schedule(&tnapi->napi);
7550         } else {
7551                 /* No work, shared interrupt perhaps?  re-enable
7552                  * interrupts, and flush that PCI write
7553                  */
7554                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7555                                0x00000000);
7556         }
7557 out:
7558         return IRQ_RETVAL(handled);
7559 }
7560
7561 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7562 {
7563         struct tg3_napi *tnapi = dev_id;
7564         struct tg3 *tp = tnapi->tp;
7565         struct tg3_hw_status *sblk = tnapi->hw_status;
7566         unsigned int handled = 1;
7567
7568         /* In INTx mode, it is possible for the interrupt to arrive at
7569          * the CPU before the status block that was posted prior to it.
7570          * Reading the PCI State register will confirm whether the
7571          * interrupt is ours and will flush the status block.
7572          */
7573         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7574                 if (tg3_flag(tp, CHIP_RESETTING) ||
7575                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7576                         handled = 0;
7577                         goto out;
7578                 }
7579         }
7580
7581         /*
7582          * Writing any value to intr-mbox-0 clears PCI INTA# and
7583          * chip-internal interrupt pending events.
7584          * Writing non-zero to intr-mbox-0 additionally tells the
7585          * NIC to stop sending us irqs, engaging "in-intr-handler"
7586          * event coalescing.
7587          *
7588          * Flush the mailbox to de-assert the IRQ immediately to prevent
7589          * spurious interrupts.  The flush impacts performance but
7590          * excessive spurious interrupts can be worse in some cases.
7591          */
7592         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7593
7594         /*
7595          * In a shared interrupt configuration, sometimes other devices'
7596          * interrupts will scream.  We record the current status tag here
7597          * so that the above check can report that the screaming interrupts
7598          * are unhandled.  Eventually they will be silenced.
7599          */
7600         tnapi->last_irq_tag = sblk->status_tag;
7601
7602         if (tg3_irq_sync(tp))
7603                 goto out;
7604
7605         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7606
7607         napi_schedule(&tnapi->napi);
7608
7609 out:
7610         return IRQ_RETVAL(handled);
7611 }
7612
7613 /* ISR for interrupt test */
7614 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7615 {
7616         struct tg3_napi *tnapi = dev_id;
7617         struct tg3 *tp = tnapi->tp;
7618         struct tg3_hw_status *sblk = tnapi->hw_status;
7619
7620         if ((sblk->status & SD_STATUS_UPDATED) ||
7621             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7622                 tg3_disable_ints(tp);
7623                 return IRQ_RETVAL(1);
7624         }
7625         return IRQ_RETVAL(0);
7626 }
7627
7628 #ifdef CONFIG_NET_POLL_CONTROLLER
7629 static void tg3_poll_controller(struct net_device *dev)
7630 {
7631         int i;
7632         struct tg3 *tp = netdev_priv(dev);
7633
7634         if (tg3_irq_sync(tp))
7635                 return;
7636
7637         for (i = 0; i < tp->irq_cnt; i++)
7638                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7639 }
7640 #endif
7641
7642 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7643 {
7644         struct tg3 *tp = netdev_priv(dev);
7645
7646         if (netif_msg_tx_err(tp)) {
7647                 netdev_err(dev, "transmit timed out, resetting\n");
7648                 tg3_dump_state(tp);
7649         }
7650
7651         tg3_reset_task_schedule(tp);
7652 }
7653
7654 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7655 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7656 {
7657         u32 base = (u32) mapping & 0xffffffff;
7658
7659         return base + len + 8 < base;
7660 }
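
/* Worked example of the wrap test above: base = 0xfffffff0, len = 0x20
 * gives base + len + 8 = 0x1_0000_0018, which truncates to 0x18 in u32
 * arithmetic and is therefore < base, flagging the buffer.  Because of
 * the +8, a buffer that ends within eight bytes of a 4GB boundary is
 * also treated as crossing it.
 */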
7661
7662 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7663  * of any 4GB boundaries: 4G, 8G, etc
7664  */
7665 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7666                                            u32 len, u32 mss)
7667 {
7668         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7669                 u32 base = (u32) mapping & 0xffffffff;
7670
7671                 return ((base + len + (mss & 0x3fff)) < base);
7672         }
7673         return 0;
7674 }
7675
7676 /* Test for DMA addresses > 40-bit */
7677 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7678                                           int len)
7679 {
7680 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7681         if (tg3_flag(tp, 40BIT_DMA_BUG))
7682                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7683         return 0;
7684 #else
7685         return 0;
7686 #endif
7687 }
7688
7689 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7690                                  dma_addr_t mapping, u32 len, u32 flags,
7691                                  u32 mss, u32 vlan)
7692 {
7693         txbd->addr_hi = ((u64) mapping >> 32);
7694         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7695         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7696         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7697 }
7698
7699 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7700                             dma_addr_t map, u32 len, u32 flags,
7701                             u32 mss, u32 vlan)
7702 {
7703         struct tg3 *tp = tnapi->tp;
7704         bool hwbug = false;
7705
7706         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7707                 hwbug = true;
7708
7709         if (tg3_4g_overflow_test(map, len))
7710                 hwbug = true;
7711
7712         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7713                 hwbug = true;
7714
7715         if (tg3_40bit_overflow_test(tp, map, len))
7716                 hwbug = true;
7717
7718         if (tp->dma_limit) {
7719                 u32 prvidx = *entry;
7720                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7721                 while (len > tp->dma_limit && *budget) {
7722                         u32 frag_len = tp->dma_limit;
7723                         len -= tp->dma_limit;
7724
7725                         /* Avoid the 8-byte DMA problem */
7726                         if (len <= 8) {
7727                                 len += tp->dma_limit / 2;
7728                                 frag_len = tp->dma_limit / 2;
7729                         }
7730
7731                         tnapi->tx_buffers[*entry].fragmented = true;
7732
7733                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7734                                       frag_len, tmp_flag, mss, vlan);
7735                         *budget -= 1;
7736                         prvidx = *entry;
7737                         *entry = NEXT_TX(*entry);
7738
7739                         map += frag_len;
7740                 }
7741
7742                 if (len) {
7743                         if (*budget) {
7744                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7745                                               len, flags, mss, vlan);
7746                                 *budget -= 1;
7747                                 *entry = NEXT_TX(*entry);
7748                         } else {
7749                                 hwbug = true;
7750                                 tnapi->tx_buffers[prvidx].fragmented = false;
7751                         }
7752                 }
7753         } else {
7754                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7755                               len, flags, mss, vlan);
7756                 *entry = NEXT_TX(*entry);
7757         }
7758
7759         return hwbug;
7760 }
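
/* Worked example of the dma_limit splitting above (assumed numbers):
 * with tp->dma_limit = 4096 and len = 4100, a naive split would leave a
 * 4-byte tail and trip the SHORT_DMA_BUG check.  Instead, the first
 * iteration sees the remainder (4) is <= 8, shifts half the limit onto
 * it, and emits descriptors of 2048 and 2052 bytes, keeping every
 * fragment comfortably above 8 bytes.
 */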
7761
7762 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7763 {
7764         int i;
7765         struct sk_buff *skb;
7766         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7767
7768         skb = txb->skb;
7769         txb->skb = NULL;
7770
7771         pci_unmap_single(tnapi->tp->pdev,
7772                          dma_unmap_addr(txb, mapping),
7773                          skb_headlen(skb),
7774                          PCI_DMA_TODEVICE);
7775
7776         while (txb->fragmented) {
7777                 txb->fragmented = false;
7778                 entry = NEXT_TX(entry);
7779                 txb = &tnapi->tx_buffers[entry];
7780         }
7781
7782         for (i = 0; i <= last; i++) {
7783                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7784
7785                 entry = NEXT_TX(entry);
7786                 txb = &tnapi->tx_buffers[entry];
7787
7788                 pci_unmap_page(tnapi->tp->pdev,
7789                                dma_unmap_addr(txb, mapping),
7790                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7791
7792                 while (txb->fragmented) {
7793                         txb->fragmented = false;
7794                         entry = NEXT_TX(entry);
7795                         txb = &tnapi->tx_buffers[entry];
7796                 }
7797         }
7798 }
7799
7800 /* Work around 4GB and 40-bit hardware DMA bugs. */
7801 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7802                                        struct sk_buff **pskb,
7803                                        u32 *entry, u32 *budget,
7804                                        u32 base_flags, u32 mss, u32 vlan)
7805 {
7806         struct tg3 *tp = tnapi->tp;
7807         struct sk_buff *new_skb, *skb = *pskb;
7808         dma_addr_t new_addr = 0;
7809         int ret = 0;
7810
7811         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7812                 new_skb = skb_copy(skb, GFP_ATOMIC);
7813         else {
7814                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7815
7816                 new_skb = skb_copy_expand(skb,
7817                                           skb_headroom(skb) + more_headroom,
7818                                           skb_tailroom(skb), GFP_ATOMIC);
7819         }
7820
7821         if (!new_skb) {
7822                 ret = -1;
7823         } else {
7824                 /* New SKB is guaranteed to be linear. */
7825                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7826                                           PCI_DMA_TODEVICE);
7827                 /* Make sure the mapping succeeded */
7828                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7829                         dev_kfree_skb_any(new_skb);
7830                         ret = -1;
7831                 } else {
7832                         u32 save_entry = *entry;
7833
7834                         base_flags |= TXD_FLAG_END;
7835
7836                         tnapi->tx_buffers[*entry].skb = new_skb;
7837                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7838                                            mapping, new_addr);
7839
7840                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7841                                             new_skb->len, base_flags,
7842                                             mss, vlan)) {
7843                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7844                                 dev_kfree_skb_any(new_skb);
7845                                 ret = -1;
7846                         }
7847                 }
7848         }
7849
7850         dev_consume_skb_any(skb);
7851         *pskb = new_skb;
7852         return ret;
7853 }
7854
7855 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7856 {
7857         /* Check if we will never have enough descriptors,
7858          * as gso_segs can be more than the current ring size
7859          */
7860         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7861 }
7862
7863 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7864
7865 /* Use GSO to segment TSO packets that meet the HW bug conditions
7866  * indicated in tg3_tx_frag_set()
7867  */
7868 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7869                        struct netdev_queue *txq, struct sk_buff *skb)
7870 {
7871         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7872         struct sk_buff *segs, *seg, *next;
7873
7874         /* Estimate the number of fragments in the worst case */
7875         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7876                 netif_tx_stop_queue(txq);
7877
7878                 /* netif_tx_stop_queue() must be done before checking
7879                  * tx index in tg3_tx_avail() below, because in
7880                  * tg3_tx(), we update tx index before checking for
7881                  * netif_tx_queue_stopped().
7882                  */
7883                 smp_mb();
7884                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7885                         return NETDEV_TX_BUSY;
7886
7887                 netif_tx_wake_queue(txq);
7888         }
7889
7890         segs = skb_gso_segment(skb, tp->dev->features &
7891                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7892         if (IS_ERR(segs) || !segs)
7893                 goto tg3_tso_bug_end;
7894
7895         skb_list_walk_safe(segs, seg, next) {
7896                 skb_mark_not_on_list(seg);
7897                 tg3_start_xmit(seg, tp->dev);
7898         }
7899
7900 tg3_tso_bug_end:
7901         dev_consume_skb_any(skb);
7902
7903         return NETDEV_TX_OK;
7904 }
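
/* The frag_cnt_est above budgets up to three descriptors per resulting
 * segment, matching the tx_pending / 3 bound in tg3_tso_bug_gso_check():
 * if even that worst case cannot fit, the queue is stopped rather than
 * letting the retransmitted segments fail mid-burst.
 */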
7905
7906 /* hard_start_xmit for all devices */
7907 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7908 {
7909         struct tg3 *tp = netdev_priv(dev);
7910         u32 len, entry, base_flags, mss, vlan = 0;
7911         u32 budget;
7912         int i = -1, would_hit_hwbug;
7913         dma_addr_t mapping;
7914         struct tg3_napi *tnapi;
7915         struct netdev_queue *txq;
7916         unsigned int last;
7917         struct iphdr *iph = NULL;
7918         struct tcphdr *tcph = NULL;
7919         __sum16 tcp_csum = 0, ip_csum = 0;
7920         __be16 ip_tot_len = 0;
7921
7922         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7923         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7924         if (tg3_flag(tp, ENABLE_TSS))
7925                 tnapi++;
7926
7927         budget = tg3_tx_avail(tnapi);
7928
7929         /* We are running in BH disabled context with netif_tx_lock
7930          * and TX reclaim runs via tp->napi.poll inside of a software
7931          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7932          * no IRQ context deadlocks to worry about either.  Rejoice!
7933          */
7934         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7935                 if (!netif_tx_queue_stopped(txq)) {
7936                         netif_tx_stop_queue(txq);
7937
7938                         /* This is a hard error, log it. */
7939                         netdev_err(dev,
7940                                    "BUG! Tx Ring full when queue awake!\n");
7941                 }
7942                 return NETDEV_TX_BUSY;
7943         }
7944
7945         entry = tnapi->tx_prod;
7946         base_flags = 0;
7947
7948         mss = skb_shinfo(skb)->gso_size;
7949         if (mss) {
7950                 u32 tcp_opt_len, hdr_len;
7951
7952                 if (skb_cow_head(skb, 0))
7953                         goto drop;
7954
7955                 iph = ip_hdr(skb);
7956                 tcp_opt_len = tcp_optlen(skb);
7957
7958                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7959
7960                 /* HW/FW cannot correctly segment packets that have been
7961                  * vlan encapsulated.
7962                  */
7963                 if (skb->protocol == htons(ETH_P_8021Q) ||
7964                     skb->protocol == htons(ETH_P_8021AD)) {
7965                         if (tg3_tso_bug_gso_check(tnapi, skb))
7966                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7967                         goto drop;
7968                 }
7969
7970                 if (!skb_is_gso_v6(skb)) {
7971                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7972                             tg3_flag(tp, TSO_BUG)) {
7973                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7974                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7975                                 goto drop;
7976                         }
7977                         ip_csum = iph->check;
7978                         ip_tot_len = iph->tot_len;
7979                         iph->check = 0;
7980                         iph->tot_len = htons(mss + hdr_len);
7981                 }
7982
7983                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7984                                TXD_FLAG_CPU_POST_DMA);
7985
7986                 tcph = tcp_hdr(skb);
7987                 tcp_csum = tcph->check;
7988
7989                 if (tg3_flag(tp, HW_TSO_1) ||
7990                     tg3_flag(tp, HW_TSO_2) ||
7991                     tg3_flag(tp, HW_TSO_3)) {
7992                         tcph->check = 0;
7993                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7994                 } else {
7995                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7996                                                          0, IPPROTO_TCP, 0);
7997                 }
7998
7999                 if (tg3_flag(tp, HW_TSO_3)) {
8000                         mss |= (hdr_len & 0xc) << 12;
8001                         if (hdr_len & 0x10)
8002                                 base_flags |= 0x00000010;
8003                         base_flags |= (hdr_len & 0x3e0) << 5;
8004                 } else if (tg3_flag(tp, HW_TSO_2))
8005                         mss |= hdr_len << 9;
8006                 else if (tg3_flag(tp, HW_TSO_1) ||
8007                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8008                         if (tcp_opt_len || iph->ihl > 5) {
8009                                 int tsflags;
8010
8011                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8012                                 mss |= (tsflags << 11);
8013                         }
8014                 } else {
8015                         if (tcp_opt_len || iph->ihl > 5) {
8016                                 int tsflags;
8017
8018                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8019                                 base_flags |= tsflags << 12;
8020                         }
8021                 }
8022         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8023                 /* HW/FW cannot correctly checksum packets that have been
8024                  * vlan encapsulated.
8025                  */
8026                 if (skb->protocol == htons(ETH_P_8021Q) ||
8027                     skb->protocol == htons(ETH_P_8021AD)) {
8028                         if (skb_checksum_help(skb))
8029                                 goto drop;
8030                 } else  {
8031                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8032                 }
8033         }
8034
8035         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8036             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8037                 base_flags |= TXD_FLAG_JMB_PKT;
8038
8039         if (skb_vlan_tag_present(skb)) {
8040                 base_flags |= TXD_FLAG_VLAN;
8041                 vlan = skb_vlan_tag_get(skb);
8042         }
8043
8044         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8045             tg3_flag(tp, TX_TSTAMP_EN)) {
8046                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8047                 base_flags |= TXD_FLAG_HWTSTAMP;
8048         }
8049
8050         len = skb_headlen(skb);
8051
8052         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8053         if (pci_dma_mapping_error(tp->pdev, mapping))
8054                 goto drop;
8055
8056
8057         tnapi->tx_buffers[entry].skb = skb;
8058         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8059
8060         would_hit_hwbug = 0;
8061
8062         if (tg3_flag(tp, 5701_DMA_BUG))
8063                 would_hit_hwbug = 1;
8064
8065         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8066                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8067                             mss, vlan)) {
8068                 would_hit_hwbug = 1;
8069         } else if (skb_shinfo(skb)->nr_frags > 0) {
8070                 u32 tmp_mss = mss;
8071
8072                 if (!tg3_flag(tp, HW_TSO_1) &&
8073                     !tg3_flag(tp, HW_TSO_2) &&
8074                     !tg3_flag(tp, HW_TSO_3))
8075                         tmp_mss = 0;
8076
8077                 /* Now loop through additional data
8078                  * fragments, and queue them.
8079                  */
8080                 last = skb_shinfo(skb)->nr_frags - 1;
8081                 for (i = 0; i <= last; i++) {
8082                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8083
8084                         len = skb_frag_size(frag);
8085                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8086                                                    len, DMA_TO_DEVICE);
8087
8088                         tnapi->tx_buffers[entry].skb = NULL;
8089                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8090                                            mapping);
8091                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8092                                 goto dma_error;
8093
8094                         if (!budget ||
8095                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8096                                             len, base_flags |
8097                                             ((i == last) ? TXD_FLAG_END : 0),
8098                                             tmp_mss, vlan)) {
8099                                 would_hit_hwbug = 1;
8100                                 break;
8101                         }
8102                 }
8103         }
8104
8105         if (would_hit_hwbug) {
8106                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8107
8108                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8109                         /* If it's a TSO packet, do GSO instead of
8110                          * allocating and copying to a large linear SKB
8111                          */
8112                         if (ip_tot_len) {
8113                                 iph->check = ip_csum;
8114                                 iph->tot_len = ip_tot_len;
8115                         }
8116                         tcph->check = tcp_csum;
8117                         return tg3_tso_bug(tp, tnapi, txq, skb);
8118                 }
8119
8120                 /* If the workaround fails due to memory/mapping
8121                  * failure, silently drop this packet.
8122                  */
8123                 entry = tnapi->tx_prod;
8124                 budget = tg3_tx_avail(tnapi);
8125                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8126                                                 base_flags, mss, vlan))
8127                         goto drop_nofree;
8128         }
8129
8130         skb_tx_timestamp(skb);
8131         netdev_tx_sent_queue(txq, skb->len);
8132
8133         /* Sync BD data before updating mailbox */
8134         wmb();
8135
8136         tnapi->tx_prod = entry;
8137         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8138                 netif_tx_stop_queue(txq);
8139
8140                 /* netif_tx_stop_queue() must be done before checking
8141                  * tx index in tg3_tx_avail() below, because in
8142                  * tg3_tx(), we update tx index before checking for
8143                  * netif_tx_queue_stopped().
8144                  */
8145                 smp_mb();
8146                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8147                         netif_tx_wake_queue(txq);
8148         }
8149
8150         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8151                 /* Packets are ready, update Tx producer idx on card. */
8152                 tw32_tx_mbox(tnapi->prodmbox, entry);
8153         }
8154
8155         return NETDEV_TX_OK;
8156
8157 dma_error:
8158         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8159         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8160 drop:
8161         dev_kfree_skb_any(skb);
8162 drop_nofree:
8163         tp->tx_dropped++;
8164         return NETDEV_TX_OK;
8165 }
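
/* The stop/wake handshake used twice above pairs with the consumer side
 * in tg3_tx(): the producer stops the queue, issues smp_mb(), then
 * re-checks tg3_tx_avail(); the completion path advances tx_cons, issues
 * its own barrier, then re-checks netif_tx_queue_stopped().  Whichever
 * side runs second is guaranteed to see the other's update, so a wakeup
 * cannot be lost between the "ring full" test and the queue stop.
 */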
8166
8167 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8168 {
8169         if (enable) {
8170                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8171                                   MAC_MODE_PORT_MODE_MASK);
8172
8173                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8174
8175                 if (!tg3_flag(tp, 5705_PLUS))
8176                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8177
8178                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8179                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8180                 else
8181                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8182         } else {
8183                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8184
8185                 if (tg3_flag(tp, 5705_PLUS) ||
8186                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8187                     tg3_asic_rev(tp) == ASIC_REV_5700)
8188                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8189         }
8190
8191         tw32(MAC_MODE, tp->mac_mode);
8192         udelay(40);
8193 }
8194
8195 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8196 {
8197         u32 val, bmcr, mac_mode, ptest = 0;
8198
8199         tg3_phy_toggle_apd(tp, false);
8200         tg3_phy_toggle_automdix(tp, false);
8201
8202         if (extlpbk && tg3_phy_set_extloopbk(tp))
8203                 return -EIO;
8204
8205         bmcr = BMCR_FULLDPLX;
8206         switch (speed) {
8207         case SPEED_10:
8208                 break;
8209         case SPEED_100:
8210                 bmcr |= BMCR_SPEED100;
8211                 break;
8212         case SPEED_1000:
8213         default:
8214                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8215                         speed = SPEED_100;
8216                         bmcr |= BMCR_SPEED100;
8217                 } else {
8218                         speed = SPEED_1000;
8219                         bmcr |= BMCR_SPEED1000;
8220                 }
8221         }
8222
8223         if (extlpbk) {
8224                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8225                         tg3_readphy(tp, MII_CTRL1000, &val);
8226                         val |= CTL1000_AS_MASTER |
8227                                CTL1000_ENABLE_MASTER;
8228                         tg3_writephy(tp, MII_CTRL1000, val);
8229                 } else {
8230                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8231                                 MII_TG3_FET_PTEST_TRIM_2;
8232                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8233                 }
8234         } else
8235                 bmcr |= BMCR_LOOPBACK;
8236
8237         tg3_writephy(tp, MII_BMCR, bmcr);
8238
8239         /* The write needs to be flushed for the FETs */
8240         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8241                 tg3_readphy(tp, MII_BMCR, &bmcr);
8242
8243         udelay(40);
8244
8245         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8246             tg3_asic_rev(tp) == ASIC_REV_5785) {
8247                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8248                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8249                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8250
8251                 /* The write needs to be flushed for the AC131 */
8252                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8253         }
8254
8255         /* Reset to prevent losing 1st rx packet intermittently */
8256         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8257             tg3_flag(tp, 5780_CLASS)) {
8258                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8259                 udelay(10);
8260                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8261         }
8262
8263         mac_mode = tp->mac_mode &
8264                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8265         if (speed == SPEED_1000)
8266                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8267         else
8268                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8269
8270         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8271                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8272
8273                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8274                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8275                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8276                         mac_mode |= MAC_MODE_LINK_POLARITY;
8277
8278                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8279                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8280         }
8281
8282         tw32(MAC_MODE, mac_mode);
8283         udelay(40);
8284
8285         return 0;
8286 }
8287
8288 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8289 {
8290         struct tg3 *tp = netdev_priv(dev);
8291
8292         if (features & NETIF_F_LOOPBACK) {
8293                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8294                         return;
8295
8296                 spin_lock_bh(&tp->lock);
8297                 tg3_mac_loopback(tp, true);
8298                 netif_carrier_on(tp->dev);
8299                 spin_unlock_bh(&tp->lock);
8300                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8301         } else {
8302                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8303                         return;
8304
8305                 spin_lock_bh(&tp->lock);
8306                 tg3_mac_loopback(tp, false);
8307                 /* Force link status check */
8308                 tg3_setup_phy(tp, true);
8309                 spin_unlock_bh(&tp->lock);
8310                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8311         }
8312 }
8313
8314 static netdev_features_t tg3_fix_features(struct net_device *dev,
8315         netdev_features_t features)
8316 {
8317         struct tg3 *tp = netdev_priv(dev);
8318
8319         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8320                 features &= ~NETIF_F_ALL_TSO;
8321
8322         return features;
8323 }
8324
8325 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8326 {
8327         netdev_features_t changed = dev->features ^ features;
8328
8329         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8330                 tg3_set_loopback(dev, features);
8331
8332         return 0;
8333 }
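
/* tg3_fix_features() and tg3_set_features() are wired into the stack
 * through struct net_device_ops; a sketch of the relevant entries (the
 * driver's real tg3_netdev_ops table appears later in this file):
 */
#if 0	/* illustrative only */
static const struct net_device_ops my_netdev_ops = {
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
	/* ... */
};
#endif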
8334
8335 static void tg3_rx_prodring_free(struct tg3 *tp,
8336                                  struct tg3_rx_prodring_set *tpr)
8337 {
8338         int i;
8339
8340         if (tpr != &tp->napi[0].prodring) {
8341                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8342                      i = (i + 1) & tp->rx_std_ring_mask)
8343                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344                                         tp->rx_pkt_map_sz);
8345
8346                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8347                         for (i = tpr->rx_jmb_cons_idx;
8348                              i != tpr->rx_jmb_prod_idx;
8349                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8350                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8351                                                 TG3_RX_JMB_MAP_SZ);
8352                         }
8353                 }
8354
8355                 return;
8356         }
8357
8358         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8359                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8360                                 tp->rx_pkt_map_sz);
8361
8362         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8363                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8364                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8365                                         TG3_RX_JMB_MAP_SZ);
8366         }
8367 }
8368
8369 /* Initialize rx rings for packet processing.
8370  *
8371  * The chip has been shut down and the driver detached from
8372  * the networking stack, so no interrupts or new tx packets will
8373  * end up in the driver.  tp->{tx,}lock are held and thus
8374  * we may not sleep.
8375  */
8376 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8377                                  struct tg3_rx_prodring_set *tpr)
8378 {
8379         u32 i, rx_pkt_dma_sz;
8380
8381         tpr->rx_std_cons_idx = 0;
8382         tpr->rx_std_prod_idx = 0;
8383         tpr->rx_jmb_cons_idx = 0;
8384         tpr->rx_jmb_prod_idx = 0;
8385
8386         if (tpr != &tp->napi[0].prodring) {
8387                 memset(&tpr->rx_std_buffers[0], 0,
8388                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8389                 if (tpr->rx_jmb_buffers)
8390                         memset(&tpr->rx_jmb_buffers[0], 0,
8391                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8392                 goto done;
8393         }
8394
8395         /* Zero out all descriptors. */
8396         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8397
8398         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8399         if (tg3_flag(tp, 5780_CLASS) &&
8400             tp->dev->mtu > ETH_DATA_LEN)
8401                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8402         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8403
8404         /* Initialize invariants of the rings; we only set this
8405          * stuff once.  This works because the card does not
8406          * write into the rx buffer posting rings.
8407          */
8408         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8409                 struct tg3_rx_buffer_desc *rxd;
8410
8411                 rxd = &tpr->rx_std[i];
8412                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8413                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8414                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8415                                (i << RXD_OPAQUE_INDEX_SHIFT));
8416         }
8417
8418         /* Now allocate fresh SKBs for each rx ring. */
8419         for (i = 0; i < tp->rx_pending; i++) {
8420                 unsigned int frag_size;
8421
8422                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8423                                       &frag_size) < 0) {
8424                         netdev_warn(tp->dev,
8425                                     "Using a smaller RX standard ring. Only "
8426                                     "%d out of %d buffers were allocated "
8427                                     "successfully\n", i, tp->rx_pending);
8428                         if (i == 0)
8429                                 goto initfail;
8430                         tp->rx_pending = i;
8431                         break;
8432                 }
8433         }
8434
8435         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8436                 goto done;
8437
8438         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8439
8440         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8441                 goto done;
8442
8443         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8444                 struct tg3_rx_buffer_desc *rxd;
8445
8446                 rxd = &tpr->rx_jmb[i].std;
8447                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8448                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8449                                   RXD_FLAG_JUMBO;
8450                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8451                        (i << RXD_OPAQUE_INDEX_SHIFT));
8452         }
8453
8454         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8455                 unsigned int frag_size;
8456
8457                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8458                                       &frag_size) < 0) {
8459                         netdev_warn(tp->dev,
8460                                     "Using a smaller RX jumbo ring. Only %d "
8461                                     "out of %d buffers were allocated "
8462                                     "successfully\n", i, tp->rx_jumbo_pending);
8463                         if (i == 0)
8464                                 goto initfail;
8465                         tp->rx_jumbo_pending = i;
8466                         break;
8467                 }
8468         }
8469
8470 done:
8471         return 0;
8472
8473 initfail:
8474         tg3_rx_prodring_free(tp, tpr);
8475         return -ENOMEM;
8476 }
8477
8478 static void tg3_rx_prodring_fini(struct tg3 *tp,
8479                                  struct tg3_rx_prodring_set *tpr)
8480 {
8481         kfree(tpr->rx_std_buffers);
8482         tpr->rx_std_buffers = NULL;
8483         kfree(tpr->rx_jmb_buffers);
8484         tpr->rx_jmb_buffers = NULL;
8485         if (tpr->rx_std) {
8486                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8487                                   tpr->rx_std, tpr->rx_std_mapping);
8488                 tpr->rx_std = NULL;
8489         }
8490         if (tpr->rx_jmb) {
8491                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8492                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8493                 tpr->rx_jmb = NULL;
8494         }
8495 }
8496
8497 static int tg3_rx_prodring_init(struct tg3 *tp,
8498                                 struct tg3_rx_prodring_set *tpr)
8499 {
8500         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8501                                       GFP_KERNEL);
8502         if (!tpr->rx_std_buffers)
8503                 return -ENOMEM;
8504
8505         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8506                                          TG3_RX_STD_RING_BYTES(tp),
8507                                          &tpr->rx_std_mapping,
8508                                          GFP_KERNEL);
8509         if (!tpr->rx_std)
8510                 goto err_out;
8511
8512         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8513                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8514                                               GFP_KERNEL);
8515                 if (!tpr->rx_jmb_buffers)
8516                         goto err_out;
8517
8518                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8519                                                  TG3_RX_JMB_RING_BYTES(tp),
8520                                                  &tpr->rx_jmb_mapping,
8521                                                  GFP_KERNEL);
8522                 if (!tpr->rx_jmb)
8523                         goto err_out;
8524         }
8525
8526         return 0;
8527
8528 err_out:
8529         tg3_rx_prodring_fini(tp, tpr);
8530         return -ENOMEM;
8531 }
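
/* Each prodring set thus carries two parallel allocations: a kzalloc'd
 * shadow array (rx_std_buffers/rx_jmb_buffers) for the driver's per-slot
 * bookkeeping, and a dma_alloc_coherent'd descriptor ring (rx_std/rx_jmb)
 * that the NIC itself reads.  Both are released by tg3_rx_prodring_fini(),
 * which is why the error path above can simply call it.
 */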
8532
8533 /* Free up pending packets in all rx/tx rings.
8534  *
8535  * The chip has been shut down and the driver detached from
8536  * the networking stack, so no interrupts or new tx packets will
8537  * end up in the driver.  tp->{tx,}lock is not held and we are not
8538  * in an interrupt context and thus may sleep.
8539  */
8540 static void tg3_free_rings(struct tg3 *tp)
8541 {
8542         int i, j;
8543
8544         for (j = 0; j < tp->irq_cnt; j++) {
8545                 struct tg3_napi *tnapi = &tp->napi[j];
8546
8547                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8548
8549                 if (!tnapi->tx_buffers)
8550                         continue;
8551
8552                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8553                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8554
8555                         if (!skb)
8556                                 continue;
8557
8558                         tg3_tx_skb_unmap(tnapi, i,
8559                                          skb_shinfo(skb)->nr_frags - 1);
8560
8561                         dev_consume_skb_any(skb);
8562                 }
8563                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8564         }
8565 }
8566
8567 /* Initialize tx/rx rings for packet processing.
8568  *
8569  * The chip has been shut down and the driver detached from
8570  * the networking stack, so no interrupts or new tx packets will
8571  * end up in the driver.  tp->{tx,}lock are held and thus
8572  * we may not sleep.
8573  */
8574 static int tg3_init_rings(struct tg3 *tp)
8575 {
8576         int i;
8577
8578         /* Free up all the SKBs. */
8579         tg3_free_rings(tp);
8580
8581         for (i = 0; i < tp->irq_cnt; i++) {
8582                 struct tg3_napi *tnapi = &tp->napi[i];
8583
8584                 tnapi->last_tag = 0;
8585                 tnapi->last_irq_tag = 0;
8586                 tnapi->hw_status->status = 0;
8587                 tnapi->hw_status->status_tag = 0;
8588                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8589
8590                 tnapi->tx_prod = 0;
8591                 tnapi->tx_cons = 0;
8592                 if (tnapi->tx_ring)
8593                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8594
8595                 tnapi->rx_rcb_ptr = 0;
8596                 if (tnapi->rx_rcb)
8597                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8598
8599                 if (tnapi->prodring.rx_std &&
8600                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8601                         tg3_free_rings(tp);
8602                         return -ENOMEM;
8603                 }
8604         }
8605
8606         return 0;
8607 }
8608
8609 static void tg3_mem_tx_release(struct tg3 *tp)
8610 {
8611         int i;
8612
8613         for (i = 0; i < tp->irq_max; i++) {
8614                 struct tg3_napi *tnapi = &tp->napi[i];
8615
8616                 if (tnapi->tx_ring) {
8617                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8618                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8619                         tnapi->tx_ring = NULL;
8620                 }
8621
8622                 kfree(tnapi->tx_buffers);
8623                 tnapi->tx_buffers = NULL;
8624         }
8625 }
8626
8627 static int tg3_mem_tx_acquire(struct tg3 *tp)
8628 {
8629         int i;
8630         struct tg3_napi *tnapi = &tp->napi[0];
8631
8632         /* If multivector TSS is enabled, vector 0 does not handle
8633          * tx interrupts.  Don't allocate any resources for it.
8634          */
8635         if (tg3_flag(tp, ENABLE_TSS))
8636                 tnapi++;
8637
8638         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8639                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8640                                             sizeof(struct tg3_tx_ring_info),
8641                                             GFP_KERNEL);
8642                 if (!tnapi->tx_buffers)
8643                         goto err_out;
8644
8645                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8646                                                     TG3_TX_RING_BYTES,
8647                                                     &tnapi->tx_desc_mapping,
8648                                                     GFP_KERNEL);
8649                 if (!tnapi->tx_ring)
8650                         goto err_out;
8651         }
8652
8653         return 0;
8654
8655 err_out:
8656         tg3_mem_tx_release(tp);
8657         return -ENOMEM;
8658 }
8659
8660 static void tg3_mem_rx_release(struct tg3 *tp)
8661 {
8662         int i;
8663
8664         for (i = 0; i < tp->irq_max; i++) {
8665                 struct tg3_napi *tnapi = &tp->napi[i];
8666
8667                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8668
8669                 if (!tnapi->rx_rcb)
8670                         continue;
8671
8672                 dma_free_coherent(&tp->pdev->dev,
8673                                   TG3_RX_RCB_RING_BYTES(tp),
8674                                   tnapi->rx_rcb,
8675                                   tnapi->rx_rcb_mapping);
8676                 tnapi->rx_rcb = NULL;
8677         }
8678 }
8679
8680 static int tg3_mem_rx_acquire(struct tg3 *tp)
8681 {
8682         unsigned int i, limit;
8683
8684         limit = tp->rxq_cnt;
8685
8686         /* If RSS is enabled, we still need a producer ring set on
8687          * vector zero; it is the true hw prodring.
8688          */
8689         if (tg3_flag(tp, ENABLE_RSS))
8690                 limit++;
8691
8692         for (i = 0; i < limit; i++) {
8693                 struct tg3_napi *tnapi = &tp->napi[i];
8694
8695                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8696                         goto err_out;
8697
8698                 /* If multivector RSS is enabled, vector 0
8699                  * does not handle rx or tx interrupts.
8700                  * Don't allocate any resources for it.
8701                  */
8702                 if (!i && tg3_flag(tp, ENABLE_RSS))
8703                         continue;
8704
8705                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8706                                                    TG3_RX_RCB_RING_BYTES(tp),
8707                                                    &tnapi->rx_rcb_mapping,
8708                                                    GFP_KERNEL);
8709                 if (!tnapi->rx_rcb)
8710                         goto err_out;
8711         }
8712
8713         return 0;
8714
8715 err_out:
8716         tg3_mem_rx_release(tp);
8717         return -ENOMEM;
8718 }
8719
8720 /*
8721  * Must not be invoked with interrupt sources disabled and
8722  * the hardware shut down.
8723  */
8724 static void tg3_free_consistent(struct tg3 *tp)
8725 {
8726         int i;
8727
8728         for (i = 0; i < tp->irq_cnt; i++) {
8729                 struct tg3_napi *tnapi = &tp->napi[i];
8730
8731                 if (tnapi->hw_status) {
8732                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8733                                           tnapi->hw_status,
8734                                           tnapi->status_mapping);
8735                         tnapi->hw_status = NULL;
8736                 }
8737         }
8738
8739         tg3_mem_rx_release(tp);
8740         tg3_mem_tx_release(tp);
8741
8742         /* tp->hw_stats can be referenced safely:
8743          *     1. under rtnl_lock
8744          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8745          */
8746         if (tp->hw_stats) {
8747                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8748                                   tp->hw_stats, tp->stats_mapping);
8749                 tp->hw_stats = NULL;
8750         }
8751 }
8752
8753 /*
8754  * Must not be invoked with interrupt sources disabled and
8755  * the hardware shut down.  Can sleep.
8756  */
8757 static int tg3_alloc_consistent(struct tg3 *tp)
8758 {
8759         int i;
8760
8761         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8762                                           sizeof(struct tg3_hw_stats),
8763                                           &tp->stats_mapping, GFP_KERNEL);
8764         if (!tp->hw_stats)
8765                 goto err_out;
8766
8767         for (i = 0; i < tp->irq_cnt; i++) {
8768                 struct tg3_napi *tnapi = &tp->napi[i];
8769                 struct tg3_hw_status *sblk;
8770
8771                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8772                                                       TG3_HW_STATUS_SIZE,
8773                                                       &tnapi->status_mapping,
8774                                                       GFP_KERNEL);
8775                 if (!tnapi->hw_status)
8776                         goto err_out;
8777
8778                 sblk = tnapi->hw_status;
8779
8780                 if (tg3_flag(tp, ENABLE_RSS)) {
8781                         u16 *prodptr = NULL;
8782
8783                         /*
8784                          * When RSS is enabled, the status block format changes
8785                          * slightly.  The "rx_jumbo_consumer", "reserved",
8786                          * and "rx_mini_consumer" members get mapped to the
8787                          * other three rx return ring producer indexes.
8788                          */
8789                         switch (i) {
8790                         case 1:
8791                                 prodptr = &sblk->idx[0].rx_producer;
8792                                 break;
8793                         case 2:
8794                                 prodptr = &sblk->rx_jumbo_consumer;
8795                                 break;
8796                         case 3:
8797                                 prodptr = &sblk->reserved;
8798                                 break;
8799                         case 4:
8800                                 prodptr = &sblk->rx_mini_consumer;
8801                                 break;
8802                         }
8803                         tnapi->rx_rcb_prod_idx = prodptr;
8804                 } else {
8805                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8806                 }
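
                /* Net effect of the above, assuming up to four RSS rx
                 * vectors: vector 1 -> idx[0].rx_producer, vector 2 ->
                 * rx_jumbo_consumer, vector 3 -> reserved, and vector 4 ->
                 * rx_mini_consumer.  Vector 0 handles no rx work under
                 * RSS, so its rx_rcb_prod_idx stays NULL.
                 */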
8807         }
8808
8809         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8810                 goto err_out;
8811
8812         return 0;
8813
8814 err_out:
8815         tg3_free_consistent(tp);
8816         return -ENOMEM;
8817 }
8818
8819 #define MAX_WAIT_CNT 1000
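
/* Each quiesce loop below pairs this count with a udelay(100), so a block
 * that never stops costs roughly 1000 * 100us = 100ms of polling before
 * the driver gives up with -ENODEV.
 */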
8820
8821 /* To stop a block, clear the enable bit and poll till it
8822  * clears.  tp->lock is held.
8823  */
8824 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8825 {
8826         unsigned int i;
8827         u32 val;
8828
8829         if (tg3_flag(tp, 5705_PLUS)) {
8830                 switch (ofs) {
8831                 case RCVLSC_MODE:
8832                 case DMAC_MODE:
8833                 case MBFREE_MODE:
8834                 case BUFMGR_MODE:
8835                 case MEMARB_MODE:
8836                         /* We can't enable/disable these bits of the
8837                          * 5705/5750, just say success.
8838                          */
8839                         return 0;
8840
8841                 default:
8842                         break;
8843                 }
8844         }
8845
8846         val = tr32(ofs);
8847         val &= ~enable_bit;
8848         tw32_f(ofs, val);
8849
8850         for (i = 0; i < MAX_WAIT_CNT; i++) {
8851                 if (pci_channel_offline(tp->pdev)) {
8852                         dev_err(&tp->pdev->dev,
8853                                 "tg3_stop_block device offline, "
8854                                 "ofs=%lx enable_bit=%x\n",
8855                                 ofs, enable_bit);
8856                         return -ENODEV;
8857                 }
8858
8859                 udelay(100);
8860                 val = tr32(ofs);
8861                 if ((val & enable_bit) == 0)
8862                         break;
8863         }
8864
8865         if (i == MAX_WAIT_CNT && !silent) {
8866                 dev_err(&tp->pdev->dev,
8867                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8868                         ofs, enable_bit);
8869                 return -ENODEV;
8870         }
8871
8872         return 0;
8873 }
8874
8875 /* tp->lock is held. */
8876 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8877 {
8878         int i, err;
8879
8880         tg3_disable_ints(tp);
8881
8882         if (pci_channel_offline(tp->pdev)) {
8883                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8884                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8885                 err = -ENODEV;
8886                 goto err_no_dev;
8887         }
8888
8889         tp->rx_mode &= ~RX_MODE_ENABLE;
8890         tw32_f(MAC_RX_MODE, tp->rx_mode);
8891         udelay(10);
8892
8893         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8894         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8895         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8897         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8898         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8899
8900         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8901         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8902         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8903         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8904         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8905         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8906         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8907
8908         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8909         tw32_f(MAC_MODE, tp->mac_mode);
8910         udelay(40);
8911
8912         tp->tx_mode &= ~TX_MODE_ENABLE;
8913         tw32_f(MAC_TX_MODE, tp->tx_mode);
8914
8915         for (i = 0; i < MAX_WAIT_CNT; i++) {
8916                 udelay(100);
8917                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8918                         break;
8919         }
8920         if (i >= MAX_WAIT_CNT) {
8921                 dev_err(&tp->pdev->dev,
8922                         "%s timed out, TX_MODE_ENABLE will not clear "
8923                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8924                 err |= -ENODEV;
8925         }
8926
8927         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8928         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8929         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8930
8931         tw32(FTQ_RESET, 0xffffffff);
8932         tw32(FTQ_RESET, 0x00000000);
8933
8934         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8935         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8936
8937 err_no_dev:
8938         for (i = 0; i < tp->irq_cnt; i++) {
8939                 struct tg3_napi *tnapi = &tp->napi[i];
8940                 if (tnapi->hw_status)
8941                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8942         }
8943
8944         return err;
8945 }
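
/* Note that the per-block results are accumulated with |=, so err is only
 * meaningful as a zero/nonzero flag: OR-ing several -ENODEV values together
 * does not yield a clean errno, and callers are presumably expected to
 * treat any nonzero return simply as "hardware did not quiesce".
 */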
8946
8947 /* Save PCI command register before chip reset */
8948 static void tg3_save_pci_state(struct tg3 *tp)
8949 {
8950         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8951 }
8952
8953 /* Restore PCI state after chip reset */
8954 static void tg3_restore_pci_state(struct tg3 *tp)
8955 {
8956         u32 val;
8957
8958         /* Re-enable indirect register accesses. */
8959         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8960                                tp->misc_host_ctrl);
8961
8962         /* Set MAX PCI retry to zero. */
8963         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8964         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8965             tg3_flag(tp, PCIX_MODE))
8966                 val |= PCISTATE_RETRY_SAME_DMA;
8967         /* Allow reads and writes to the APE register and memory space. */
8968         if (tg3_flag(tp, ENABLE_APE))
8969                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8970                        PCISTATE_ALLOW_APE_SHMEM_WR |
8971                        PCISTATE_ALLOW_APE_PSPACE_WR;
8972         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8973
8974         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8975
8976         if (!tg3_flag(tp, PCI_EXPRESS)) {
8977                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8978                                       tp->pci_cacheline_sz);
8979                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8980                                       tp->pci_lat_timer);
8981         }
8982
8983         /* Make sure PCI-X relaxed ordering bit is clear. */
8984         if (tg3_flag(tp, PCIX_MODE)) {
8985                 u16 pcix_cmd;
8986
8987                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8988                                      &pcix_cmd);
8989                 pcix_cmd &= ~PCI_X_CMD_ERO;
8990                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8991                                       pcix_cmd);
8992         }
8993
8994         if (tg3_flag(tp, 5780_CLASS)) {
8995
8996                 /* Chip reset on 5780 will reset MSI enable bit,
8997                  * so we need to restore it.
8998                  */
8999                 if (tg3_flag(tp, USING_MSI)) {
9000                         u16 ctrl;
9001
9002                         pci_read_config_word(tp->pdev,
9003                                              tp->msi_cap + PCI_MSI_FLAGS,
9004                                              &ctrl);
9005                         pci_write_config_word(tp->pdev,
9006                                               tp->msi_cap + PCI_MSI_FLAGS,
9007                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9008                         val = tr32(MSGINT_MODE);
9009                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9010                 }
9011         }
9012 }
9013
9014 static void tg3_override_clk(struct tg3 *tp)
9015 {
9016         u32 val;
9017
9018         switch (tg3_asic_rev(tp)) {
9019         case ASIC_REV_5717:
9020                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9021                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9022                      TG3_CPMU_MAC_ORIDE_ENABLE);
9023                 break;
9024
9025         case ASIC_REV_5719:
9026         case ASIC_REV_5720:
9027                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9028                 break;
9029
9030         default:
9031                 return;
9032         }
9033 }
9034
9035 static void tg3_restore_clk(struct tg3 *tp)
9036 {
9037         u32 val;
9038
9039         switch (tg3_asic_rev(tp)) {
9040         case ASIC_REV_5717:
9041                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9042                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9043                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9044                 break;
9045
9046         case ASIC_REV_5719:
9047         case ASIC_REV_5720:
9048                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9049                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9050                 break;
9051
9052         default:
9053                 return;
9054         }
9055 }
9056
9057 /* tp->lock is held. */
9058 static int tg3_chip_reset(struct tg3 *tp)
9059         __releases(tp->lock)
9060         __acquires(tp->lock)
9061 {
9062         u32 val;
9063         void (*write_op)(struct tg3 *, u32, u32);
9064         int i, err;
9065
9066         if (!pci_device_is_present(tp->pdev))
9067                 return -ENODEV;
9068
9069         tg3_nvram_lock(tp);
9070
9071         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9072
9073         /* No matching tg3_nvram_unlock() after this because
9074          * chip reset below will undo the nvram lock.
9075          */
9076         tp->nvram_lock_cnt = 0;
9077
9078         /* GRC_MISC_CFG core clock reset will clear the memory
9079          * enable bit in PCI register 4 and the MSI enable bit
9080          * on some chips, so we save relevant registers here.
9081          */
9082         tg3_save_pci_state(tp);
9083
9084         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9085             tg3_flag(tp, 5755_PLUS))
9086                 tw32(GRC_FASTBOOT_PC, 0);
9087
9088         /*
9089          * We must avoid the readl() that normally takes place.
9090          * It locks machines, causes machine checks, and other
9091          * fun things.  So, temporarily disable the 5701
9092          * hardware workaround, while we do the reset.
9093          */
9094         write_op = tp->write32;
9095         if (write_op == tg3_write_flush_reg32)
9096                 tp->write32 = tg3_write32;
9097
9098         /* Prevent the irq handler from reading or writing PCI registers
9099          * during chip reset when the memory enable bit in the PCI command
9100          * register may be cleared.  The chip does not generate interrupt
9101          * at this time, but the irq handler may still be called due to irq
9102          * sharing or irqpoll.
9103          */
9104         tg3_flag_set(tp, CHIP_RESETTING);
9105         for (i = 0; i < tp->irq_cnt; i++) {
9106                 struct tg3_napi *tnapi = &tp->napi[i];
9107                 if (tnapi->hw_status) {
9108                         tnapi->hw_status->status = 0;
9109                         tnapi->hw_status->status_tag = 0;
9110                 }
9111                 tnapi->last_tag = 0;
9112                 tnapi->last_irq_tag = 0;
9113         }
9114         smp_mb();
9115
9116         tg3_full_unlock(tp);
9117
9118         for (i = 0; i < tp->irq_cnt; i++)
9119                 synchronize_irq(tp->napi[i].irq_vec);
9120
9121         tg3_full_lock(tp, 0);
9122
9123         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9124                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9125                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9126         }
9127
9128         /* do the reset */
9129         val = GRC_MISC_CFG_CORECLK_RESET;
9130
9131         if (tg3_flag(tp, PCI_EXPRESS)) {
9132                 /* Force PCIe 1.0a mode */
9133                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9134                     !tg3_flag(tp, 57765_PLUS) &&
9135                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9136                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9137                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9138
9139                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9140                         tw32(GRC_MISC_CFG, (1 << 29));
9141                         val |= (1 << 29);
9142                 }
9143         }
9144
9145         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9146                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9147                 tw32(GRC_VCPU_EXT_CTRL,
9148                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9149         }
9150
9151         /* Set the clock to the highest frequency to avoid timeouts. With link
9152          * aware mode, the clock speed could be slow and bootcode does not
9153          * complete within the expected time. Override the clock to allow the
9154          * bootcode to finish sooner and then restore it.
9155          */
9156         tg3_override_clk(tp);
9157
9158         /* Manage gphy power for all PCIe devices lacking a CPMU. */
9159         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9160                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9161
9162         tw32(GRC_MISC_CFG, val);
9163
9164         /* restore 5701 hardware bug workaround write method */
9165         tp->write32 = write_op;
9166
9167         /* Unfortunately, we have to delay before the PCI read back.
9168          * Some 575X chips will not even respond to a PCI cfg access
9169          * when the reset command is given to the chip.
9170          *
9171          * How do these hardware designers expect things to work
9172          * properly if the PCI write is posted for a long period
9173          * of time?  It is always necessary to have some method by
9174          * which a register read back can occur to push the write
9175          * out which does the reset.
9176          *
9177          * For most tg3 variants the trick below was working.
9178          * Ho hum...
9179          */
9180         udelay(120);
9181
9182         /* Flush PCI posted writes.  The normal MMIO registers
9183          * are inaccessible at this time so this is the only
9184          * way to do this reliably (actually, this is no longer
9185          * the case, see above).  I tried to use indirect
9186          * register read/write but this upset some 5701 variants.
9187          */
9188         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9189
9190         udelay(120);
9191
9192         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9193                 u16 val16;
9194
9195                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9196                         int j;
9197                         u32 cfg_val;
9198
9199                         /* Wait for link training to complete.  */
9200                         for (j = 0; j < 5000; j++)
9201                                 udelay(100);
9202
9203                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9204                         pci_write_config_dword(tp->pdev, 0xc4,
9205                                                cfg_val | (1 << 15));
9206                 }
9207
9208                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9209                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9210                 /*
9211                  * Older PCIe devices only support the 128 byte
9212                  * MPS setting.  Enforce the restriction.
9213                  */
9214                 if (!tg3_flag(tp, CPMU_PRESENT))
9215                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9216                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9217
9218                 /* Clear error status */
9219                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9220                                       PCI_EXP_DEVSTA_CED |
9221                                       PCI_EXP_DEVSTA_NFED |
9222                                       PCI_EXP_DEVSTA_FED |
9223                                       PCI_EXP_DEVSTA_URD);
9224         }
9225
9226         tg3_restore_pci_state(tp);
9227
9228         tg3_flag_clear(tp, CHIP_RESETTING);
9229         tg3_flag_clear(tp, ERROR_PROCESSED);
9230
9231         val = 0;
9232         if (tg3_flag(tp, 5780_CLASS))
9233                 val = tr32(MEMARB_MODE);
9234         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9235
9236         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9237                 tg3_stop_fw(tp);
9238                 tw32(0x5000, 0x400);
9239         }
9240
9241         if (tg3_flag(tp, IS_SSB_CORE)) {
9242                 /*
9243                  * BCM4785: In order to avoid repercussions from using
9244                  * potentially defective internal ROM, stop the Rx RISC CPU,
9245                  * which is not required.
9246                  * which is not required for normal operation.
9247                 tg3_stop_fw(tp);
9248                 tg3_halt_cpu(tp, RX_CPU_BASE);
9249         }
9250
9251         err = tg3_poll_fw(tp);
9252         if (err)
9253                 return err;
9254
9255         tw32(GRC_MODE, tp->grc_mode);
9256
9257         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9258                 val = tr32(0xc4);
9259
9260                 tw32(0xc4, val | (1 << 15));
9261         }
9262
9263         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9264             tg3_asic_rev(tp) == ASIC_REV_5705) {
9265                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9266                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9267                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9268                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9269         }
9270
9271         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9272                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9273                 val = tp->mac_mode;
9274         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9275                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9276                 val = tp->mac_mode;
9277         } else
9278                 val = 0;
9279
9280         tw32_f(MAC_MODE, val);
9281         udelay(40);
9282
9283         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9284
9285         tg3_mdio_start(tp);
9286
9287         if (tg3_flag(tp, PCI_EXPRESS) &&
9288             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9289             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9290             !tg3_flag(tp, 57765_PLUS)) {
9291                 val = tr32(0x7c00);
9292
9293                 tw32(0x7c00, val | (1 << 25));
9294         }
9295
9296         tg3_restore_clk(tp);
9297
9298         /* Increase the core clock speed to fix tx timeout issue for 5762
9299          * with 100Mbps link speed.
9300          */
9301         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9302                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9303                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9304                      TG3_CPMU_MAC_ORIDE_ENABLE);
9305         }
9306
9307         /* Reprobe ASF enable state.  */
9308         tg3_flag_clear(tp, ENABLE_ASF);
9309         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9310                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9311
9312         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9313         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9314         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9315                 u32 nic_cfg;
9316
9317                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9318                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9319                         tg3_flag_set(tp, ENABLE_ASF);
9320                         tp->last_event_jiffies = jiffies;
9321                         if (tg3_flag(tp, 5750_PLUS))
9322                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9323
9324                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9325                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9326                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9327                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9328                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9329                 }
9330         }
9331
9332         return 0;
9333 }
9334
9335 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9336 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9337 static void __tg3_set_rx_mode(struct net_device *);
9338
9339 /* tp->lock is held. */
9340 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9341 {
9342         int err;
9343
9344         tg3_stop_fw(tp);
9345
9346         tg3_write_sig_pre_reset(tp, kind);
9347
9348         tg3_abort_hw(tp, silent);
9349         err = tg3_chip_reset(tp);
9350
9351         __tg3_set_mac_addr(tp, false);
9352
9353         tg3_write_sig_legacy(tp, kind);
9354         tg3_write_sig_post_reset(tp, kind);
9355
9356         if (tp->hw_stats) {
9357                 /* Save the stats across chip resets... */
9358                 tg3_get_nstats(tp, &tp->net_stats_prev);
9359                 tg3_get_estats(tp, &tp->estats_prev);
9360
9361                 /* And make sure the next sample is new data */
9362                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9363         }
9364
9365         return err;
9366 }
9367
9368 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9369 {
9370         struct tg3 *tp = netdev_priv(dev);
9371         struct sockaddr *addr = p;
9372         int err = 0;
9373         bool skip_mac_1 = false;
9374
9375         if (!is_valid_ether_addr(addr->sa_data))
9376                 return -EADDRNOTAVAIL;
9377
9378         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9379
9380         if (!netif_running(dev))
9381                 return 0;
9382
9383         if (tg3_flag(tp, ENABLE_ASF)) {
9384                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9385
9386                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9387                 addr0_low = tr32(MAC_ADDR_0_LOW);
9388                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9389                 addr1_low = tr32(MAC_ADDR_1_LOW);
9390
9391                 /* Skip MAC addr 1 if ASF is using it. */
9392                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9393                     !(addr1_high == 0 && addr1_low == 0))
9394                         skip_mac_1 = true;
9395         }
9396         spin_lock_bh(&tp->lock);
9397         __tg3_set_mac_addr(tp, skip_mac_1);
9398         __tg3_set_rx_mode(dev);
9399         spin_unlock_bh(&tp->lock);
9400
9401         return err;
9402 }
9403
9404 /* tp->lock is held. */
9405 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9406                            dma_addr_t mapping, u32 maxlen_flags,
9407                            u32 nic_addr)
9408 {
9409         tg3_write_mem(tp,
9410                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9411                       ((u64) mapping >> 32));
9412         tg3_write_mem(tp,
9413                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9414                       ((u64) mapping & 0xffffffff));
9415         tg3_write_mem(tp,
9416                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9417                        maxlen_flags);
9418
9419         if (!tg3_flag(tp, 5705_PLUS))
9420                 tg3_write_mem(tp,
9421                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9422                               nic_addr);
9423 }
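
/* A BDINFO block in NIC SRAM is four 32-bit words: the host ring DMA
 * address split into high/low halves, the maxlen/flags word, and a
 * NIC-side ring address that is only written on pre-5705 parts.  A
 * typical call, mirroring tg3_tx_rcbs_init() below:
 *
 *      tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tnapi->tx_desc_mapping,
 *                     TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *                     NIC_SRAM_TX_BUFFER_DESC);
 */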
9424
9425
9426 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9427 {
9428         int i = 0;
9429
9430         if (!tg3_flag(tp, ENABLE_TSS)) {
9431                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9432                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9433                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9434         } else {
9435                 tw32(HOSTCC_TXCOL_TICKS, 0);
9436                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9437                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9438
9439                 for (; i < tp->txq_cnt; i++) {
9440                         u32 reg;
9441
9442                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9443                         tw32(reg, ec->tx_coalesce_usecs);
9444                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9445                         tw32(reg, ec->tx_max_coalesced_frames);
9446                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9447                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9448                 }
9449         }
9450
9451         for (; i < tp->irq_max - 1; i++) {
9452                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9453                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9454                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9455         }
9456 }
9457
9458 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9459 {
9460         int i = 0;
9461         u32 limit = tp->rxq_cnt;
9462
9463         if (!tg3_flag(tp, ENABLE_RSS)) {
9464                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9465                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9466                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9467                 limit--;
9468         } else {
9469                 tw32(HOSTCC_RXCOL_TICKS, 0);
9470                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9471                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9472         }
9473
9474         for (; i < limit; i++) {
9475                 u32 reg;
9476
9477                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9478                 tw32(reg, ec->rx_coalesce_usecs);
9479                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9480                 tw32(reg, ec->rx_max_coalesced_frames);
9481                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9482                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9483         }
9484
9485         for (; i < tp->irq_max - 1; i++) {
9486                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9487                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9488                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9489         }
9490 }
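
/* In both helpers above, the per-vector coalescing registers sit at a
 * fixed 0x18-byte stride starting from the *_VEC1 copy, i.e. vector n + 1
 * uses <reg>_VEC1 + n * 0x18.  The trailing loops zero the copies for
 * vectors the current configuration leaves unused.
 */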
9491
9492 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9493 {
9494         tg3_coal_tx_init(tp, ec);
9495         tg3_coal_rx_init(tp, ec);
9496
9497         if (!tg3_flag(tp, 5705_PLUS)) {
9498                 u32 val = ec->stats_block_coalesce_usecs;
9499
9500                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9501                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9502
9503                 if (!tp->link_up)
9504                         val = 0;
9505
9506                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9507         }
9508 }
9509
9510 /* tp->lock is held. */
9511 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9512 {
9513         u32 txrcb, limit;
9514
9515         /* Disable all transmit rings but the first. */
9516         if (!tg3_flag(tp, 5705_PLUS))
9517                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9518         else if (tg3_flag(tp, 5717_PLUS))
9519                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9520         else if (tg3_flag(tp, 57765_CLASS) ||
9521                  tg3_asic_rev(tp) == ASIC_REV_5762)
9522                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9523         else
9524                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9525
9526         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9527              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9528                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9529                               BDINFO_FLAGS_DISABLED);
9530 }
9531
9532 /* tp->lock is held. */
9533 static void tg3_tx_rcbs_init(struct tg3 *tp)
9534 {
9535         int i = 0;
9536         u32 txrcb = NIC_SRAM_SEND_RCB;
9537
9538         if (tg3_flag(tp, ENABLE_TSS))
9539                 i++;
9540
9541         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9542                 struct tg3_napi *tnapi = &tp->napi[i];
9543
9544                 if (!tnapi->tx_ring)
9545                         continue;
9546
9547                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9548                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9549                                NIC_SRAM_TX_BUFFER_DESC);
9550         }
9551 }
9552
9553 /* tp->lock is held. */
9554 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9555 {
9556         u32 rxrcb, limit;
9557
9558         /* Disable all receive return rings but the first. */
9559         if (tg3_flag(tp, 5717_PLUS))
9560                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9561         else if (!tg3_flag(tp, 5705_PLUS))
9562                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9563         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9564                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9565                  tg3_flag(tp, 57765_CLASS))
9566                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9567         else
9568                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9569
9570         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9571              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9572                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9573                               BDINFO_FLAGS_DISABLED);
9574 }
9575
9576 /* tp->lock is held. */
9577 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9578 {
9579         int i = 0;
9580         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9581
9582         if (tg3_flag(tp, ENABLE_RSS))
9583                 i++;
9584
9585         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9586                 struct tg3_napi *tnapi = &tp->napi[i];
9587
9588                 if (!tnapi->rx_rcb)
9589                         continue;
9590
9591                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9592                                (tp->rx_ret_ring_mask + 1) <<
9593                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9594         }
9595 }
9596
9597 /* tp->lock is held. */
9598 static void tg3_rings_reset(struct tg3 *tp)
9599 {
9600         int i;
9601         u32 stblk;
9602         struct tg3_napi *tnapi = &tp->napi[0];
9603
9604         tg3_tx_rcbs_disable(tp);
9605
9606         tg3_rx_ret_rcbs_disable(tp);
9607
9608         /* Disable interrupts */
9609         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9610         tp->napi[0].chk_msi_cnt = 0;
9611         tp->napi[0].last_rx_cons = 0;
9612         tp->napi[0].last_tx_cons = 0;
9613
9614         /* Zero mailbox registers. */
9615         if (tg3_flag(tp, SUPPORT_MSIX)) {
9616                 for (i = 1; i < tp->irq_max; i++) {
9617                         tp->napi[i].tx_prod = 0;
9618                         tp->napi[i].tx_cons = 0;
9619                         if (tg3_flag(tp, ENABLE_TSS))
9620                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9621                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9622                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9623                         tp->napi[i].chk_msi_cnt = 0;
9624                         tp->napi[i].last_rx_cons = 0;
9625                         tp->napi[i].last_tx_cons = 0;
9626                 }
9627                 if (!tg3_flag(tp, ENABLE_TSS))
9628                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9629         } else {
9630                 tp->napi[0].tx_prod = 0;
9631                 tp->napi[0].tx_cons = 0;
9632                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9633                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9634         }
9635
9636         /* Make sure the NIC-based send BD rings are disabled. */
9637         if (!tg3_flag(tp, 5705_PLUS)) {
9638                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9639                 for (i = 0; i < 16; i++)
9640                         tw32_tx_mbox(mbox + i * 8, 0);
9641         }
9642
9643         /* Clear status block in ram. */
9644         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9645
9646         /* Set status block DMA address */
9647         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9648              ((u64) tnapi->status_mapping >> 32));
9649         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9650              ((u64) tnapi->status_mapping & 0xffffffff));
9651
9652         stblk = HOSTCC_STATBLCK_RING1;
9653
9654         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9655                 u64 mapping = (u64)tnapi->status_mapping;
9656                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9657                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9658                 stblk += 8;
9659
9660                 /* Clear status block in ram. */
9661                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9662         }
9663
9664         tg3_tx_rcbs_init(tp);
9665         tg3_rx_ret_rcbs_init(tp);
9666 }
9667
9668 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9669 {
9670         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9671
9672         if (!tg3_flag(tp, 5750_PLUS) ||
9673             tg3_flag(tp, 5780_CLASS) ||
9674             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9675             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9676             tg3_flag(tp, 57765_PLUS))
9677                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9678         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9679                  tg3_asic_rev(tp) == ASIC_REV_5787)
9680                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9681         else
9682                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9683
9684         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9685         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9686
9687         val = min(nic_rep_thresh, host_rep_thresh);
9688         tw32(RCVBDI_STD_THRESH, val);
9689
9690         if (tg3_flag(tp, 57765_PLUS))
9691                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9692
9693         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9694                 return;
9695
9696         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9697
9698         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9699
9700         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9701         tw32(RCVBDI_JUMBO_THRESH, val);
9702
9703         if (tg3_flag(tp, 57765_PLUS))
9704                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9705 }
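
/* Worked example for the standard ring math above, assuming the driver
 * default of tp->rx_pending == 200: host_rep_thresh = max(200 / 8, 1) = 25,
 * and RCVBDI_STD_THRESH is programmed with min(nic_rep_thresh, 25), so the
 * NIC requests replenishment well before the ring runs dry.
 */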
9706
9707 static inline u32 calc_crc(unsigned char *buf, int len)
9708 {
9709         u32 reg;
9710         u32 tmp;
9711         int j, k;
9712
9713         reg = 0xffffffff;
9714
9715         for (j = 0; j < len; j++) {
9716                 reg ^= buf[j];
9717
9718                 for (k = 0; k < 8; k++) {
9719                         tmp = reg & 0x01;
9720
9721                         reg >>= 1;
9722
9723                         if (tmp)
9724                                 reg ^= CRC32_POLY_LE;
9725                 }
9726         }
9727
9728         return ~reg;
9729 }
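
/* calc_crc() is a bit-at-a-time little-endian CRC-32 over buf using
 * CRC32_POLY_LE, with the final inversion applied on return; it should be
 * equivalent to ~crc32_le(~0, buf, len) from <linux/crc32.h>, kept
 * open-coded here.
 */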
9730
9731 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9732 {
9733         /* accept or reject all multicast frames */
9734         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9735         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9736         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9737         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9738 }
9739
9740 static void __tg3_set_rx_mode(struct net_device *dev)
9741 {
9742         struct tg3 *tp = netdev_priv(dev);
9743         u32 rx_mode;
9744
9745         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9746                                   RX_MODE_KEEP_VLAN_TAG);
9747
9748 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9749         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9750          * flag clear.
9751          */
9752         if (!tg3_flag(tp, ENABLE_ASF))
9753                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9754 #endif
9755
9756         if (dev->flags & IFF_PROMISC) {
9757                 /* Promiscuous mode. */
9758                 rx_mode |= RX_MODE_PROMISC;
9759         } else if (dev->flags & IFF_ALLMULTI) {
9760                 /* Accept all multicast. */
9761                 tg3_set_multi(tp, 1);
9762         } else if (netdev_mc_empty(dev)) {
9763                 /* Reject all multicast. */
9764                 tg3_set_multi(tp, 0);
9765         } else {
9766                 /* Accept one or more multicast(s). */
9767                 struct netdev_hw_addr *ha;
9768                 u32 mc_filter[4] = { 0, };
9769                 u32 regidx;
9770                 u32 bit;
9771                 u32 crc;
9772
9773                 netdev_for_each_mc_addr(ha, dev) {
9774                         crc = calc_crc(ha->addr, ETH_ALEN);
9775                         bit = ~crc & 0x7f;
9776                         regidx = (bit & 0x60) >> 5;
9777                         bit &= 0x1f;
9778                         mc_filter[regidx] |= (1 << bit);
9779                 }
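
                /* Worked example of the hash: if ~crc & 0x7f comes out to
                 * 0x4a for some address, then regidx = (0x4a & 0x60) >> 5
                 * = 2 and bit = 0x4a & 0x1f = 10, so bit 10 of
                 * MAC_HASH_REG_2 is set via mc_filter[2].
                 */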
9780
9781                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9782                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9783                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9784                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9785         }
9786
9787         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9788                 rx_mode |= RX_MODE_PROMISC;
9789         } else if (!(dev->flags & IFF_PROMISC)) {
9790                 /* Add all entries to the mac addr filter list */
9791                 int i = 0;
9792                 struct netdev_hw_addr *ha;
9793
9794                 netdev_for_each_uc_addr(ha, dev) {
9795                         __tg3_set_one_mac_addr(tp, ha->addr,
9796                                                i + TG3_UCAST_ADDR_IDX(tp));
9797                         i++;
9798                 }
9799         }
9800
9801         if (rx_mode != tp->rx_mode) {
9802                 tp->rx_mode = rx_mode;
9803                 tw32_f(MAC_RX_MODE, rx_mode);
9804                 udelay(10);
9805         }
9806 }
9807
9808 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9809 {
9810         int i;
9811
9812         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9813                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9814 }
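
/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so with e.g.
 * qcnt == 4 the table reads 0, 1, 2, 3, 0, 1, 2, 3, ... spreading flows
 * round-robin across the enabled rx queues.
 */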
9815
9816 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9817 {
9818         int i;
9819
9820         if (!tg3_flag(tp, SUPPORT_MSIX))
9821                 return;
9822
9823         if (tp->rxq_cnt == 1) {
9824                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9825                 return;
9826         }
9827
9828         /* Validate table against current IRQ count */
9829         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9830                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9831                         break;
9832         }
9833
9834         if (i != TG3_RSS_INDIR_TBL_SIZE)
9835                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9836 }
9837
9838 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9839 {
9840         int i = 0;
9841         u32 reg = MAC_RSS_INDIR_TBL_0;
9842
9843         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9844                 u32 val = tp->rss_ind_tbl[i];
9845                 i++;
9846                 for (; i % 8; i++) {
9847                         val <<= 4;
9848                         val |= tp->rss_ind_tbl[i];
9849                 }
9850                 tw32(reg, val);
9851                 reg += 4;
9852         }
9853 }
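
/* The loop above packs eight 4-bit table entries into each 32-bit
 * register, first entry in the most significant nibble: with
 * rss_ind_tbl[0..7] == {0, 1, 2, 3, 0, 1, 2, 3}, the first register
 * (MAC_RSS_INDIR_TBL_0) is written as 0x01230123.
 */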
9854
9855 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9856 {
9857         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9858                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9859         else
9860                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9861 }
9862
9863 /* tp->lock is held. */
9864 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9865 {
9866         u32 val, rdmac_mode;
9867         int i, err, limit;
9868         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9869
9870         tg3_disable_ints(tp);
9871
9872         tg3_stop_fw(tp);
9873
9874         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9875
9876         if (tg3_flag(tp, INIT_COMPLETE))
9877                 tg3_abort_hw(tp, 1);
9878
9879         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9880             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9881                 tg3_phy_pull_config(tp);
9882                 tg3_eee_pull_config(tp, NULL);
9883                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9884         }
9885
9886         /* Enable MAC control of LPI */
9887         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9888                 tg3_setup_eee(tp);
9889
9890         if (reset_phy)
9891                 tg3_phy_reset(tp);
9892
9893         err = tg3_chip_reset(tp);
9894         if (err)
9895                 return err;
9896
9897         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9898
9899         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9900                 val = tr32(TG3_CPMU_CTRL);
9901                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9902                 tw32(TG3_CPMU_CTRL, val);
9903
9904                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9905                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9906                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9907                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9908
9909                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9910                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9911                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9912                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9913
9914                 val = tr32(TG3_CPMU_HST_ACC);
9915                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9916                 val |= CPMU_HST_ACC_MACCLK_6_25;
9917                 tw32(TG3_CPMU_HST_ACC, val);
9918         }
9919
9920         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9921                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9922                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9923                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9924                 tw32(PCIE_PWR_MGMT_THRESH, val);
9925
9926                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9927                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9928
9929                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9930
9931                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9932                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9933         }
9934
9935         if (tg3_flag(tp, L1PLLPD_EN)) {
9936                 u32 grc_mode = tr32(GRC_MODE);
9937
9938                 /* Access the lower 1K of PL PCIE block registers. */
9939                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9940                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9941
9942                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9943                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9944                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9945
9946                 tw32(GRC_MODE, grc_mode);
9947         }
9948
9949         if (tg3_flag(tp, 57765_CLASS)) {
9950                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9951                         u32 grc_mode = tr32(GRC_MODE);
9952
9953                         /* Access the lower 1K of PL PCIE block registers. */
9954                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9955                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9956
9957                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9958                                    TG3_PCIE_PL_LO_PHYCTL5);
9959                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9960                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9961
9962                         tw32(GRC_MODE, grc_mode);
9963                 }
9964
9965                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9966                         u32 grc_mode;
9967
9968                         /* Fix transmit hangs */
9969                         val = tr32(TG3_CPMU_PADRNG_CTL);
9970                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9971                         tw32(TG3_CPMU_PADRNG_CTL, val);
9972
9973                         grc_mode = tr32(GRC_MODE);
9974
9975                         /* Access the lower 1K of DL PCIE block registers. */
9976                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9977                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9978
9979                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9980                                    TG3_PCIE_DL_LO_FTSMAX);
9981                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9982                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9983                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9984
9985                         tw32(GRC_MODE, grc_mode);
9986                 }
9987
9988                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9989                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9990                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9991                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9992         }
9993
9994         /* This works around an issue with Athlon chipsets on
9995          * B3 tigon3 silicon.  This bit has no effect on any
9996          * other revision.  But do not set this on PCI Express
9997          * chips and don't even touch the clocks if the CPMU is present.
9998          */
9999         if (!tg3_flag(tp, CPMU_PRESENT)) {
10000                 if (!tg3_flag(tp, PCI_EXPRESS))
10001                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
10002                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
10003         }
10004
10005         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10006             tg3_flag(tp, PCIX_MODE)) {
10007                 val = tr32(TG3PCI_PCISTATE);
10008                 val |= PCISTATE_RETRY_SAME_DMA;
10009                 tw32(TG3PCI_PCISTATE, val);
10010         }
10011
10012         if (tg3_flag(tp, ENABLE_APE)) {
10013                 /* Allow reads and writes to the
10014                  * APE register and memory space.
10015                  */
10016                 val = tr32(TG3PCI_PCISTATE);
10017                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10018                        PCISTATE_ALLOW_APE_SHMEM_WR |
10019                        PCISTATE_ALLOW_APE_PSPACE_WR;
10020                 tw32(TG3PCI_PCISTATE, val);
10021         }
10022
10023         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10024                 /* Enable some hw fixes.  */
10025                 val = tr32(TG3PCI_MSI_DATA);
10026                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10027                 tw32(TG3PCI_MSI_DATA, val);
10028         }
10029
10030         /* Descriptor ring init may make accesses to the
10031          * NIC SRAM area to setup the TX descriptors, so we
10032          * can only do this after the hardware has been
10033          * successfully reset.
10034          */
10035         err = tg3_init_rings(tp);
10036         if (err)
10037                 return err;
10038
10039         if (tg3_flag(tp, 57765_PLUS)) {
10040                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10041                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10042                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10043                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10044                 if (!tg3_flag(tp, 57765_CLASS) &&
10045                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10046                     tg3_asic_rev(tp) != ASIC_REV_5762)
10047                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10048                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10049         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10050                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10051                 /* This value is determined during the probe-time DMA
10052                  * engine test, tg3_test_dma().
10053                  */
10054                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10055         }
10056
10057         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10058                           GRC_MODE_4X_NIC_SEND_RINGS |
10059                           GRC_MODE_NO_TX_PHDR_CSUM |
10060                           GRC_MODE_NO_RX_PHDR_CSUM);
10061         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10062
10063         /* Pseudo-header checksum is done by hardware logic and not
10064          * the offload processors, so make the chip do the pseudo-
10065          * header checksums on receive.  For transmit it is more
10066          * convenient to do the pseudo-header checksum in software
10067          * as Linux does that on transmit for us in all cases.
10068          */
10069         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10070
10071         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10072         if (tp->rxptpctl)
10073                 tw32(TG3_RX_PTP_CTL,
10074                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10075
10076         if (tg3_flag(tp, PTP_CAPABLE))
10077                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10078
10079         tw32(GRC_MODE, tp->grc_mode | val);
10080
10081         /* On some AMD platforms, MRRS is restricted to 4000 because of a
10082          * south bridge limitation. As a workaround, the driver sets MRRS
10083          * to 2048 instead of the default 4096.
10084          */
10085         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10086             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10087                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10088                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10089         }
10090
10091         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10092         val = tr32(GRC_MISC_CFG);
10093         val &= ~0xff;
10094         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10095         tw32(GRC_MISC_CFG, val);
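        /* A prescaler value of 65 against the fixed 66 MHz clock is
         * consistent with a divide-by-(N + 1) design, i.e. a 1 MHz
         * (1 us) timer tick.
         */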
10096
10097         /* Initialize MBUF/DESC pool. */
10098         if (tg3_flag(tp, 5750_PLUS)) {
10099                 /* Do nothing.  */
10100         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10101                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10102                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10103                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10104                 else
10105                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10106                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10107                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10108         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10109                 int fw_len;
10110
10111                 fw_len = tp->fw_len;
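                /* Round the firmware length up to a 128-byte boundary
                 * with the usual (x + align - 1) & ~(align - 1) idiom,
                 * e.g. 0x1a31 -> 0x1a80, before carving the mbuf pool
                 * out of the SRAM above the firmware image.
                 */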
10112                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10113                 tw32(BUFMGR_MB_POOL_ADDR,
10114                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10115                 tw32(BUFMGR_MB_POOL_SIZE,
10116                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10117         }
10118
10119         if (tp->dev->mtu <= ETH_DATA_LEN) {
10120                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10121                      tp->bufmgr_config.mbuf_read_dma_low_water);
10122                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10123                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10124                 tw32(BUFMGR_MB_HIGH_WATER,
10125                      tp->bufmgr_config.mbuf_high_water);
10126         } else {
10127                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10128                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10129                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10130                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10131                 tw32(BUFMGR_MB_HIGH_WATER,
10132                      tp->bufmgr_config.mbuf_high_water_jumbo);
10133         }
10134         tw32(BUFMGR_DMA_LOW_WATER,
10135              tp->bufmgr_config.dma_low_water);
10136         tw32(BUFMGR_DMA_HIGH_WATER,
10137              tp->bufmgr_config.dma_high_water);
10138
10139         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10140         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10141                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10142         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10143             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10144             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10145             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10146                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10147         tw32(BUFMGR_MODE, val);
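        /* Poll for the buffer manager to report itself enabled, for at
         * most 2000 * 10us = 20ms, before declaring the device dead.
         */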
10148         for (i = 0; i < 2000; i++) {
10149                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10150                         break;
10151                 udelay(10);
10152         }
10153         if (i >= 2000) {
10154                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10155                 return -ENODEV;
10156         }
10157
10158         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10159                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10160
10161         tg3_setup_rxbd_thresholds(tp);
10162
10163         /* Initialize TG3_BDINFO's at:
10164          *  RCVDBDI_STD_BD:     standard eth size rx ring
10165          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10166          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10167          *
10168          * like so:
10169          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10170          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10171          *                              ring attribute flags
10172          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10173          *
10174          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10175          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10176          *
10177          * The size of each ring is fixed in the firmware, but the location is
10178          * configurable.
10179          */
10180         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10181              ((u64) tpr->rx_std_mapping >> 32));
10182         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10183              ((u64) tpr->rx_std_mapping & 0xffffffff));
10184         if (!tg3_flag(tp, 5717_PLUS))
10185                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10186                      NIC_SRAM_RX_BUFFER_DESC);
10187
10188         /* Disable the mini ring */
10189         if (!tg3_flag(tp, 5705_PLUS))
10190                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10191                      BDINFO_FLAGS_DISABLED);
10192
10193         /* Program the jumbo buffer descriptor ring control
10194          * blocks on those devices that have them.
10195          */
10196         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10197             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10198
10199                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10200                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10201                              ((u64) tpr->rx_jmb_mapping >> 32));
10202                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10203                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10204                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10205                               BDINFO_FLAGS_MAXLEN_SHIFT;
10206                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10207                              val | BDINFO_FLAGS_USE_EXT_RECV);
10208                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10209                             tg3_flag(tp, 57765_CLASS) ||
10210                             tg3_asic_rev(tp) == ASIC_REV_5762)
10211                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10212                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10213                 } else {
10214                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10215                              BDINFO_FLAGS_DISABLED);
10216                 }
10217
10218                 if (tg3_flag(tp, 57765_PLUS)) {
10219                         val = TG3_RX_STD_RING_SIZE(tp);
10220                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10221                         val |= (TG3_RX_STD_DMA_SZ << 2);
10222                 } else
10223                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10224         } else
10225                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10226
10227         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10228
10229         tpr->rx_std_prod_idx = tp->rx_pending;
10230         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10231
10232         tpr->rx_jmb_prod_idx =
10233                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10234         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10235
10236         tg3_rings_reset(tp);
10237
10238         /* Initialize MAC address and backoff seed. */
10239         __tg3_set_mac_addr(tp, false);
10240
10241         /* MTU + ethernet header + FCS + optional VLAN tag */
10242         tw32(MAC_RX_MTU_SIZE,
10243              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10244
10245         /* The slot time is changed by tg3_setup_phy if we
10246          * run at gigabit with half duplex.
10247          */
10248         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10249               (6 << TX_LENGTHS_IPG_SHIFT) |
10250               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10251
10252         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10253             tg3_asic_rev(tp) == ASIC_REV_5762)
10254                 val |= tr32(MAC_TX_LENGTHS) &
10255                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10256                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10257
10258         tw32(MAC_TX_LENGTHS, val);
10259
10260         /* Receive rules. */
10261         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10262         tw32(RCVLPC_CONFIG, 0x0181);
10263
10264         /* Calculate the RDMAC_MODE setting early; we need it to
10265          * determine the RCVLPC_STATS_ENABLE mask.
10266          */
10267         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10268                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10269                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10270                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10271                       RDMAC_MODE_LNGREAD_ENAB);
10272
10273         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10274                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10275
10276         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10277             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10278             tg3_asic_rev(tp) == ASIC_REV_57780)
10279                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10280                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10281                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10282
10283         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10284             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10285                 if (tg3_flag(tp, TSO_CAPABLE) &&
10286                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10287                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10288                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10289                            !tg3_flag(tp, IS_5788)) {
10290                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10291                 }
10292         }
10293
10294         if (tg3_flag(tp, PCI_EXPRESS))
10295                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10296
10297         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10298                 tp->dma_limit = 0;
10299                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10300                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10301                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10302                 }
10303         }
10304
10305         if (tg3_flag(tp, HW_TSO_1) ||
10306             tg3_flag(tp, HW_TSO_2) ||
10307             tg3_flag(tp, HW_TSO_3))
10308                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10309
10310         if (tg3_flag(tp, 57765_PLUS) ||
10311             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10312             tg3_asic_rev(tp) == ASIC_REV_57780)
10313                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10314
10315         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10316             tg3_asic_rev(tp) == ASIC_REV_5762)
10317                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10318
10319         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10320             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10321             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10322             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10323             tg3_flag(tp, 57765_PLUS)) {
10324                 u32 tgtreg;
10325
10326                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10327                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10328                 else
10329                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10330
10331                 val = tr32(tgtreg);
10332                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10333                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10334                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10335                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10336                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10337                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10338                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10339                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10340                 }
10341                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10342         }
10343
10344         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10345             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10346             tg3_asic_rev(tp) == ASIC_REV_5762) {
10347                 u32 tgtreg;
10348
10349                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10350                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10351                 else
10352                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10353
10354                 val = tr32(tgtreg);
10355                 tw32(tgtreg, val |
10356                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10357                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10358         }
10359
10360         /* Receive/send statistics. */
10361         if (tg3_flag(tp, 5750_PLUS)) {
10362                 val = tr32(RCVLPC_STATS_ENABLE);
10363                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10364                 tw32(RCVLPC_STATS_ENABLE, val);
10365         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10366                    tg3_flag(tp, TSO_CAPABLE)) {
10367                 val = tr32(RCVLPC_STATS_ENABLE);
10368                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10369                 tw32(RCVLPC_STATS_ENABLE, val);
10370         } else {
10371                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10372         }
10373         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10374         tw32(SNDDATAI_STATSENAB, 0xffffff);
10375         tw32(SNDDATAI_STATSCTRL,
10376              (SNDDATAI_SCTRL_ENABLE |
10377               SNDDATAI_SCTRL_FASTUPD));
10378
10379         /* Setup host coalescing engine. */
10380         tw32(HOSTCC_MODE, 0);
10381         for (i = 0; i < 2000; i++) {
10382                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10383                         break;
10384                 udelay(10);
10385         }
10386
10387         __tg3_set_coalesce(tp, &tp->coal);
10388
10389         if (!tg3_flag(tp, 5705_PLUS)) {
10390                 /* Status/statistics block address.  See tg3_timer,
10391                  * the tg3_periodic_fetch_stats call there, and
10392                  * tg3_get_stats to see how this works for 5705/5750 chips.
10393                  */
10394                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10395                      ((u64) tp->stats_mapping >> 32));
10396                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10397                      ((u64) tp->stats_mapping & 0xffffffff));
10398                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10399
10400                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10401
10402                 /* Clear statistics and status block memory areas */
10403                 for (i = NIC_SRAM_STATS_BLK;
10404                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10405                      i += sizeof(u32)) {
10406                         tg3_write_mem(tp, i, 0);
10407                         udelay(40);
10408                 }
10409         }
10410
10411         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10412
10413         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10414         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10415         if (!tg3_flag(tp, 5705_PLUS))
10416                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10417
10418         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10419                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10420                 /* Reset to prevent intermittently losing the 1st rx packet */
10421                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10422                 udelay(10);
10423         }
10424
10425         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10426                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10427                         MAC_MODE_FHDE_ENABLE;
10428         if (tg3_flag(tp, ENABLE_APE))
10429                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10430         if (!tg3_flag(tp, 5705_PLUS) &&
10431             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10432             tg3_asic_rev(tp) != ASIC_REV_5700)
10433                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10434         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10435         udelay(40);
10436
10437         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10438          * If TG3_FLAG_IS_NIC is zero, we should read the
10439          * register to preserve the GPIO settings for LOMs. The GPIOs,
10440          * whether used as inputs or outputs, are set by boot code after
10441          * reset.
10442          */
10443         if (!tg3_flag(tp, IS_NIC)) {
10444                 u32 gpio_mask;
10445
10446                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10447                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10448                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10449
10450                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10451                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10452                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10453
10454                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10455                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10456
10457                 tp->grc_local_ctrl &= ~gpio_mask;
10458                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10459
10460                 /* GPIO1 must be driven high for eeprom write protect */
10461                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10462                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10463                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10464         }
10465         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10466         udelay(100);
10467
10468         if (tg3_flag(tp, USING_MSIX)) {
10469                 val = tr32(MSGINT_MODE);
10470                 val |= MSGINT_MODE_ENABLE;
10471                 if (tp->irq_cnt > 1)
10472                         val |= MSGINT_MODE_MULTIVEC_EN;
10473                 if (!tg3_flag(tp, 1SHOT_MSI))
10474                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10475                 tw32(MSGINT_MODE, val);
10476         }
10477
10478         if (!tg3_flag(tp, 5705_PLUS)) {
10479                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10480                 udelay(40);
10481         }
10482
10483         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10484                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10485                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10486                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10487                WDMAC_MODE_LNGREAD_ENAB);
10488
10489         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10490             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10491                 if (tg3_flag(tp, TSO_CAPABLE) &&
10492                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10493                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10494                         /* nothing */
10495                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10496                            !tg3_flag(tp, IS_5788)) {
10497                         val |= WDMAC_MODE_RX_ACCEL;
10498                 }
10499         }
10500
10501         /* Enable host coalescing bug fix */
10502         if (tg3_flag(tp, 5755_PLUS))
10503                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10504
10505         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10506                 val |= WDMAC_MODE_BURST_ALL_DATA;
10507
10508         tw32_f(WDMAC_MODE, val);
10509         udelay(40);
10510
10511         if (tg3_flag(tp, PCIX_MODE)) {
10512                 u16 pcix_cmd;
10513
10514                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10515                                      &pcix_cmd);
10516                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10517                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10518                         pcix_cmd |= PCI_X_CMD_READ_2K;
10519                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10520                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10521                         pcix_cmd |= PCI_X_CMD_READ_2K;
10522                 }
10523                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10524                                       pcix_cmd);
10525         }
10526
10527         tw32_f(RDMAC_MODE, rdmac_mode);
10528         udelay(40);
10529
10530         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10531             tg3_asic_rev(tp) == ASIC_REV_5720) {
10532                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10533                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10534                                 break;
10535                 }
10536                 if (i < TG3_NUM_RDMA_CHANNELS) {
10537                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10538                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10539                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10540                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10541                 }
10542         }
10543
10544         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10545         if (!tg3_flag(tp, 5705_PLUS))
10546                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10547
10548         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10549                 tw32(SNDDATAC_MODE,
10550                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10551         else
10552                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10553
10554         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10555         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10556         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10557         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10558                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10559         tw32(RCVDBDI_MODE, val);
10560         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10561         if (tg3_flag(tp, HW_TSO_1) ||
10562             tg3_flag(tp, HW_TSO_2) ||
10563             tg3_flag(tp, HW_TSO_3))
10564                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10565         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10566         if (tg3_flag(tp, ENABLE_TSS))
10567                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10568         tw32(SNDBDI_MODE, val);
10569         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10570
10571         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10572                 err = tg3_load_5701_a0_firmware_fix(tp);
10573                 if (err)
10574                         return err;
10575         }
10576
10577         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10578                 /* Ignore any errors from the firmware download. If the
10579                  * download fails, the device will operate with EEE disabled.
10580                  */
10581                 tg3_load_57766_firmware(tp);
10582         }
10583
10584         if (tg3_flag(tp, TSO_CAPABLE)) {
10585                 err = tg3_load_tso_firmware(tp);
10586                 if (err)
10587                         return err;
10588         }
10589
10590         tp->tx_mode = TX_MODE_ENABLE;
10591
10592         if (tg3_flag(tp, 5755_PLUS) ||
10593             tg3_asic_rev(tp) == ASIC_REV_5906)
10594                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10595
10596         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10597             tg3_asic_rev(tp) == ASIC_REV_5762) {
10598                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10599                 tp->tx_mode &= ~val;
10600                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10601         }
10602
10603         tw32_f(MAC_TX_MODE, tp->tx_mode);
10604         udelay(100);
10605
10606         if (tg3_flag(tp, ENABLE_RSS)) {
10607                 u32 rss_key[10];
10608
10609                 tg3_rss_write_indir_tbl(tp);
10610
10611                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10612
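                /* Load the kernel-generated 40-byte RSS hash key into
                 * the ten consecutive 32-bit MAC_RSS_HASH_KEY registers.
                 */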
10613                 for (i = 0; i < 10; i++)
10614                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10615         }
10616
10617         tp->rx_mode = RX_MODE_ENABLE;
10618         if (tg3_flag(tp, 5755_PLUS))
10619                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10620
10621         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10622                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10623
10624         if (tg3_flag(tp, ENABLE_RSS))
10625                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10626                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10627                                RX_MODE_RSS_IPV6_HASH_EN |
10628                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10629                                RX_MODE_RSS_IPV4_HASH_EN |
10630                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10631
10632         tw32_f(MAC_RX_MODE, tp->rx_mode);
10633         udelay(10);
10634
10635         tw32(MAC_LED_CTRL, tp->led_ctrl);
10636
10637         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10638         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10639                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10640                 udelay(10);
10641         }
10642         tw32_f(MAC_RX_MODE, tp->rx_mode);
10643         udelay(10);
10644
10645         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10646                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10647                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10648                         /* Set drive transmission level to 1.2V, but
10649                          * only if the signal pre-emphasis bit is not set. */
10650                         val = tr32(MAC_SERDES_CFG);
10651                         val &= 0xfffff000;
10652                         val |= 0x880;
10653                         tw32(MAC_SERDES_CFG, val);
10654                 }
10655                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10656                         tw32(MAC_SERDES_CFG, 0x616000);
10657         }
10658
10659         /* Prevent chip from dropping frames when flow control
10660          * is enabled.
10661          */
10662         if (tg3_flag(tp, 57765_CLASS))
10663                 val = 1;
10664         else
10665                 val = 2;
10666         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10667
10668         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10669             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10670                 /* Use hardware link auto-negotiation */
10671                 tg3_flag_set(tp, HW_AUTONEG);
10672         }
10673
10674         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10675             tg3_asic_rev(tp) == ASIC_REV_5714) {
10676                 u32 tmp;
10677
10678                 tmp = tr32(SERDES_RX_CTRL);
10679                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10680                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10681                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10682                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10683         }
10684
10685         if (!tg3_flag(tp, USE_PHYLIB)) {
10686                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10687                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10688
10689                 err = tg3_setup_phy(tp, false);
10690                 if (err)
10691                         return err;
10692
10693                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10694                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10695                         u32 tmp;
10696
10697                         /* Clear CRC stats. */
10698                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10699                                 tg3_writephy(tp, MII_TG3_TEST1,
10700                                              tmp | MII_TG3_TEST1_CRC_EN);
10701                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10702                         }
10703                 }
10704         }
10705
10706         __tg3_set_rx_mode(tp->dev);
10707
10708         /* Initialize receive rules. */
10709         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10710         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10711         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10712         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10713
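        /* The chip has 16 receive rule slots, or only 8 on
         * 5705-and-newer parts outside the 5780 class; four more are
         * held back when ASF is enabled, presumably for firmware use.
         * The switch below zeroes the unused slots from limit - 1
         * down to slot 4.
         */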
10714         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10715                 limit = 8;
10716         else
10717                 limit = 16;
10718         if (tg3_flag(tp, ENABLE_ASF))
10719                 limit -= 4;
10720         switch (limit) {
10721         case 16:
10722                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10723                 /* fall through */
10724         case 15:
10725                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10726                 /* fall through */
10727         case 14:
10728                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10729                 /* fall through */
10730         case 13:
10731                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10732                 /* fall through */
10733         case 12:
10734                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10735                 /* fall through */
10736         case 11:
10737                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10738                 /* fall through */
10739         case 10:
10740                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10741                 /* fall through */
10742         case 9:
10743                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10744                 /* fall through */
10745         case 8:
10746                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10747                 /* fall through */
10748         case 7:
10749                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10750                 /* fall through */
10751         case 6:
10752                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10753                 /* fall through */
10754         case 5:
10755                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10756                 /* fall through */
10757         case 4:
10758                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10759         case 3:
10760                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10761         case 2:
10762         case 1:
10763
10764         default:
10765                 break;
10766         }
10767
10768         if (tg3_flag(tp, ENABLE_APE))
10769                 /* Write our heartbeat update interval to APE. */
10770                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10771                                 APE_HOST_HEARTBEAT_INT_5SEC);
10772
10773         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10774
10775         return 0;
10776 }
10777
10778 /* Called at device open time to get the chip ready for
10779  * packet processing.  Invoked with tp->lock held.
10780  */
10781 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10782 {
10783         /* The chip may have just been powered on. If so, the boot code may still
10784          * be running initialization. Wait for it to finish to avoid races in
10785          * accessing the hardware.
10786          */
10787         tg3_enable_register_access(tp);
10788         tg3_poll_fw(tp);
10789
10790         tg3_switch_clocks(tp);
10791
10792         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10793
10794         return tg3_reset_hw(tp, reset_phy);
10795 }
10796
10797 #ifdef CONFIG_TIGON3_HWMON
10798 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10799 {
10800         u32 off, len = TG3_OCIR_LEN;
10801         int i;
10802
10803         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10804                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10805
10806                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10807                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10808                         memset(ocir, 0, len);
10809         }
10810 }
10811
10812 /* sysfs attributes for hwmon */
10813 static ssize_t tg3_show_temp(struct device *dev,
10814                              struct device_attribute *devattr, char *buf)
10815 {
10816         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10817         struct tg3 *tp = dev_get_drvdata(dev);
10818         u32 temperature;
10819
10820         spin_lock_bh(&tp->lock);
10821         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10822                                 sizeof(temperature));
10823         spin_unlock_bh(&tp->lock);
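        /* The hwmon sysfs ABI reports temperatures in millidegrees
         * Celsius; the APE value appears to be whole degrees, hence
         * the factor of 1000.
         */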
10824         return sprintf(buf, "%u\n", temperature * 1000);
10825 }
10826
10828 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10829                           TG3_TEMP_SENSOR_OFFSET);
10830 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10831                           TG3_TEMP_CAUTION_OFFSET);
10832 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10833                           TG3_TEMP_MAX_OFFSET);
10834
10835 static struct attribute *tg3_attrs[] = {
10836         &sensor_dev_attr_temp1_input.dev_attr.attr,
10837         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10838         &sensor_dev_attr_temp1_max.dev_attr.attr,
10839         NULL
10840 };
10841 ATTRIBUTE_GROUPS(tg3);
10842
10843 static void tg3_hwmon_close(struct tg3 *tp)
10844 {
10845         if (tp->hwmon_dev) {
10846                 hwmon_device_unregister(tp->hwmon_dev);
10847                 tp->hwmon_dev = NULL;
10848         }
10849 }
10850
10851 static void tg3_hwmon_open(struct tg3 *tp)
10852 {
10853         int i;
10854         u32 size = 0;
10855         struct pci_dev *pdev = tp->pdev;
10856         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10857
10858         tg3_sd_scan_scratchpad(tp, ocirs);
10859
10860         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10861                 if (!ocirs[i].src_data_length)
10862                         continue;
10863
10864                 size += ocirs[i].src_hdr_length;
10865                 size += ocirs[i].src_data_length;
10866         }
10867
10868         if (!size)
10869                 return;
10870
10871         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10872                                                           tp, tg3_groups);
10873         if (IS_ERR(tp->hwmon_dev)) {
10874                 tp->hwmon_dev = NULL;
10875                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10876         }
10877 }
10878 #else
10879 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10880 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10881 #endif /* CONFIG_TIGON3_HWMON */
10882
10883
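/* Accumulate a 32-bit hardware statistics register into a 64-bit
 * high/low counter.  The carry test works because, if the 32-bit add
 * wrapped, the new low word must be smaller than the value that was
 * just added to it.
 */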
10884 #define TG3_STAT_ADD32(PSTAT, REG) \
10885 do {    u32 __val = tr32(REG); \
10886         (PSTAT)->low += __val; \
10887         if ((PSTAT)->low < __val) \
10888                 (PSTAT)->high += 1; \
10889 } while (0)
10890
10891 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10892 {
10893         struct tg3_hw_stats *sp = tp->hw_stats;
10894
10895         if (!tp->link_up)
10896                 return;
10897
10898         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10899         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10900         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10901         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10902         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10903         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10904         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10905         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10906         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10907         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10908         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10909         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10910         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10911         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10912                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10913                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10914                 u32 val;
10915
10916                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10917                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10918                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10919                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10920         }
10921
10922         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10923         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10924         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10925         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10926         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10927         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10928         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10929         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10930         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10931         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10932         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10933         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10934         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10935         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10936
10937         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10938         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10939             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10940             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10941             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10942                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10943         } else {
10944                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10945                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10946                 if (val) {
10947                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10948                         sp->rx_discards.low += val;
10949                         if (sp->rx_discards.low < val)
10950                                 sp->rx_discards.high += 1;
10951                 }
10952                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10953         }
10954         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10955 }
10956
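/* Called from tg3_timer on 5717 and 57765-class chips, apparently to
 * paper over occasionally lost MSIs: if a vector has pending work but
 * its rx/tx consumer indices have not advanced since the previous
 * tick, give it one tick of grace (chk_msi_cnt) and then invoke the
 * MSI handler directly.
 */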
10957 static void tg3_chk_missed_msi(struct tg3 *tp)
10958 {
10959         u32 i;
10960
10961         for (i = 0; i < tp->irq_cnt; i++) {
10962                 struct tg3_napi *tnapi = &tp->napi[i];
10963
10964                 if (tg3_has_work(tnapi)) {
10965                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10966                             tnapi->last_tx_cons == tnapi->tx_cons) {
10967                                 if (tnapi->chk_msi_cnt < 1) {
10968                                         tnapi->chk_msi_cnt++;
10969                                         return;
10970                                 }
10971                                 tg3_msi(0, tnapi);
10972                         }
10973                 }
10974                 tnapi->chk_msi_cnt = 0;
10975                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10976                 tnapi->last_tx_cons = tnapi->tx_cons;
10977         }
10978 }
10979
10980 static void tg3_timer(struct timer_list *t)
10981 {
10982         struct tg3 *tp = from_timer(tp, t, timer);
10983
10984         spin_lock(&tp->lock);
10985
10986         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10987                 spin_unlock(&tp->lock);
10988                 goto restart_timer;
10989         }
10990
10991         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10992             tg3_flag(tp, 57765_CLASS))
10993                 tg3_chk_missed_msi(tp);
10994
10995         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10996                 /* BCM4785: Flush posted writes from GbE to host memory. */
10997                 tr32(HOSTCC_MODE);
10998         }
10999
11000         if (!tg3_flag(tp, TAGGED_STATUS)) {
11001                 /* All of this garbage is because, when using non-tagged
11002                  * IRQ status, the mailbox/status_block protocol the chip
11003                  * uses with the CPU is race prone.
11004                  */
11005                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11006                         tw32(GRC_LOCAL_CTRL,
11007                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11008                 } else {
11009                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11010                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11011                 }
11012
11013                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11014                         spin_unlock(&tp->lock);
11015                         tg3_reset_task_schedule(tp);
11016                         goto restart_timer;
11017                 }
11018         }
11019
11020         /* This part only runs once per second. */
11021         if (!--tp->timer_counter) {
11022                 if (tg3_flag(tp, 5705_PLUS))
11023                         tg3_periodic_fetch_stats(tp);
11024
11025                 if (tp->setlpicnt && !--tp->setlpicnt)
11026                         tg3_phy_eee_enable(tp);
11027
11028                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11029                         u32 mac_stat;
11030                         int phy_event;
11031
11032                         mac_stat = tr32(MAC_STATUS);
11033
11034                         phy_event = 0;
11035                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11036                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11037                                         phy_event = 1;
11038                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11039                                 phy_event = 1;
11040
11041                         if (phy_event)
11042                                 tg3_setup_phy(tp, false);
11043                 } else if (tg3_flag(tp, POLL_SERDES)) {
11044                         u32 mac_stat = tr32(MAC_STATUS);
11045                         int need_setup = 0;
11046
11047                         if (tp->link_up &&
11048                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11049                                 need_setup = 1;
11050                         }
11051                         if (!tp->link_up &&
11052                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11053                                          MAC_STATUS_SIGNAL_DET))) {
11054                                 need_setup = 1;
11055                         }
11056                         if (need_setup) {
11057                                 if (!tp->serdes_counter) {
11058                                         tw32_f(MAC_MODE,
11059                                              (tp->mac_mode &
11060                                               ~MAC_MODE_PORT_MODE_MASK));
11061                                         udelay(40);
11062                                         tw32_f(MAC_MODE, tp->mac_mode);
11063                                         udelay(40);
11064                                 }
11065                                 tg3_setup_phy(tp, false);
11066                         }
11067                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11068                            tg3_flag(tp, 5780_CLASS)) {
11069                         tg3_serdes_parallel_detect(tp);
11070                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11071                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11072                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11073                                          TG3_CPMU_STATUS_LINK_MASK);
11074
11075                         if (link_up != tp->link_up)
11076                                 tg3_setup_phy(tp, false);
11077                 }
11078
11079                 tp->timer_counter = tp->timer_multiplier;
11080         }
11081
11082         /* Heartbeat is only sent once every 2 seconds.
11083          *
11084          * The heartbeat is to tell the ASF firmware that the host
11085          * driver is still alive.  In the event that the OS crashes,
11086          * ASF needs to reset the hardware to free up the FIFO space
11087          * that may be filled with rx packets destined for the host.
11088          * If the FIFO is full, ASF will no longer function properly.
11089          *
11090          * Unintended resets have been reported on real time kernels
11091          * where the timer doesn't run on time.  Netpoll will also have
11092          * the same problem.
11093          *
11094          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11095          * to check the ring condition when the heartbeat is expiring
11096          * before doing the reset.  This will prevent most unintended
11097          * resets.
11098          */
11099         if (!--tp->asf_counter) {
11100                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11101                         tg3_wait_for_event_ack(tp);
11102
11103                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11104                                       FWCMD_NICDRV_ALIVE3);
11105                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11106                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11107                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11108
11109                         tg3_generate_fw_event(tp);
11110                 }
11111                 tp->asf_counter = tp->asf_multiplier;
11112         }
11113
11114         /* Update the APE heartbeat every 5 seconds. */
11115         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11116
11117         spin_unlock(&tp->lock);
11118
11119 restart_timer:
11120         tp->timer.expires = jiffies + tp->timer_offset;
11121         add_timer(&tp->timer);
11122 }
11123
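/* The timer fires every timer_offset jiffies: once per second when
 * tagged status makes the interrupt path safe, ten times per second
 * otherwise (including on 5717/57765-class chips, where the extra
 * ticks also drive tg3_chk_missed_msi).  timer_multiplier converts
 * ticks to once-per-second work; asf_multiplier spaces the ASF
 * heartbeat TG3_FW_UPDATE_FREQ_SEC seconds apart.
 */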
11124 static void tg3_timer_init(struct tg3 *tp)
11125 {
11126         if (tg3_flag(tp, TAGGED_STATUS) &&
11127             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11128             !tg3_flag(tp, 57765_CLASS))
11129                 tp->timer_offset = HZ;
11130         else
11131                 tp->timer_offset = HZ / 10;
11132
11133         BUG_ON(tp->timer_offset > HZ);
11134
11135         tp->timer_multiplier = (HZ / tp->timer_offset);
11136         tp->asf_multiplier = (HZ / tp->timer_offset) *
11137                              TG3_FW_UPDATE_FREQ_SEC;
11138
11139         timer_setup(&tp->timer, tg3_timer, 0);
11140 }
11141
11142 static void tg3_timer_start(struct tg3 *tp)
11143 {
11144         tp->asf_counter   = tp->asf_multiplier;
11145         tp->timer_counter = tp->timer_multiplier;
11146
11147         tp->timer.expires = jiffies + tp->timer_offset;
11148         add_timer(&tp->timer);
11149 }
11150
11151 static void tg3_timer_stop(struct tg3 *tp)
11152 {
11153         del_timer_sync(&tp->timer);
11154 }
11155
11156 /* Restart hardware after configuration changes, self-test, etc.
11157  * Invoked with tp->lock held.
11158  */
11159 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11160         __releases(tp->lock)
11161         __acquires(tp->lock)
11162 {
11163         int err;
11164
11165         err = tg3_init_hw(tp, reset_phy);
11166         if (err) {
11167                 netdev_err(tp->dev,
11168                            "Failed to re-initialize device, aborting\n");
11169                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11170                 tg3_full_unlock(tp);
11171                 tg3_timer_stop(tp);
11172                 tp->irq_sync = 0;
11173                 tg3_napi_enable(tp);
11174                 dev_close(tp->dev);
11175                 tg3_full_lock(tp, 0);
11176         }
11177         return err;
11178 }
11179
11180 static void tg3_reset_task(struct work_struct *work)
11181 {
11182         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11183         int err;
11184
11185         rtnl_lock();
11186         tg3_full_lock(tp, 0);
11187
11188         if (!netif_running(tp->dev)) {
11189                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11190                 tg3_full_unlock(tp);
11191                 rtnl_unlock();
11192                 return;
11193         }
11194
11195         tg3_full_unlock(tp);
11196
11197         tg3_phy_stop(tp);
11198
11199         tg3_netif_stop(tp);
11200
11201         tg3_full_lock(tp, 1);
11202
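        /* If a TX timeout triggered this reset, assume posted mailbox
         * writes may have been reordered on the way to the chip:
         * switch to the flushed mailbox accessors and record the
         * workaround before reinitializing the hardware.
         */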
11203         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11204                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11205                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11206                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11207                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11208         }
11209
11210         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11211         err = tg3_init_hw(tp, true);
11212         if (err)
11213                 goto out;
11214
11215         tg3_netif_start(tp);
11216
11217 out:
11218         tg3_full_unlock(tp);
11219
11220         if (!err)
11221                 tg3_phy_start(tp);
11222
11223         tg3_flag_clear(tp, RESET_TASK_PENDING);
11224         rtnl_unlock();
11225 }
11226
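/* Per-vector IRQ registration.  With a single vector the IRQ simply
 * uses the netdev name; with MSI-X each vector is named after what its
 * NAPI context services ("<ifname>-txrx-N", "-tx-N", "-rx-N"), which
 * is what shows up in /proc/interrupts.  Legacy INTx uses a shared
 * handler; tagged-status chips get the tagged variant.
 */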
11227 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11228 {
11229         irq_handler_t fn;
11230         unsigned long flags;
11231         char *name;
11232         struct tg3_napi *tnapi = &tp->napi[irq_num];
11233
11234         if (tp->irq_cnt == 1)
11235                 name = tp->dev->name;
11236         else {
11237                 name = &tnapi->irq_lbl[0];
11238                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11239                         snprintf(name, IFNAMSIZ,
11240                                  "%s-txrx-%d", tp->dev->name, irq_num);
11241                 else if (tnapi->tx_buffers)
11242                         snprintf(name, IFNAMSIZ,
11243                                  "%s-tx-%d", tp->dev->name, irq_num);
11244                 else if (tnapi->rx_rcb)
11245                         snprintf(name, IFNAMSIZ,
11246                                  "%s-rx-%d", tp->dev->name, irq_num);
11247                 else
11248                         snprintf(name, IFNAMSIZ,
11249                                  "%s-%d", tp->dev->name, irq_num);
11250                 name[IFNAMSIZ-1] = 0;
11251         }
11252
11253         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11254                 fn = tg3_msi;
11255                 if (tg3_flag(tp, 1SHOT_MSI))
11256                         fn = tg3_msi_1shot;
11257                 flags = 0;
11258         } else {
11259                 fn = tg3_interrupt;
11260                 if (tg3_flag(tp, TAGGED_STATUS))
11261                         fn = tg3_interrupt_tagged;
11262                 flags = IRQF_SHARED;
11263         }
11264
11265         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11266 }
11267
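/* Interrupt self-test: temporarily install tg3_test_isr, force the
 * host coalescing engine to fire (tnapi->coal_now), then poll for up
 * to 5 x 10 ms.  Either a non-zero interrupt mailbox value or the PCI
 * INT mask bit in MISC_HOST_CTRL (which the test ISR sets when it
 * runs) counts as proof that the interrupt was delivered.
 */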
11268 static int tg3_test_interrupt(struct tg3 *tp)
11269 {
11270         struct tg3_napi *tnapi = &tp->napi[0];
11271         struct net_device *dev = tp->dev;
11272         int err, i, intr_ok = 0;
11273         u32 val;
11274
11275         if (!netif_running(dev))
11276                 return -ENODEV;
11277
11278         tg3_disable_ints(tp);
11279
11280         free_irq(tnapi->irq_vec, tnapi);
11281
11282         /*
11283          * Turn off MSI one shot mode.  Otherwise this test has no
11284          * observable way to know whether the interrupt was delivered.
11285          */
11286         if (tg3_flag(tp, 57765_PLUS)) {
11287                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11288                 tw32(MSGINT_MODE, val);
11289         }
11290
11291         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11292                           IRQF_SHARED, dev->name, tnapi);
11293         if (err)
11294                 return err;
11295
11296         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11297         tg3_enable_ints(tp);
11298
11299         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11300                tnapi->coal_now);
11301
11302         for (i = 0; i < 5; i++) {
11303                 u32 int_mbox, misc_host_ctrl;
11304
11305                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11306                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11307
11308                 if ((int_mbox != 0) ||
11309                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11310                         intr_ok = 1;
11311                         break;
11312                 }
11313
11314                 if (tg3_flag(tp, 57765_PLUS) &&
11315                     tnapi->hw_status->status_tag != tnapi->last_tag)
11316                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11317
11318                 msleep(10);
11319         }
11320
11321         tg3_disable_ints(tp);
11322
11323         free_irq(tnapi->irq_vec, tnapi);
11324
11325         err = tg3_request_irq(tp, 0);
11326
11327         if (err)
11328                 return err;
11329
11330         if (intr_ok) {
11331                 /* Reenable MSI one shot mode. */
11332                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11333                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11334                         tw32(MSGINT_MODE, val);
11335                 }
11336                 return 0;
11337         }
11338
11339         return -EIO;
11340 }
11341
11342 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11343  * INTx mode is successfully restored.
11344  */
11345 static int tg3_test_msi(struct tg3 *tp)
11346 {
11347         int err;
11348         u16 pci_cmd;
11349
11350         if (!tg3_flag(tp, USING_MSI))
11351                 return 0;
11352
11353         /* Turn off SERR reporting in case MSI terminates with Master
11354          * Abort.
11355          */
11356         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11357         pci_write_config_word(tp->pdev, PCI_COMMAND,
11358                               pci_cmd & ~PCI_COMMAND_SERR);
11359
11360         err = tg3_test_interrupt(tp);
11361
11362         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11363
11364         if (!err)
11365                 return 0;
11366
11367         /* other failures */
11368         if (err != -EIO)
11369                 return err;
11370
11371         /* MSI test failed, go back to INTx mode */
11372         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11373                     "to INTx mode. Please report this failure to the PCI "
11374                     "maintainer and include system chipset information\n");
11375
11376         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11377
11378         pci_disable_msi(tp->pdev);
11379
11380         tg3_flag_clear(tp, USING_MSI);
11381         tp->napi[0].irq_vec = tp->pdev->irq;
11382
11383         err = tg3_request_irq(tp, 0);
11384         if (err)
11385                 return err;
11386
11387         /* Need to reset the chip because the MSI cycle may have terminated
11388          * with Master Abort.
11389          */
11390         tg3_full_lock(tp, 1);
11391
11392         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393         err = tg3_init_hw(tp, true);
11394
11395         tg3_full_unlock(tp);
11396
11397         if (err)
11398                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11399
11400         return err;
11401 }
11402
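/* For reference (layout per tg3.h): the firmware image parsed below
 * starts with a tg3_firmware_hdr -- version, base address and length,
 * all big-endian -- followed by the payload; TG3_FW_HDR_LEN is the
 * size of that header.
 */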
11403 static int tg3_request_firmware(struct tg3 *tp)
11404 {
11405         const struct tg3_firmware_hdr *fw_hdr;
11406
11407         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11408                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11409                            tp->fw_needed);
11410                 return -ENOENT;
11411         }
11412
11413         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11414
11415         /* Firmware blob starts with version numbers, followed by
11416          * start address and _full_ length including BSS sections
11417          * (which must be at least as long as the actual data, of course).
11418          */
11419
11420         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11421         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11422                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11423                            tp->fw_len, tp->fw_needed);
11424                 release_firmware(tp->fw);
11425                 tp->fw = NULL;
11426                 return -EINVAL;
11427         }
11428
11429         /* We no longer need firmware; we have it. */
11430         tp->fw_needed = NULL;
11431         return 0;
11432 }
11433
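/* Worked example for tg3_irq_count() below (hypothetical numbers): on
 * a 4-CPU system with rxq_cnt = 4 and txq_cnt = 1, the driver requests
 * min(4 + 1, irq_max) vectors -- the extra vector is vector 0, which
 * handles only link and status interrupts in multiqueue MSI-X mode.
 */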
11434 static u32 tg3_irq_count(struct tg3 *tp)
11435 {
11436         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11437
11438         if (irq_cnt > 1) {
11439                 /* We want as many rx rings enabled as there are cpus.
11440                  * In multiqueue MSI-X mode, the first MSI-X vector
11441                  * only deals with link interrupts, etc, so we add
11442                  * one to the number of vectors we are requesting.
11443                  */
11444                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11445         }
11446
11447         return irq_cnt;
11448 }
11449
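/* MSI-X setup.  pci_enable_msix_range(..., 1, tp->irq_cnt) may grant
 * fewer vectors than requested; rather than failing, the code below
 * shrinks rxq_cnt to (granted - 1), vector 0 still being reserved for
 * link/status work, and caps txq_cnt to match.
 */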
11450 static bool tg3_enable_msix(struct tg3 *tp)
11451 {
11452         int i, rc;
11453         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11454
11455         tp->txq_cnt = tp->txq_req;
11456         tp->rxq_cnt = tp->rxq_req;
11457         if (!tp->rxq_cnt)
11458                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11459         if (tp->rxq_cnt > tp->rxq_max)
11460                 tp->rxq_cnt = tp->rxq_max;
11461
11462         /* Disable multiple TX rings by default.  Simple round-robin hardware
11463          * scheduling of the TX rings can cause starvation of rings with
11464          * small packets when other rings have TSO or jumbo packets.
11465          */
11466         if (!tp->txq_req)
11467                 tp->txq_cnt = 1;
11468
11469         tp->irq_cnt = tg3_irq_count(tp);
11470
11471         for (i = 0; i < tp->irq_max; i++) {
11472                 msix_ent[i].entry  = i;
11473                 msix_ent[i].vector = 0;
11474         }
11475
11476         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11477         if (rc < 0) {
11478                 return false;
11479         } else if (rc < tp->irq_cnt) {
11480                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11481                               tp->irq_cnt, rc);
11482                 tp->irq_cnt = rc;
11483                 tp->rxq_cnt = max(rc - 1, 1);
11484                 if (tp->txq_cnt)
11485                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11486         }
11487
11488         for (i = 0; i < tp->irq_max; i++)
11489                 tp->napi[i].irq_vec = msix_ent[i].vector;
11490
11491         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11492                 pci_disable_msix(tp->pdev);
11493                 return false;
11494         }
11495
11496         if (tp->irq_cnt == 1)
11497                 return true;
11498
11499         tg3_flag_set(tp, ENABLE_RSS);
11500
11501         if (tp->txq_cnt > 1)
11502                 tg3_flag_set(tp, ENABLE_TSS);
11503
11504         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11505
11506         return true;
11507 }
11508
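/* Interrupt-mode selection: try MSI-X first, then MSI, then fall back
 * to legacy INTx (the defcfg path).  MSI without tagged status is
 * treated as a configuration bug -- every MSI-capable chip is expected
 * to support tagged status -- so that combination also falls back to
 * INTx.
 */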
11509 static void tg3_ints_init(struct tg3 *tp)
11510 {
11511         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11512             !tg3_flag(tp, TAGGED_STATUS)) {
11513                 /* All MSI supporting chips should support tagged
11514                  * status.  Assert that this is the case.
11515                  */
11516                 netdev_warn(tp->dev,
11517                             "MSI without TAGGED_STATUS? Not using MSI\n");
11518                 goto defcfg;
11519         }
11520
11521         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11522                 tg3_flag_set(tp, USING_MSIX);
11523         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11524                 tg3_flag_set(tp, USING_MSI);
11525
11526         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11527                 u32 msi_mode = tr32(MSGINT_MODE);
11528                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11529                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11530                 if (!tg3_flag(tp, 1SHOT_MSI))
11531                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11532                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11533         }
11534 defcfg:
11535         if (!tg3_flag(tp, USING_MSIX)) {
11536                 tp->irq_cnt = 1;
11537                 tp->napi[0].irq_vec = tp->pdev->irq;
11538         }
11539
11540         if (tp->irq_cnt == 1) {
11541                 tp->txq_cnt = 1;
11542                 tp->rxq_cnt = 1;
11543                 netif_set_real_num_tx_queues(tp->dev, 1);
11544                 netif_set_real_num_rx_queues(tp->dev, 1);
11545         }
11546 }
11547
11548 static void tg3_ints_fini(struct tg3 *tp)
11549 {
11550         if (tg3_flag(tp, USING_MSIX))
11551                 pci_disable_msix(tp->pdev);
11552         else if (tg3_flag(tp, USING_MSI))
11553                 pci_disable_msi(tp->pdev);
11554         tg3_flag_clear(tp, USING_MSI);
11555         tg3_flag_clear(tp, USING_MSIX);
11556         tg3_flag_clear(tp, ENABLE_RSS);
11557         tg3_flag_clear(tp, ENABLE_TSS);
11558 }
11559
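/* Bring-up order in tg3_start() matters: interrupt vectors are sized
 * first (tg3_ints_init) so that the right number of NAPI contexts and
 * rings can be allocated, then IRQs are requested, the hardware is
 * initialized under tp->lock, the optional MSI self-test runs, and
 * only then are the timer, interrupts and TX queues enabled.
 */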
11560 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11561                      bool init)
11562 {
11563         struct net_device *dev = tp->dev;
11564         int i, err;
11565
11566         /*
11567          * Setup interrupts first so we know how
11568          * many NAPI resources to allocate
11569          */
11570         tg3_ints_init(tp);
11571
11572         tg3_rss_check_indir_tbl(tp);
11573
11574         /* The placement of this call is tied
11575          * to the setup and use of Host TX descriptors.
11576          */
11577         err = tg3_alloc_consistent(tp);
11578         if (err)
11579                 goto out_ints_fini;
11580
11581         tg3_napi_init(tp);
11582
11583         tg3_napi_enable(tp);
11584
11585         for (i = 0; i < tp->irq_cnt; i++) {
11586                 err = tg3_request_irq(tp, i);
11587                 if (err) {
11588                         for (i--; i >= 0; i--) {
11589                                 struct tg3_napi *tnapi = &tp->napi[i];
11590
11591                                 free_irq(tnapi->irq_vec, tnapi);
11592                         }
11593                         goto out_napi_fini;
11594                 }
11595         }
11596
11597         tg3_full_lock(tp, 0);
11598
11599         if (init)
11600                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11601
11602         err = tg3_init_hw(tp, reset_phy);
11603         if (err) {
11604                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605                 tg3_free_rings(tp);
11606         }
11607
11608         tg3_full_unlock(tp);
11609
11610         if (err)
11611                 goto out_free_irq;
11612
11613         if (test_irq && tg3_flag(tp, USING_MSI)) {
11614                 err = tg3_test_msi(tp);
11615
11616                 if (err) {
11617                         tg3_full_lock(tp, 0);
11618                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11619                         tg3_free_rings(tp);
11620                         tg3_full_unlock(tp);
11621
11622                         goto out_napi_fini;
11623                 }
11624
11625                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11626                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11627
11628                         tw32(PCIE_TRANSACTION_CFG,
11629                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11630                 }
11631         }
11632
11633         tg3_phy_start(tp);
11634
11635         tg3_hwmon_open(tp);
11636
11637         tg3_full_lock(tp, 0);
11638
11639         tg3_timer_start(tp);
11640         tg3_flag_set(tp, INIT_COMPLETE);
11641         tg3_enable_ints(tp);
11642
11643         tg3_ptp_resume(tp);
11644
11645         tg3_full_unlock(tp);
11646
11647         netif_tx_start_all_queues(dev);
11648
11649         /*
11650          * Reset the loopback feature if it was turned on while the device
11651          * was down; make sure that it is installed properly now.
11652          */
11653         if (dev->features & NETIF_F_LOOPBACK)
11654                 tg3_set_loopback(dev, dev->features);
11655
11656         return 0;
11657
11658 out_free_irq:
11659         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11660                 struct tg3_napi *tnapi = &tp->napi[i];
11661                 free_irq(tnapi->irq_vec, tnapi);
11662         }
11663
11664 out_napi_fini:
11665         tg3_napi_disable(tp);
11666         tg3_napi_fini(tp);
11667         tg3_free_consistent(tp);
11668
11669 out_ints_fini:
11670         tg3_ints_fini(tp);
11671
11672         return err;
11673 }
11674
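/* Teardown mirrors tg3_start() in reverse: cancel any pending reset
 * work, quiesce NAPI/netif and the timer, halt the chip under
 * tp->lock, then release IRQs, interrupt vectors, NAPI contexts and
 * DMA memory.
 */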
11675 static void tg3_stop(struct tg3 *tp)
11676 {
11677         int i;
11678
11679         tg3_reset_task_cancel(tp);
11680         tg3_netif_stop(tp);
11681
11682         tg3_timer_stop(tp);
11683
11684         tg3_hwmon_close(tp);
11685
11686         tg3_phy_stop(tp);
11687
11688         tg3_full_lock(tp, 1);
11689
11690         tg3_disable_ints(tp);
11691
11692         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693         tg3_free_rings(tp);
11694         tg3_flag_clear(tp, INIT_COMPLETE);
11695
11696         tg3_full_unlock(tp);
11697
11698         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11699                 struct tg3_napi *tnapi = &tp->napi[i];
11700                 free_irq(tnapi->irq_vec, tnapi);
11701         }
11702
11703         tg3_ints_fini(tp);
11704
11705         tg3_napi_fini(tp);
11706
11707         tg3_free_consistent(tp);
11708 }
11709
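/* ndo_open.  Opening is refused while PCI error recovery is in
 * progress.  If a firmware blob is required, failure to load it is
 * fatal only on 5701 A0; on 57766 it merely disables EEE, and on other
 * chips it disables TSO, with the capability restored once the
 * firmware loads again.
 */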
11710 static int tg3_open(struct net_device *dev)
11711 {
11712         struct tg3 *tp = netdev_priv(dev);
11713         int err;
11714
11715         if (tp->pcierr_recovery) {
11716                 netdev_err(dev, "Failed to open device. PCI error recovery "
11717                            "in progress\n");
11718                 return -EAGAIN;
11719         }
11720
11721         if (tp->fw_needed) {
11722                 err = tg3_request_firmware(tp);
11723                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11724                         if (err) {
11725                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11726                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11727                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11728                                 netdev_warn(tp->dev, "EEE capability restored\n");
11729                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11730                         }
11731                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11732                         if (err)
11733                                 return err;
11734                 } else if (err) {
11735                         netdev_warn(tp->dev, "TSO capability disabled\n");
11736                         tg3_flag_clear(tp, TSO_CAPABLE);
11737                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11738                         netdev_notice(tp->dev, "TSO capability restored\n");
11739                         tg3_flag_set(tp, TSO_CAPABLE);
11740                 }
11741         }
11742
11743         tg3_carrier_off(tp);
11744
11745         err = tg3_power_up(tp);
11746         if (err)
11747                 return err;
11748
11749         tg3_full_lock(tp, 0);
11750
11751         tg3_disable_ints(tp);
11752         tg3_flag_clear(tp, INIT_COMPLETE);
11753
11754         tg3_full_unlock(tp);
11755
11756         err = tg3_start(tp,
11757                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11758                         true, true);
11759         if (err) {
11760                 tg3_frob_aux_power(tp, false);
11761                 pci_set_power_state(tp->pdev, PCI_D3hot);
11762         }
11763
11764         return err;
11765 }
11766
11767 static int tg3_close(struct net_device *dev)
11768 {
11769         struct tg3 *tp = netdev_priv(dev);
11770
11771         if (tp->pcierr_recovery) {
11772                 netdev_err(dev, "Failed to close device. PCI error recovery "
11773                            "in progress\n");
11774                 return -EAGAIN;
11775         }
11776
11777         tg3_stop(tp);
11778
11779         if (pci_device_is_present(tp->pdev)) {
11780                 tg3_power_down_prepare(tp);
11781
11782                 tg3_carrier_off(tp);
11783         }
11784         return 0;
11785 }
11786
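/* Hardware counters are exported as {high, low} 32-bit halves;
 * get_stat64() stitches them into a u64.  For example (hypothetical
 * values), high = 0x1 and low = 0x5 read back as 0x0000000100000005.
 */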
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11788 {
11789        return ((u64)val->high << 32) | ((u64)val->low);
11790 }
11791
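/* On 5700/5701 with a copper PHY, the CRC error count comes from a PHY
 * counter that the driver treats as clearing on each read, so every
 * sample is accumulated into tp->phy_crc_errors.  All other
 * configurations report rx_fcs_errors straight from the MAC statistics
 * block.
 */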
11792 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11793 {
11794         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11795
11796         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11797             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11798              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11799                 u32 val;
11800
11801                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11802                         tg3_writephy(tp, MII_TG3_TEST1,
11803                                      val | MII_TG3_TEST1_CRC_EN);
11804                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11805                 } else
11806                         val = 0;
11807
11808                 tp->phy_crc_errors += val;
11809
11810                 return tp->phy_crc_errors;
11811         }
11812
11813         return get_stat64(&hw_stats->rx_fcs_errors);
11814 }
11815
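/* The MAC statistics block is zeroed across chip resets, so totals are
 * saved into tp->estats_prev (and tp->net_stats_prev) before each
 * halt.  ESTAT_ADD() therefore reports "saved total + current hardware
 * count" for every counter it touches.
 */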
11816 #define ESTAT_ADD(member) \
11817         estats->member =        old_estats->member + \
11818                                 get_stat64(&hw_stats->member)
11819
11820 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11821 {
11822         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11823         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11824
11825         ESTAT_ADD(rx_octets);
11826         ESTAT_ADD(rx_fragments);
11827         ESTAT_ADD(rx_ucast_packets);
11828         ESTAT_ADD(rx_mcast_packets);
11829         ESTAT_ADD(rx_bcast_packets);
11830         ESTAT_ADD(rx_fcs_errors);
11831         ESTAT_ADD(rx_align_errors);
11832         ESTAT_ADD(rx_xon_pause_rcvd);
11833         ESTAT_ADD(rx_xoff_pause_rcvd);
11834         ESTAT_ADD(rx_mac_ctrl_rcvd);
11835         ESTAT_ADD(rx_xoff_entered);
11836         ESTAT_ADD(rx_frame_too_long_errors);
11837         ESTAT_ADD(rx_jabbers);
11838         ESTAT_ADD(rx_undersize_packets);
11839         ESTAT_ADD(rx_in_length_errors);
11840         ESTAT_ADD(rx_out_length_errors);
11841         ESTAT_ADD(rx_64_or_less_octet_packets);
11842         ESTAT_ADD(rx_65_to_127_octet_packets);
11843         ESTAT_ADD(rx_128_to_255_octet_packets);
11844         ESTAT_ADD(rx_256_to_511_octet_packets);
11845         ESTAT_ADD(rx_512_to_1023_octet_packets);
11846         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11847         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11848         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11849         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11850         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11851
11852         ESTAT_ADD(tx_octets);
11853         ESTAT_ADD(tx_collisions);
11854         ESTAT_ADD(tx_xon_sent);
11855         ESTAT_ADD(tx_xoff_sent);
11856         ESTAT_ADD(tx_flow_control);
11857         ESTAT_ADD(tx_mac_errors);
11858         ESTAT_ADD(tx_single_collisions);
11859         ESTAT_ADD(tx_mult_collisions);
11860         ESTAT_ADD(tx_deferred);
11861         ESTAT_ADD(tx_excessive_collisions);
11862         ESTAT_ADD(tx_late_collisions);
11863         ESTAT_ADD(tx_collide_2times);
11864         ESTAT_ADD(tx_collide_3times);
11865         ESTAT_ADD(tx_collide_4times);
11866         ESTAT_ADD(tx_collide_5times);
11867         ESTAT_ADD(tx_collide_6times);
11868         ESTAT_ADD(tx_collide_7times);
11869         ESTAT_ADD(tx_collide_8times);
11870         ESTAT_ADD(tx_collide_9times);
11871         ESTAT_ADD(tx_collide_10times);
11872         ESTAT_ADD(tx_collide_11times);
11873         ESTAT_ADD(tx_collide_12times);
11874         ESTAT_ADD(tx_collide_13times);
11875         ESTAT_ADD(tx_collide_14times);
11876         ESTAT_ADD(tx_collide_15times);
11877         ESTAT_ADD(tx_ucast_packets);
11878         ESTAT_ADD(tx_mcast_packets);
11879         ESTAT_ADD(tx_bcast_packets);
11880         ESTAT_ADD(tx_carrier_sense_errors);
11881         ESTAT_ADD(tx_discards);
11882         ESTAT_ADD(tx_errors);
11883
11884         ESTAT_ADD(dma_writeq_full);
11885         ESTAT_ADD(dma_write_prioq_full);
11886         ESTAT_ADD(rxbds_empty);
11887         ESTAT_ADD(rx_discards);
11888         ESTAT_ADD(rx_errors);
11889         ESTAT_ADD(rx_threshold_hit);
11890
11891         ESTAT_ADD(dma_readq_full);
11892         ESTAT_ADD(dma_read_prioq_full);
11893         ESTAT_ADD(tx_comp_queue_full);
11894
11895         ESTAT_ADD(ring_set_send_prod_index);
11896         ESTAT_ADD(ring_status_update);
11897         ESTAT_ADD(nic_irqs);
11898         ESTAT_ADD(nic_avoided_irqs);
11899         ESTAT_ADD(nic_tx_threshold_hit);
11900
11901         ESTAT_ADD(mbuf_lwm_thresh_hit);
11902 }
11903
11904 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11905 {
11906         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11907         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11908
11909         stats->rx_packets = old_stats->rx_packets +
11910                 get_stat64(&hw_stats->rx_ucast_packets) +
11911                 get_stat64(&hw_stats->rx_mcast_packets) +
11912                 get_stat64(&hw_stats->rx_bcast_packets);
11913
11914         stats->tx_packets = old_stats->tx_packets +
11915                 get_stat64(&hw_stats->tx_ucast_packets) +
11916                 get_stat64(&hw_stats->tx_mcast_packets) +
11917                 get_stat64(&hw_stats->tx_bcast_packets);
11918
11919         stats->rx_bytes = old_stats->rx_bytes +
11920                 get_stat64(&hw_stats->rx_octets);
11921         stats->tx_bytes = old_stats->tx_bytes +
11922                 get_stat64(&hw_stats->tx_octets);
11923
11924         stats->rx_errors = old_stats->rx_errors +
11925                 get_stat64(&hw_stats->rx_errors);
11926         stats->tx_errors = old_stats->tx_errors +
11927                 get_stat64(&hw_stats->tx_errors) +
11928                 get_stat64(&hw_stats->tx_mac_errors) +
11929                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11930                 get_stat64(&hw_stats->tx_discards);
11931
11932         stats->multicast = old_stats->multicast +
11933                 get_stat64(&hw_stats->rx_mcast_packets);
11934         stats->collisions = old_stats->collisions +
11935                 get_stat64(&hw_stats->tx_collisions);
11936
11937         stats->rx_length_errors = old_stats->rx_length_errors +
11938                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11939                 get_stat64(&hw_stats->rx_undersize_packets);
11940
11941         stats->rx_frame_errors = old_stats->rx_frame_errors +
11942                 get_stat64(&hw_stats->rx_align_errors);
11943         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11944                 get_stat64(&hw_stats->tx_discards);
11945         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11946                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11947
11948         stats->rx_crc_errors = old_stats->rx_crc_errors +
11949                 tg3_calc_crc_errors(tp);
11950
11951         stats->rx_missed_errors = old_stats->rx_missed_errors +
11952                 get_stat64(&hw_stats->rx_discards);
11953
11954         stats->rx_dropped = tp->rx_dropped;
11955         stats->tx_dropped = tp->tx_dropped;
11956 }
11957
11958 static int tg3_get_regs_len(struct net_device *dev)
11959 {
11960         return TG3_REG_BLK_SIZE;
11961 }
11962
11963 static void tg3_get_regs(struct net_device *dev,
11964                 struct ethtool_regs *regs, void *_p)
11965 {
11966         struct tg3 *tp = netdev_priv(dev);
11967
11968         regs->version = 0;
11969
11970         memset(_p, 0, TG3_REG_BLK_SIZE);
11971
11972         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11973                 return;
11974
11975         tg3_full_lock(tp, 0);
11976
11977         tg3_dump_legacy_regs(tp, (u32 *)_p);
11978
11979         tg3_full_unlock(tp);
11980 }
11981
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11983 {
11984         struct tg3 *tp = netdev_priv(dev);
11985
11986         return tp->nvram_size;
11987 }
11988
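/* NVRAM is read in aligned 4-byte words, so an arbitrary (offset, len)
 * request is split into up to three parts: an unaligned head (read the
 * covering word, copy the needed bytes), a run of whole words, and an
 * unaligned tail.  E.g. offset = 1, len = 2 is served entirely from
 * the word at offset 0.
 */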
11989 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11990 {
11991         struct tg3 *tp = netdev_priv(dev);
11992         int ret, cpmu_restore = 0;
11993         u8  *pd;
11994         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11995         __be32 val;
11996
11997         if (tg3_flag(tp, NO_NVRAM))
11998                 return -EINVAL;
11999
12000         offset = eeprom->offset;
12001         len = eeprom->len;
12002         eeprom->len = 0;
12003
12004         eeprom->magic = TG3_EEPROM_MAGIC;
12005
12006         /* Override clock, link aware and link idle modes */
12007         if (tg3_flag(tp, CPMU_PRESENT)) {
12008                 cpmu_val = tr32(TG3_CPMU_CTRL);
12009                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12010                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12011                         tw32(TG3_CPMU_CTRL, cpmu_val &
12012                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12013                                              CPMU_CTRL_LINK_IDLE_MODE));
12014                         cpmu_restore = 1;
12015                 }
12016         }
12017         tg3_override_clk(tp);
12018
12019         if (offset & 3) {
12020                 /* adjustments to start on required 4 byte boundary */
12021                 b_offset = offset & 3;
12022                 b_count = 4 - b_offset;
12023                 if (b_count > len) {
12024                         /* i.e. offset=1 len=2 */
12025                         b_count = len;
12026                 }
12027                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12028                 if (ret)
12029                         goto eeprom_done;
12030                 memcpy(data, ((char *)&val) + b_offset, b_count);
12031                 len -= b_count;
12032                 offset += b_count;
12033                 eeprom->len += b_count;
12034         }
12035
12036         /* read bytes up to the last 4 byte boundary */
12037         pd = &data[eeprom->len];
12038         for (i = 0; i < (len - (len & 3)); i += 4) {
12039                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12040                 if (ret) {
12041                         if (i)
12042                                 i -= 4;
12043                         eeprom->len += i;
12044                         goto eeprom_done;
12045                 }
12046                 memcpy(pd + i, &val, 4);
12047                 if (need_resched()) {
12048                         if (signal_pending(current)) {
12049                                 eeprom->len += i;
12050                                 ret = -EINTR;
12051                                 goto eeprom_done;
12052                         }
12053                         cond_resched();
12054                 }
12055         }
12056         eeprom->len += i;
12057
12058         if (len & 3) {
12059                 /* read last bytes not ending on 4 byte boundary */
12060                 pd = &data[eeprom->len];
12061                 b_count = len & 3;
12062                 b_offset = offset + len - b_count;
12063                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12064                 if (ret)
12065                         goto eeprom_done;
12066                 memcpy(pd, &val, b_count);
12067                 eeprom->len += b_count;
12068         }
12069         ret = 0;
12070
12071 eeprom_done:
12072         /* Restore clock, link aware and link idle modes */
12073         tg3_restore_clk(tp);
12074         if (cpmu_restore)
12075                 tw32(TG3_CPMU_CTRL, cpmu_val);
12076
12077         return ret;
12078 }
12079
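/* Writes use the same alignment scheme in read-modify-write form: if
 * the request does not start or end on a word boundary, the bordering
 * words are read first and merged with the caller's data in a bounce
 * buffer, so that only whole words are handed to
 * tg3_nvram_write_block().
 */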
12080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12081 {
12082         struct tg3 *tp = netdev_priv(dev);
12083         int ret;
12084         u32 offset, len, b_offset, odd_len;
12085         u8 *buf;
12086         __be32 start = 0, end;
12087
12088         if (tg3_flag(tp, NO_NVRAM) ||
12089             eeprom->magic != TG3_EEPROM_MAGIC)
12090                 return -EINVAL;
12091
12092         offset = eeprom->offset;
12093         len = eeprom->len;
12094
12095         if ((b_offset = (offset & 3))) {
12096                 /* adjustments to start on required 4 byte boundary */
12097                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12098                 if (ret)
12099                         return ret;
12100                 len += b_offset;
12101                 offset &= ~3;
12102                 if (len < 4)
12103                         len = 4;
12104         }
12105
12106         odd_len = 0;
12107         if (len & 3) {
12108                 /* adjustments to end on required 4 byte boundary */
12109                 odd_len = 1;
12110                 len = (len + 3) & ~3;
12111                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12112                 if (ret)
12113                         return ret;
12114         }
12115
12116         buf = data;
12117         if (b_offset || odd_len) {
12118                 buf = kmalloc(len, GFP_KERNEL);
12119                 if (!buf)
12120                         return -ENOMEM;
12121                 if (b_offset)
12122                         memcpy(buf, &start, 4);
12123                 if (odd_len)
12124                         memcpy(buf+len-4, &end, 4);
12125                 memcpy(buf + b_offset, data, eeprom->len);
12126         }
12127
12128         ret = tg3_nvram_write_block(tp, offset, len, buf);
12129
12130         if (buf != data)
12131                 kfree(buf);
12132
12133         return ret;
12134 }
12135
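/* ethtool link settings: when phylib manages the PHY (USE_PHYLIB),
 * get/set requests are forwarded to the attached phy_device; otherwise
 * the driver builds the supported/advertising masks itself from
 * phy_flags (copper vs. serdes, gigabit vs. 10/100-only).
 */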
12136 static int tg3_get_link_ksettings(struct net_device *dev,
12137                                   struct ethtool_link_ksettings *cmd)
12138 {
12139         struct tg3 *tp = netdev_priv(dev);
12140         u32 supported, advertising;
12141
12142         if (tg3_flag(tp, USE_PHYLIB)) {
12143                 struct phy_device *phydev;
12144                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12145                         return -EAGAIN;
12146                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12147                 phy_ethtool_ksettings_get(phydev, cmd);
12148
12149                 return 0;
12150         }
12151
12152         supported = (SUPPORTED_Autoneg);
12153
12154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12155                 supported |= (SUPPORTED_1000baseT_Half |
12156                               SUPPORTED_1000baseT_Full);
12157
12158         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159                 supported |= (SUPPORTED_100baseT_Half |
12160                               SUPPORTED_100baseT_Full |
12161                               SUPPORTED_10baseT_Half |
12162                               SUPPORTED_10baseT_Full |
12163                               SUPPORTED_TP);
12164                 cmd->base.port = PORT_TP;
12165         } else {
12166                 supported |= SUPPORTED_FIBRE;
12167                 cmd->base.port = PORT_FIBRE;
12168         }
12169         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12170                                                 supported);
12171
12172         advertising = tp->link_config.advertising;
12173         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12174                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12175                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176                                 advertising |= ADVERTISED_Pause;
12177                         } else {
12178                                 advertising |= ADVERTISED_Pause |
12179                                         ADVERTISED_Asym_Pause;
12180                         }
12181                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182                         advertising |= ADVERTISED_Asym_Pause;
12183                 }
12184         }
12185         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12186                                                 advertising);
12187
12188         if (netif_running(dev) && tp->link_up) {
12189                 cmd->base.speed = tp->link_config.active_speed;
12190                 cmd->base.duplex = tp->link_config.active_duplex;
12191                 ethtool_convert_legacy_u32_to_link_mode(
12192                         cmd->link_modes.lp_advertising,
12193                         tp->link_config.rmt_adv);
12194
12195                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12196                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12197                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12198                         else
12199                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12200                 }
12201         } else {
12202                 cmd->base.speed = SPEED_UNKNOWN;
12203                 cmd->base.duplex = DUPLEX_UNKNOWN;
12204                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12205         }
12206         cmd->base.phy_address = tp->phy_addr;
12207         cmd->base.autoneg = tp->link_config.autoneg;
12208         return 0;
12209 }
12210
12211 static int tg3_set_link_ksettings(struct net_device *dev,
12212                                   const struct ethtool_link_ksettings *cmd)
12213 {
12214         struct tg3 *tp = netdev_priv(dev);
12215         u32 speed = cmd->base.speed;
12216         u32 advertising;
12217
12218         if (tg3_flag(tp, USE_PHYLIB)) {
12219                 struct phy_device *phydev;
12220                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12221                         return -EAGAIN;
12222                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12223                 return phy_ethtool_ksettings_set(phydev, cmd);
12224         }
12225
12226         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12227             cmd->base.autoneg != AUTONEG_DISABLE)
12228                 return -EINVAL;
12229
12230         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12231             cmd->base.duplex != DUPLEX_FULL &&
12232             cmd->base.duplex != DUPLEX_HALF)
12233                 return -EINVAL;
12234
12235         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12236                                                 cmd->link_modes.advertising);
12237
12238         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12239                 u32 mask = ADVERTISED_Autoneg |
12240                            ADVERTISED_Pause |
12241                            ADVERTISED_Asym_Pause;
12242
12243                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12244                         mask |= ADVERTISED_1000baseT_Half |
12245                                 ADVERTISED_1000baseT_Full;
12246
12247                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12248                         mask |= ADVERTISED_100baseT_Half |
12249                                 ADVERTISED_100baseT_Full |
12250                                 ADVERTISED_10baseT_Half |
12251                                 ADVERTISED_10baseT_Full |
12252                                 ADVERTISED_TP;
12253                 else
12254                         mask |= ADVERTISED_FIBRE;
12255
12256                 if (advertising & ~mask)
12257                         return -EINVAL;
12258
12259                 mask &= (ADVERTISED_1000baseT_Half |
12260                          ADVERTISED_1000baseT_Full |
12261                          ADVERTISED_100baseT_Half |
12262                          ADVERTISED_100baseT_Full |
12263                          ADVERTISED_10baseT_Half |
12264                          ADVERTISED_10baseT_Full);
12265
12266                 advertising &= mask;
12267         } else {
12268                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12269                         if (speed != SPEED_1000)
12270                                 return -EINVAL;
12271
12272                         if (cmd->base.duplex != DUPLEX_FULL)
12273                                 return -EINVAL;
12274                 } else {
12275                         if (speed != SPEED_100 &&
12276                             speed != SPEED_10)
12277                                 return -EINVAL;
12278                 }
12279         }
12280
12281         tg3_full_lock(tp, 0);
12282
12283         tp->link_config.autoneg = cmd->base.autoneg;
12284         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12285                 tp->link_config.advertising = (advertising |
12286                                               ADVERTISED_Autoneg);
12287                 tp->link_config.speed = SPEED_UNKNOWN;
12288                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12289         } else {
12290                 tp->link_config.advertising = 0;
12291                 tp->link_config.speed = speed;
12292                 tp->link_config.duplex = cmd->base.duplex;
12293         }
12294
12295         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12296
12297         tg3_warn_mgmt_link_flap(tp);
12298
12299         if (netif_running(dev))
12300                 tg3_setup_phy(tp, true);
12301
12302         tg3_full_unlock(tp);
12303
12304         return 0;
12305 }
12306
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12308 {
12309         struct tg3 *tp = netdev_priv(dev);
12310
12311         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12313         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12314 }
12315
12316 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12317 {
12318         struct tg3 *tp = netdev_priv(dev);
12319
12320         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12321                 wol->supported = WAKE_MAGIC;
12322         else
12323                 wol->supported = 0;
12324         wol->wolopts = 0;
12325         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12326                 wol->wolopts = WAKE_MAGIC;
12327         memset(&wol->sopass, 0, sizeof(wol->sopass));
12328 }
12329
12330 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12331 {
12332         struct tg3 *tp = netdev_priv(dev);
12333         struct device *dp = &tp->pdev->dev;
12334
12335         if (wol->wolopts & ~WAKE_MAGIC)
12336                 return -EINVAL;
12337         if ((wol->wolopts & WAKE_MAGIC) &&
12338             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12339                 return -EINVAL;
12340
12341         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12342
12343         if (device_may_wakeup(dp))
12344                 tg3_flag_set(tp, WOL_ENABLE);
12345         else
12346                 tg3_flag_clear(tp, WOL_ENABLE);
12347
12348         return 0;
12349 }
12350
12351 static u32 tg3_get_msglevel(struct net_device *dev)
12352 {
12353         struct tg3 *tp = netdev_priv(dev);
12354         return tp->msg_enable;
12355 }
12356
12357 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12358 {
12359         struct tg3 *tp = netdev_priv(dev);
12360         tp->msg_enable = value;
12361 }
12362
12363 static int tg3_nway_reset(struct net_device *dev)
12364 {
12365         struct tg3 *tp = netdev_priv(dev);
12366         int r;
12367
12368         if (!netif_running(dev))
12369                 return -EAGAIN;
12370
12371         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12372                 return -EINVAL;
12373
12374         tg3_warn_mgmt_link_flap(tp);
12375
12376         if (tg3_flag(tp, USE_PHYLIB)) {
12377                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12378                         return -EAGAIN;
12379                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12380         } else {
12381                 u32 bmcr;
12382
12383                 spin_lock_bh(&tp->lock);
12384                 r = -EINVAL;
12385                 tg3_readphy(tp, MII_BMCR, &bmcr);
12386                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12387                     ((bmcr & BMCR_ANENABLE) ||
12388                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12389                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12390                                                    BMCR_ANENABLE);
12391                         r = 0;
12392                 }
12393                 spin_unlock_bh(&tp->lock);
12394         }
12395
12396         return r;
12397 }
12398
12399 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12400 {
12401         struct tg3 *tp = netdev_priv(dev);
12402
12403         ering->rx_max_pending = tp->rx_std_ring_mask;
12404         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12405                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12406         else
12407                 ering->rx_jumbo_max_pending = 0;
12408
12409         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12410
12411         ering->rx_pending = tp->rx_pending;
12412         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12413                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12414         else
12415                 ering->rx_jumbo_pending = 0;
12416
12417         ering->tx_pending = tp->napi[0].tx_pending;
12418 }
12419
12420 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12421 {
12422         struct tg3 *tp = netdev_priv(dev);
12423         int i, irq_sync = 0, err = 0;
12424         bool reset_phy = false;
12425
12426         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12428             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12429             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12430             (tg3_flag(tp, TSO_BUG) &&
12431              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12432                 return -EINVAL;
12433
12434         if (netif_running(dev)) {
12435                 tg3_phy_stop(tp);
12436                 tg3_netif_stop(tp);
12437                 irq_sync = 1;
12438         }
12439
12440         tg3_full_lock(tp, irq_sync);
12441
12442         tp->rx_pending = ering->rx_pending;
12443
12444         if (tg3_flag(tp, MAX_RXPEND_64) &&
12445             tp->rx_pending > 63)
12446                 tp->rx_pending = 63;
12447
12448         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12449                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12450
12451         for (i = 0; i < tp->irq_max; i++)
12452                 tp->napi[i].tx_pending = ering->tx_pending;
12453
12454         if (netif_running(dev)) {
12455                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456                 /* Reset PHY to avoid PHY lock up */
12457                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12458                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12459                     tg3_asic_rev(tp) == ASIC_REV_5720)
12460                         reset_phy = true;
12461
12462                 err = tg3_restart_hw(tp, reset_phy);
12463                 if (!err)
12464                         tg3_netif_start(tp);
12465         }
12466
12467         tg3_full_unlock(tp);
12468
12469         if (irq_sync && !err)
12470                 tg3_phy_start(tp);
12471
12472         return err;
12473 }
12474
12475 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12476 {
12477         struct tg3 *tp = netdev_priv(dev);
12478
12479         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12480
12481         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12482                 epause->rx_pause = 1;
12483         else
12484                 epause->rx_pause = 0;
12485
12486         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12487                 epause->tx_pause = 1;
12488         else
12489                 epause->tx_pause = 0;
12490 }
12491
12492 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12493 {
12494         struct tg3 *tp = netdev_priv(dev);
12495         int err = 0;
12496         bool reset_phy = false;
12497
12498         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12499                 tg3_warn_mgmt_link_flap(tp);
12500
12501         if (tg3_flag(tp, USE_PHYLIB)) {
12502                 struct phy_device *phydev;
12503
12504                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12505
12506                 if (!phy_validate_pause(phydev, epause))
12507                         return -EINVAL;
12508
12509                 tp->link_config.flowctrl = 0;
12510                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12511                 if (epause->rx_pause) {
12512                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12513
12514                         if (epause->tx_pause) {
12515                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12516                         }
12517                 } else if (epause->tx_pause) {
12518                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12519                 }
12520
12521                 if (epause->autoneg)
12522                         tg3_flag_set(tp, PAUSE_AUTONEG);
12523                 else
12524                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12525
12526                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12527                         if (phydev->autoneg) {
12528                                 /* phy_set_asym_pause() will
12529                                  * renegotiate the link to inform our
12530                                  * link partner of our flow control
12531                                  * settings, even if the flow control
12532                                  * is forced.  Let tg3_adjust_link()
12533                                  * do the final flow control setup.
12534                                  */
12535                                 return 0;
12536                         }
12537
12538                         if (!epause->autoneg)
12539                                 tg3_setup_flow_control(tp, 0, 0);
12540                 }
12541         } else {
12542                 int irq_sync = 0;
12543
12544                 if (netif_running(dev)) {
12545                         tg3_netif_stop(tp);
12546                         irq_sync = 1;
12547                 }
12548
12549                 tg3_full_lock(tp, irq_sync);
12550
12551                 if (epause->autoneg)
12552                         tg3_flag_set(tp, PAUSE_AUTONEG);
12553                 else
12554                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12555                 if (epause->rx_pause)
12556                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12557                 else
12558                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12559                 if (epause->tx_pause)
12560                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12561                 else
12562                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12563
12564                 if (netif_running(dev)) {
12565                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12566                         /* Reset PHY to avoid PHY lock up */
12567                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12568                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12569                             tg3_asic_rev(tp) == ASIC_REV_5720)
12570                                 reset_phy = true;
12571
12572                         err = tg3_restart_hw(tp, reset_phy);
12573                         if (!err)
12574                                 tg3_netif_start(tp);
12575                 }
12576
12577                 tg3_full_unlock(tp);
12578         }
12579
12580         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12581
12582         return err;
12583 }
12584
12585 static int tg3_get_sset_count(struct net_device *dev, int sset)
12586 {
12587         switch (sset) {
12588         case ETH_SS_TEST:
12589                 return TG3_NUM_TEST;
12590         case ETH_SS_STATS:
12591                 return TG3_NUM_STATS;
12592         default:
12593                 return -EOPNOTSUPP;
12594         }
12595 }
12596
12597 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12598                          u32 *rules __always_unused)
12599 {
12600         struct tg3 *tp = netdev_priv(dev);
12601
12602         if (!tg3_flag(tp, SUPPORT_MSIX))
12603                 return -EOPNOTSUPP;
12604
12605         switch (info->cmd) {
12606         case ETHTOOL_GRXRINGS:
12607                 if (netif_running(tp->dev))
12608                         info->data = tp->rxq_cnt;
12609                 else {
12610                         info->data = num_online_cpus();
12611                         if (info->data > TG3_RSS_MAX_NUM_QS)
12612                                 info->data = TG3_RSS_MAX_NUM_QS;
12613                 }
12614
12615                 return 0;
12616
12617         default:
12618                 return -EOPNOTSUPP;
12619         }
12620 }
12621
12622 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12623 {
12624         u32 size = 0;
12625         struct tg3 *tp = netdev_priv(dev);
12626
12627         if (tg3_flag(tp, SUPPORT_MSIX))
12628                 size = TG3_RSS_INDIR_TBL_SIZE;
12629
12630         return size;
12631 }
12632
12633 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12634 {
12635         struct tg3 *tp = netdev_priv(dev);
12636         int i;
12637
12638         if (hfunc)
12639                 *hfunc = ETH_RSS_HASH_TOP;
12640         if (!indir)
12641                 return 0;
12642
12643         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12644                 indir[i] = tp->rss_ind_tbl[i];
12645
12646         return 0;
12647 }
12648
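/* RSS configuration: through ethtool, only the indirection table is
 * writable.  The device sticks with the default Toeplitz ("top") hash
 * and no user-supplied key, hence the -EOPNOTSUPP below for any other
 * request.
 */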
12649 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12650                         const u8 hfunc)
12651 {
12652         struct tg3 *tp = netdev_priv(dev);
12653         size_t i;
12654
12655         /* We require at least one supported parameter to be changed and no
12656          * change in any of the unsupported parameters
12657          */
12658         if (key ||
12659             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12660                 return -EOPNOTSUPP;
12661
12662         if (!indir)
12663                 return 0;
12664
12665         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12666                 tp->rss_ind_tbl[i] = indir[i];
12667
12668         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12669                 return 0;
12670
12671         /* It is legal to write the indirection
12672          * table while the device is running.
12673          */
12674         tg3_full_lock(tp, 0);
12675         tg3_rss_write_indir_tbl(tp);
12676         tg3_full_unlock(tp);
12677
12678         return 0;
12679 }
12680
12681 static void tg3_get_channels(struct net_device *dev,
12682                              struct ethtool_channels *channel)
12683 {
12684         struct tg3 *tp = netdev_priv(dev);
12685         u32 deflt_qs = netif_get_num_default_rss_queues();
12686
12687         channel->max_rx = tp->rxq_max;
12688         channel->max_tx = tp->txq_max;
12689
12690         if (netif_running(dev)) {
12691                 channel->rx_count = tp->rxq_cnt;
12692                 channel->tx_count = tp->txq_cnt;
12693         } else {
12694                 if (tp->rxq_req)
12695                         channel->rx_count = tp->rxq_req;
12696                 else
12697                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12698
12699                 if (tp->txq_req)
12700                         channel->tx_count = tp->txq_req;
12701                 else
12702                         channel->tx_count = min(deflt_qs, tp->txq_max);
12703         }
12704 }
12705
12706 static int tg3_set_channels(struct net_device *dev,
12707                             struct ethtool_channels *channel)
12708 {
12709         struct tg3 *tp = netdev_priv(dev);
12710
12711         if (!tg3_flag(tp, SUPPORT_MSIX))
12712                 return -EOPNOTSUPP;
12713
12714         if (channel->rx_count > tp->rxq_max ||
12715             channel->tx_count > tp->txq_max)
12716                 return -EINVAL;
12717
12718         tp->rxq_req = channel->rx_count;
12719         tp->txq_req = channel->tx_count;
12720
12721         if (!netif_running(dev))
12722                 return 0;
12723
12724         tg3_stop(tp);
12725
12726         tg3_carrier_off(tp);
12727
12728         tg3_start(tp, true, false, false);
12729
12730         return 0;
12731 }
12732
12733 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12734 {
12735         switch (stringset) {
12736         case ETH_SS_STATS:
12737                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12738                 break;
12739         case ETH_SS_TEST:
12740                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12741                 break;
12742         default:
12743                 WARN_ON(1);     /* unknown ethtool string set requested */
12744                 break;
12745         }
12746 }
12747
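/* Implements "ethtool -p" port identification.  Returning 1 for
 * ETHTOOL_ID_ACTIVE asks the core to call back once per second with
 * ID_ON/ID_OFF, at which point the LED control register is overridden;
 * ID_INACTIVE restores the saved LED state.
 */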
12748 static int tg3_set_phys_id(struct net_device *dev,
12749                             enum ethtool_phys_id_state state)
12750 {
12751         struct tg3 *tp = netdev_priv(dev);
12752
12753         switch (state) {
12754         case ETHTOOL_ID_ACTIVE:
12755                 return 1;       /* cycle on/off once per second */
12756
12757         case ETHTOOL_ID_ON:
12758                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759                      LED_CTRL_1000MBPS_ON |
12760                      LED_CTRL_100MBPS_ON |
12761                      LED_CTRL_10MBPS_ON |
12762                      LED_CTRL_TRAFFIC_OVERRIDE |
12763                      LED_CTRL_TRAFFIC_BLINK |
12764                      LED_CTRL_TRAFFIC_LED);
12765                 break;
12766
12767         case ETHTOOL_ID_OFF:
12768                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769                      LED_CTRL_TRAFFIC_OVERRIDE);
12770                 break;
12771
12772         case ETHTOOL_ID_INACTIVE:
12773                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12774                 break;
12775         }
12776
12777         return 0;
12778 }
12779
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781                                    struct ethtool_stats *estats, u64 *tmp_stats)
12782 {
12783         struct tg3 *tp = netdev_priv(dev);
12784
12785         if (tp->hw_stats)
12786                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12787         else
12788                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12789 }
12790
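/* Reads the device's VPD block into a kmalloc'ed buffer (caller frees).
 * Standard EEPROM images may carry an extended-VPD directory entry
 * pointing at the block; failing that, the legacy fixed offset/length is
 * used.  Images without the standard EEPROM magic are read through the
 * PCI VPD capability instead.
 */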
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12792 {
12793         int i;
12794         __be32 *buf;
12795         u32 offset = 0, len = 0;
12796         u32 magic, val;
12797
12798         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12799                 return NULL;
12800
12801         if (magic == TG3_EEPROM_MAGIC) {
12802                 for (offset = TG3_NVM_DIR_START;
12803                      offset < TG3_NVM_DIR_END;
12804                      offset += TG3_NVM_DIRENT_SIZE) {
12805                         if (tg3_nvram_read(tp, offset, &val))
12806                                 return NULL;
12807
12808                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809                             TG3_NVM_DIRTYPE_EXTVPD)
12810                                 break;
12811                 }
12812
12813                 if (offset != TG3_NVM_DIR_END) {
12814                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815                         if (tg3_nvram_read(tp, offset + 4, &offset))
12816                                 return NULL;
12817
12818                         offset = tg3_nvram_logical_addr(tp, offset);
12819                 }
12820         }
12821
12822         if (!offset || !len) {
12823                 offset = TG3_NVM_VPD_OFF;
12824                 len = TG3_NVM_VPD_LEN;
12825         }
12826
12827         buf = kmalloc(len, GFP_KERNEL);
12828         if (buf == NULL)
12829                 return NULL;
12830
12831         if (magic == TG3_EEPROM_MAGIC) {
12832                 for (i = 0; i < len; i += 4) {
12833                         /* The data is in little-endian format in NVRAM.
12834                          * Use the big-endian read routines to preserve
12835                          * the byte order as it exists in NVRAM.
12836                          */
12837                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12838                                 goto error;
12839                 }
12840         } else {
12841                 u8 *ptr;
12842                 ssize_t cnt;
12843                 unsigned int pos = 0;
12844
12845                 ptr = (u8 *)&buf[0];
12846                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12847                         cnt = pci_read_vpd(tp->pdev, pos,
12848                                            len - pos, ptr);
12849                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12850                                 cnt = 0;
12851                         else if (cnt < 0)
12852                                 goto error;
12853                 }
12854                 if (pos != len)
12855                         goto error;
12856         }
12857
12858         *vpdlen = len;
12859
12860         return buf;
12861
12862 error:
12863         kfree(buf);
12864         return NULL;
12865 }
12866
12867 #define NVRAM_TEST_SIZE 0x100
12868 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12869 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12870 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12871 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12872 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12873 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12874 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12875 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12876
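/* NVRAM self test.  Three image flavors are recognized by magic number:
 * the standard EEPROM image (CRC-checked bootstrap and manufacturing
 * blocks, plus a VPD checksum), firmware selfboot images (a simple 8-bit
 * sum over the image, skipping the MBA word on format 1 rev 2), and
 * hardware selfboot images (per-byte parity bits packed into a few
 * header bytes and checked against the data).
 */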
12877 static int tg3_test_nvram(struct tg3 *tp)
12878 {
12879         u32 csum, magic, len;
12880         __be32 *buf;
12881         int i, j, k, err = 0, size;
12882
12883         if (tg3_flag(tp, NO_NVRAM))
12884                 return 0;
12885
12886         if (tg3_nvram_read(tp, 0, &magic) != 0)
12887                 return -EIO;
12888
12889         if (magic == TG3_EEPROM_MAGIC)
12890                 size = NVRAM_TEST_SIZE;
12891         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12892                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12893                     TG3_EEPROM_SB_FORMAT_1) {
12894                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12895                         case TG3_EEPROM_SB_REVISION_0:
12896                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12897                                 break;
12898                         case TG3_EEPROM_SB_REVISION_2:
12899                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12900                                 break;
12901                         case TG3_EEPROM_SB_REVISION_3:
12902                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12903                                 break;
12904                         case TG3_EEPROM_SB_REVISION_4:
12905                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12906                                 break;
12907                         case TG3_EEPROM_SB_REVISION_5:
12908                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12909                                 break;
12910                         case TG3_EEPROM_SB_REVISION_6:
12911                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12912                                 break;
12913                         default:
12914                                 return -EIO;
12915                         }
12916                 } else
12917                         return 0;
12918         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12919                 size = NVRAM_SELFBOOT_HW_SIZE;
12920         else
12921                 return -EIO;
12922
12923         buf = kmalloc(size, GFP_KERNEL);
12924         if (buf == NULL)
12925                 return -ENOMEM;
12926
12927         err = -EIO;
12928         for (i = 0, j = 0; i < size; i += 4, j++) {
12929                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12930                 if (err)
12931                         break;
12932         }
12933         if (i < size)
12934                 goto out;
12935
12936         /* Selfboot format */
12937         magic = be32_to_cpu(buf[0]);
12938         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12939             TG3_EEPROM_MAGIC_FW) {
12940                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12941
12942                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12943                     TG3_EEPROM_SB_REVISION_2) {
12944                         /* For rev 2, the csum doesn't include the MBA. */
12945                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12946                                 csum8 += buf8[i];
12947                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12948                                 csum8 += buf8[i];
12949                 } else {
12950                         for (i = 0; i < size; i++)
12951                                 csum8 += buf8[i];
12952                 }
12953
12954                 if (csum8 == 0) {
12955                         err = 0;
12956                         goto out;
12957                 }
12958
12959                 err = -EIO;
12960                 goto out;
12961         }
12962
12963         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12964             TG3_EEPROM_MAGIC_HW) {
12965                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12966                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12967                 u8 *buf8 = (u8 *) buf;
12968
12969                 /* Separate the parity bits and the data bytes.  */
12970                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12971                         if ((i == 0) || (i == 8)) {
12972                                 int l;
12973                                 u8 msk;
12974
12975                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12976                                         parity[k++] = buf8[i] & msk;
12977                                 i++;
12978                         } else if (i == 16) {
12979                                 int l;
12980                                 u8 msk;
12981
12982                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12983                                         parity[k++] = buf8[i] & msk;
12984                                 i++;
12985
12986                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12987                                         parity[k++] = buf8[i] & msk;
12988                                 i++;
12989                         }
12990                         data[j++] = buf8[i];
12991                 }
12992
12993                 err = -EIO;
12994                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12995                         u8 hw8 = hweight8(data[i]);
12996
12997                         if ((hw8 & 0x1) && parity[i])
12998                                 goto out;
12999                         else if (!(hw8 & 0x1) && !parity[i])
13000                                 goto out;
13001                 }
13002                 err = 0;
13003                 goto out;
13004         }
13005
13006         err = -EIO;
13007
13008         /* Bootstrap checksum at offset 0x10 */
13009         csum = calc_crc((unsigned char *) buf, 0x10);
13010         if (csum != le32_to_cpu(buf[0x10/4]))
13011                 goto out;
13012
13013         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13014         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13015         if (csum != le32_to_cpu(buf[0xfc/4]))
13016                 goto out;
13017
13018         kfree(buf);
13019
13020         buf = tg3_vpd_readblock(tp, &len);
13021         if (!buf)
13022                 return -ENOMEM;
13023
13024         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
13025         if (i > 0) {
13026                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13027                 if (j < 0)
13028                         goto out;
13029
13030                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13031                         goto out;
13032
13033                 i += PCI_VPD_LRDT_TAG_SIZE;
13034                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13035                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13036                 if (j > 0) {
13037                         u8 csum8 = 0;
13038
13039                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13040
13041                         for (i = 0; i <= j; i++)
13042                                 csum8 += ((u8 *)buf)[i];
13043
13044                         if (csum8)
13045                                 goto out;
13046                 }
13047         }
13048
13049         err = 0;
13050
13051 out:
13052         kfree(buf);
13053         return err;
13054 }
13055
13056 #define TG3_SERDES_TIMEOUT_SEC  2
13057 #define TG3_COPPER_TIMEOUT_SEC  6
13058
13059 static int tg3_test_link(struct tg3 *tp)
13060 {
13061         int i, max;
13062
13063         if (!netif_running(tp->dev))
13064                 return -ENODEV;
13065
13066         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13067                 max = TG3_SERDES_TIMEOUT_SEC;
13068         else
13069                 max = TG3_COPPER_TIMEOUT_SEC;
13070
13071         for (i = 0; i < max; i++) {
13072                 if (tp->link_up)
13073                         return 0;
13074
13075                 if (msleep_interruptible(1000))
13076                         break;
13077         }
13078
13079         return -EIO;
13080 }
13081
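/* Each reg_tbl[] entry below is { offset, chip-applicability flags,
 * read-only bit mask, read/write bit mask }.  As a worked example,
 * { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff } means: applies to
 * every chip, no bits are expected to hold a fixed value, and the low 16
 * bits must accept both an all-zeros and an all-ones write.
 */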
13082 /* Only test the commonly used registers */
13083 static int tg3_test_registers(struct tg3 *tp)
13084 {
13085         int i, is_5705, is_5750;
13086         u32 offset, read_mask, write_mask, val, save_val, read_val;
13087         static struct {
13088                 u16 offset;
13089                 u16 flags;
13090 #define TG3_FL_5705     0x1
13091 #define TG3_FL_NOT_5705 0x2
13092 #define TG3_FL_NOT_5788 0x4
13093 #define TG3_FL_NOT_5750 0x8
13094                 u32 read_mask;
13095                 u32 write_mask;
13096         } reg_tbl[] = {
13097                 /* MAC Control Registers */
13098                 { MAC_MODE, TG3_FL_NOT_5705,
13099                         0x00000000, 0x00ef6f8c },
13100                 { MAC_MODE, TG3_FL_5705,
13101                         0x00000000, 0x01ef6b8c },
13102                 { MAC_STATUS, TG3_FL_NOT_5705,
13103                         0x03800107, 0x00000000 },
13104                 { MAC_STATUS, TG3_FL_5705,
13105                         0x03800100, 0x00000000 },
13106                 { MAC_ADDR_0_HIGH, 0x0000,
13107                         0x00000000, 0x0000ffff },
13108                 { MAC_ADDR_0_LOW, 0x0000,
13109                         0x00000000, 0xffffffff },
13110                 { MAC_RX_MTU_SIZE, 0x0000,
13111                         0x00000000, 0x0000ffff },
13112                 { MAC_TX_MODE, 0x0000,
13113                         0x00000000, 0x00000070 },
13114                 { MAC_TX_LENGTHS, 0x0000,
13115                         0x00000000, 0x00003fff },
13116                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13117                         0x00000000, 0x000007fc },
13118                 { MAC_RX_MODE, TG3_FL_5705,
13119                         0x00000000, 0x000007dc },
13120                 { MAC_HASH_REG_0, 0x0000,
13121                         0x00000000, 0xffffffff },
13122                 { MAC_HASH_REG_1, 0x0000,
13123                         0x00000000, 0xffffffff },
13124                 { MAC_HASH_REG_2, 0x0000,
13125                         0x00000000, 0xffffffff },
13126                 { MAC_HASH_REG_3, 0x0000,
13127                         0x00000000, 0xffffffff },
13128
13129                 /* Receive Data and Receive BD Initiator Control Registers. */
13130                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13131                         0x00000000, 0xffffffff },
13132                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13133                         0x00000000, 0xffffffff },
13134                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13135                         0x00000000, 0x00000003 },
13136                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13137                         0x00000000, 0xffffffff },
13138                 { RCVDBDI_STD_BD+0, 0x0000,
13139                         0x00000000, 0xffffffff },
13140                 { RCVDBDI_STD_BD+4, 0x0000,
13141                         0x00000000, 0xffffffff },
13142                 { RCVDBDI_STD_BD+8, 0x0000,
13143                         0x00000000, 0xffff0002 },
13144                 { RCVDBDI_STD_BD+0xc, 0x0000,
13145                         0x00000000, 0xffffffff },
13146
13147                 /* Receive BD Initiator Control Registers. */
13148                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13149                         0x00000000, 0xffffffff },
13150                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13151                         0x00000000, 0x000003ff },
13152                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13153                         0x00000000, 0xffffffff },
13154
13155                 /* Host Coalescing Control Registers. */
13156                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13157                         0x00000000, 0x00000004 },
13158                 { HOSTCC_MODE, TG3_FL_5705,
13159                         0x00000000, 0x000000f6 },
13160                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13161                         0x00000000, 0xffffffff },
13162                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13163                         0x00000000, 0x000003ff },
13164                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13165                         0x00000000, 0xffffffff },
13166                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13167                         0x00000000, 0x000003ff },
13168                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13169                         0x00000000, 0xffffffff },
13170                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13171                         0x00000000, 0x000000ff },
13172                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13173                         0x00000000, 0xffffffff },
13174                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13175                         0x00000000, 0x000000ff },
13176                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13177                         0x00000000, 0xffffffff },
13178                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13179                         0x00000000, 0xffffffff },
13180                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13181                         0x00000000, 0xffffffff },
13182                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13183                         0x00000000, 0x000000ff },
13184                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13185                         0x00000000, 0xffffffff },
13186                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13187                         0x00000000, 0x000000ff },
13188                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13189                         0x00000000, 0xffffffff },
13190                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13191                         0x00000000, 0xffffffff },
13192                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13193                         0x00000000, 0xffffffff },
13194                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13195                         0x00000000, 0xffffffff },
13196                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13197                         0x00000000, 0xffffffff },
13198                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13199                         0xffffffff, 0x00000000 },
13200                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13201                         0xffffffff, 0x00000000 },
13202
13203                 /* Buffer Manager Control Registers. */
13204                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13205                         0x00000000, 0x007fff80 },
13206                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13207                         0x00000000, 0x007fffff },
13208                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13209                         0x00000000, 0x0000003f },
13210                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13211                         0x00000000, 0x000001ff },
13212                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13213                         0x00000000, 0x000001ff },
13214                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13215                         0xffffffff, 0x00000000 },
13216                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13217                         0xffffffff, 0x00000000 },
13218
13219                 /* Mailbox Registers */
13220                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13221                         0x00000000, 0x000001ff },
13222                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13223                         0x00000000, 0x000001ff },
13224                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13225                         0x00000000, 0x000007ff },
13226                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13227                         0x00000000, 0x000001ff },
13228
13229                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13230         };
13231
13232         is_5705 = is_5750 = 0;
13233         if (tg3_flag(tp, 5705_PLUS)) {
13234                 is_5705 = 1;
13235                 if (tg3_flag(tp, 5750_PLUS))
13236                         is_5750 = 1;
13237         }
13238
13239         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13240                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13241                         continue;
13242
13243                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13244                         continue;
13245
13246                 if (tg3_flag(tp, IS_5788) &&
13247                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13248                         continue;
13249
13250                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13251                         continue;
13252
13253                 offset = (u32) reg_tbl[i].offset;
13254                 read_mask = reg_tbl[i].read_mask;
13255                 write_mask = reg_tbl[i].write_mask;
13256
13257                 /* Save the original register content */
13258                 save_val = tr32(offset);
13259
13260                 /* Determine the read-only value. */
13261                 read_val = save_val & read_mask;
13262
13263                 /* Write zero to the register, then make sure the read-only bits
13264                  * are not changed and the read/write bits are all zeros.
13265                  */
13266                 tw32(offset, 0);
13267
13268                 val = tr32(offset);
13269
13270                 /* Test the read-only and read/write bits. */
13271                 if (((val & read_mask) != read_val) || (val & write_mask))
13272                         goto out;
13273
13274                 /* Write ones to all the bits defined by read_mask and write_mask, then
13275                  * make sure the read-only bits are not changed and the
13276                  * read/write bits are all ones.
13277                  */
13278                 tw32(offset, read_mask | write_mask);
13279
13280                 val = tr32(offset);
13281
13282                 /* Test the read-only bits. */
13283                 if ((val & read_mask) != read_val)
13284                         goto out;
13285
13286                 /* Test the read/write bits. */
13287                 if ((val & write_mask) != write_mask)
13288                         goto out;
13289
13290                 tw32(offset, save_val);
13291         }
13292
13293         return 0;
13294
13295 out:
13296         if (netif_msg_hw(tp))
13297                 netdev_err(tp->dev,
13298                            "Register test failed at offset %x\n", offset);
13299         tw32(offset, save_val);
13300         return -EIO;
13301 }
13302
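/* Memory test helper: for every 32-bit word in [offset, offset + len),
 * write a pattern through the memory window and read it back.  The
 * patterns (all-zeros, all-ones, 0xaa55a55a) catch stuck-at bits and
 * simple address/data line faults.
 */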
13303 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13304 {
13305         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13306         int i;
13307         u32 j;
13308
13309         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13310                 for (j = 0; j < len; j += 4) {
13311                         u32 val;
13312
13313                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13314                         tg3_read_mem(tp, offset + j, &val);
13315                         if (val != test_pattern[i])
13316                                 return -EIO;
13317                 }
13318         }
13319         return 0;
13320 }
13321
13322 static int tg3_test_memory(struct tg3 *tp)
13323 {
13324         static struct mem_entry {
13325                 u32 offset;
13326                 u32 len;
13327         } mem_tbl_570x[] = {
13328                 { 0x00000000, 0x00b50},
13329                 { 0x00002000, 0x1c000},
13330                 { 0xffffffff, 0x00000}
13331         }, mem_tbl_5705[] = {
13332                 { 0x00000100, 0x0000c},
13333                 { 0x00000200, 0x00008},
13334                 { 0x00004000, 0x00800},
13335                 { 0x00006000, 0x01000},
13336                 { 0x00008000, 0x02000},
13337                 { 0x00010000, 0x0e000},
13338                 { 0xffffffff, 0x00000}
13339         }, mem_tbl_5755[] = {
13340                 { 0x00000200, 0x00008},
13341                 { 0x00004000, 0x00800},
13342                 { 0x00006000, 0x00800},
13343                 { 0x00008000, 0x02000},
13344                 { 0x00010000, 0x0c000},
13345                 { 0xffffffff, 0x00000}
13346         }, mem_tbl_5906[] = {
13347                 { 0x00000200, 0x00008},
13348                 { 0x00004000, 0x00400},
13349                 { 0x00006000, 0x00400},
13350                 { 0x00008000, 0x01000},
13351                 { 0x00010000, 0x01000},
13352                 { 0xffffffff, 0x00000}
13353         }, mem_tbl_5717[] = {
13354                 { 0x00000200, 0x00008},
13355                 { 0x00010000, 0x0a000},
13356                 { 0x00020000, 0x13c00},
13357                 { 0xffffffff, 0x00000}
13358         }, mem_tbl_57765[] = {
13359                 { 0x00000200, 0x00008},
13360                 { 0x00004000, 0x00800},
13361                 { 0x00006000, 0x09800},
13362                 { 0x00010000, 0x0a000},
13363                 { 0xffffffff, 0x00000}
13364         };
13365         struct mem_entry *mem_tbl;
13366         int err = 0;
13367         int i;
13368
13369         if (tg3_flag(tp, 5717_PLUS))
13370                 mem_tbl = mem_tbl_5717;
13371         else if (tg3_flag(tp, 57765_CLASS) ||
13372                  tg3_asic_rev(tp) == ASIC_REV_5762)
13373                 mem_tbl = mem_tbl_57765;
13374         else if (tg3_flag(tp, 5755_PLUS))
13375                 mem_tbl = mem_tbl_5755;
13376         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13377                 mem_tbl = mem_tbl_5906;
13378         else if (tg3_flag(tp, 5705_PLUS))
13379                 mem_tbl = mem_tbl_5705;
13380         else
13381                 mem_tbl = mem_tbl_570x;
13382
13383         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13384                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13385                 if (err)
13386                         break;
13387         }
13388
13389         return err;
13390 }
13391
13392 #define TG3_TSO_MSS             500
13393
13394 #define TG3_TSO_IP_HDR_LEN      20
13395 #define TG3_TSO_TCP_HDR_LEN     20
13396 #define TG3_TSO_TCP_OPT_LEN     12
13397
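/* Canned frame body for the TSO loopback test: EtherType 0x0800, a
 * 20-byte IPv4 header (10.0.0.1 -> 10.0.0.2, proto TCP), and a TCP
 * header carrying 12 bytes of options (two NOPs plus a timestamp),
 * matching the TG3_TSO_*_LEN constants above.  The addresses are
 * placeholder test values.
 */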
13398 static const u8 tg3_tso_header[] = {
13399 0x08, 0x00,
13400 0x45, 0x00, 0x00, 0x00,
13401 0x00, 0x00, 0x40, 0x00,
13402 0x40, 0x06, 0x00, 0x00,
13403 0x0a, 0x00, 0x00, 0x01,
13404 0x0a, 0x00, 0x00, 0x02,
13405 0x0d, 0x00, 0xe0, 0x00,
13406 0x00, 0x00, 0x01, 0x00,
13407 0x00, 0x00, 0x02, 0x00,
13408 0x80, 0x10, 0x10, 0x00,
13409 0x14, 0x09, 0x00, 0x00,
13410 0x01, 0x01, 0x08, 0x0a,
13411 0x11, 0x11, 0x11, 0x11,
13412 0x11, 0x11, 0x11, 0x11,
13413 };
13414
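/* Core loopback engine: builds a single frame (optionally a TSO
 * super-frame), DMA-maps it, posts it to the transmit ring by hand, and
 * polls the status block until the TX consumer and RX producer indices
 * show the packet completed the round trip.  The received payload is
 * then compared byte for byte against the transmitted pattern.
 */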
13415 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13416 {
13417         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13418         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13419         u32 budget;
13420         struct sk_buff *skb;
13421         u8 *tx_data, *rx_data;
13422         dma_addr_t map;
13423         int num_pkts, tx_len, rx_len, i, err;
13424         struct tg3_rx_buffer_desc *desc;
13425         struct tg3_napi *tnapi, *rnapi;
13426         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13427
13428         tnapi = &tp->napi[0];
13429         rnapi = &tp->napi[0];
13430         if (tp->irq_cnt > 1) {
13431                 if (tg3_flag(tp, ENABLE_RSS))
13432                         rnapi = &tp->napi[1];
13433                 if (tg3_flag(tp, ENABLE_TSS))
13434                         tnapi = &tp->napi[1];
13435         }
13436         coal_now = tnapi->coal_now | rnapi->coal_now;
13437
13438         err = -EIO;
13439
13440         tx_len = pktsz;
13441         skb = netdev_alloc_skb(tp->dev, tx_len);
13442         if (!skb)
13443                 return -ENOMEM;
13444
13445         tx_data = skb_put(skb, tx_len);
13446         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13447         memset(tx_data + ETH_ALEN, 0x0, 8);
13448
13449         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13450
13451         if (tso_loopback) {
13452                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13453
13454                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13455                               TG3_TSO_TCP_OPT_LEN;
13456
13457                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13458                        sizeof(tg3_tso_header));
13459                 mss = TG3_TSO_MSS;
13460
13461                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13462                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13463
13464                 /* Set the total length field in the IP header */
13465                 iph->tot_len = htons((u16)(mss + hdr_len));
13466
13467                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13468                               TXD_FLAG_CPU_POST_DMA);
13469
13470                 if (tg3_flag(tp, HW_TSO_1) ||
13471                     tg3_flag(tp, HW_TSO_2) ||
13472                     tg3_flag(tp, HW_TSO_3)) {
13473                         struct tcphdr *th;
13474                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13475                         th = (struct tcphdr *)&tx_data[val];
13476                         th->check = 0;
13477                 } else
13478                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13479
13480                 if (tg3_flag(tp, HW_TSO_3)) {
13481                         mss |= (hdr_len & 0xc) << 12;
13482                         if (hdr_len & 0x10)
13483                                 base_flags |= 0x00000010;
13484                         base_flags |= (hdr_len & 0x3e0) << 5;
13485                 } else if (tg3_flag(tp, HW_TSO_2))
13486                         mss |= hdr_len << 9;
13487                 else if (tg3_flag(tp, HW_TSO_1) ||
13488                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13489                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13490                 } else {
13491                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13492                 }
13493
13494                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13495         } else {
13496                 num_pkts = 1;
13497                 data_off = ETH_HLEN;
13498
13499                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13500                     tx_len > VLAN_ETH_FRAME_LEN)
13501                         base_flags |= TXD_FLAG_JMB_PKT;
13502         }
13503
13504         for (i = data_off; i < tx_len; i++)
13505                 tx_data[i] = (u8) (i & 0xff);
13506
13507         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13508         if (pci_dma_mapping_error(tp->pdev, map)) {
13509                 dev_kfree_skb(skb);
13510                 return -EIO;
13511         }
13512
13513         val = tnapi->tx_prod;
13514         tnapi->tx_buffers[val].skb = skb;
13515         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13516
13517         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13518                rnapi->coal_now);
13519
13520         udelay(10);
13521
13522         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13523
13524         budget = tg3_tx_avail(tnapi);
13525         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13526                             base_flags | TXD_FLAG_END, mss, 0)) {
13527                 tnapi->tx_buffers[val].skb = NULL;
13528                 dev_kfree_skb(skb);
13529                 return -EIO;
13530         }
13531
13532         tnapi->tx_prod++;
13533
13534         /* Sync BD data before updating mailbox */
13535         wmb();
13536
13537         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13538         tr32_mailbox(tnapi->prodmbox);
13539
13540         udelay(10);
13541
13542         /* Poll up to 350 usec (35 x 10 usec) to allow enough time on some 10/100 Mbps devices.  */
13543         for (i = 0; i < 35; i++) {
13544                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13545                        coal_now);
13546
13547                 udelay(10);
13548
13549                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13550                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13551                 if ((tx_idx == tnapi->tx_prod) &&
13552                     (rx_idx == (rx_start_idx + num_pkts)))
13553                         break;
13554         }
13555
13556         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13557         dev_kfree_skb(skb);
13558
13559         if (tx_idx != tnapi->tx_prod)
13560                 goto out;
13561
13562         if (rx_idx != rx_start_idx + num_pkts)
13563                 goto out;
13564
13565         val = data_off;
13566         while (rx_idx != rx_start_idx) {
13567                 desc = &rnapi->rx_rcb[rx_start_idx++];
13568                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13569                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13570
13571                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13572                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13573                         goto out;
13574
13575                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13576                          - ETH_FCS_LEN;
13577
13578                 if (!tso_loopback) {
13579                         if (rx_len != tx_len)
13580                                 goto out;
13581
13582                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13583                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13584                                         goto out;
13585                         } else {
13586                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13587                                         goto out;
13588                         }
13589                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13590                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13591                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13592                         goto out;
13593                 }
13594
13595                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13596                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13597                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13598                                              mapping);
13599                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13600                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13601                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13602                                              mapping);
13603                 } else
13604                         goto out;
13605
13606                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13607                                             PCI_DMA_FROMDEVICE);
13608
13609                 rx_data += TG3_RX_OFFSET(tp);
13610                 for (i = data_off; i < rx_len; i++, val++) {
13611                         if (*(rx_data + i) != (u8) (val & 0xff))
13612                                 goto out;
13613                 }
13614         }
13615
13616         err = 0;
13617
13618         /* tg3_free_rings will unmap and free the rx_data */
13619 out:
13620         return err;
13621 }
13622
13623 #define TG3_STD_LOOPBACK_FAILED         1
13624 #define TG3_JMB_LOOPBACK_FAILED         2
13625 #define TG3_TSO_LOOPBACK_FAILED         4
13626 #define TG3_LOOPBACK_FAILED \
13627         (TG3_STD_LOOPBACK_FAILED | \
13628          TG3_JMB_LOOPBACK_FAILED | \
13629          TG3_TSO_LOOPBACK_FAILED)
13630
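/* Runs the loopback suite at up to three points: internal MAC loopback
 * (skipped on 5780 and on CPMU-equipped chips, per the errata note
 * below), internal PHY loopback, and, when requested, external loopback
 * through a physical plug.  Per-mode failures are OR-ed into data[].
 */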
13631 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13632 {
13633         int err = -EIO;
13634         u32 eee_cap;
13635         u32 jmb_pkt_sz = 9000;
13636
13637         if (tp->dma_limit)
13638                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13639
13640         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13641         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13642
13643         if (!netif_running(tp->dev)) {
13644                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13645                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13646                 if (do_extlpbk)
13647                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13648                 goto done;
13649         }
13650
13651         err = tg3_reset_hw(tp, true);
13652         if (err) {
13653                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13654                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13655                 if (do_extlpbk)
13656                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13657                 goto done;
13658         }
13659
13660         if (tg3_flag(tp, ENABLE_RSS)) {
13661                 int i;
13662
13663                 /* Reroute all rx packets to the 1st queue */
13664                 for (i = MAC_RSS_INDIR_TBL_0;
13665                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13666                         tw32(i, 0x0);
13667         }
13668
13669         /* HW errata - mac loopback fails in some cases on 5780.
13670          * Normal traffic and PHY loopback are not affected by
13671          * errata.  Also, the MAC loopback test is deprecated for
13672          * all newer ASIC revisions.
13673          */
13674         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13675             !tg3_flag(tp, CPMU_PRESENT)) {
13676                 tg3_mac_loopback(tp, true);
13677
13678                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13679                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13680
13681                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13682                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13683                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13684
13685                 tg3_mac_loopback(tp, false);
13686         }
13687
13688         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13689             !tg3_flag(tp, USE_PHYLIB)) {
13690                 int i;
13691
13692                 tg3_phy_lpbk_set(tp, 0, false);
13693
13694                 /* Wait for link */
13695                 for (i = 0; i < 100; i++) {
13696                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13697                                 break;
13698                         mdelay(1);
13699                 }
13700
13701                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13702                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13703                 if (tg3_flag(tp, TSO_CAPABLE) &&
13704                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13705                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13706                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13707                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13708                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13709
13710                 if (do_extlpbk) {
13711                         tg3_phy_lpbk_set(tp, 0, true);
13712
13713                         /* All link indications report up, but the hardware
13714                          * isn't really ready for about 20 msec.  Double it
13715                          * to be sure.
13716                          */
13717                         mdelay(40);
13718
13719                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13720                                 data[TG3_EXT_LOOPB_TEST] |=
13721                                                         TG3_STD_LOOPBACK_FAILED;
13722                         if (tg3_flag(tp, TSO_CAPABLE) &&
13723                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13724                                 data[TG3_EXT_LOOPB_TEST] |=
13725                                                         TG3_TSO_LOOPBACK_FAILED;
13726                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13727                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13728                                 data[TG3_EXT_LOOPB_TEST] |=
13729                                                         TG3_JMB_LOOPBACK_FAILED;
13730                 }
13731
13732                 /* Re-enable gphy autopowerdown. */
13733                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13734                         tg3_phy_toggle_apd(tp, true);
13735         }
13736
13737         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13738                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13739
13740 done:
13741         tp->phy_flags |= eee_cap;
13742
13743         return err;
13744 }
13745
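/* Top-level ethtool self-test entry point.  The online tests (NVRAM,
 * link) run against the live device; the offline battery additionally
 * halts the chip for the register, memory, loopback and interrupt tests
 * and then restarts the hardware.
 */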
13746 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13747                           u64 *data)
13748 {
13749         struct tg3 *tp = netdev_priv(dev);
13750         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13751
13752         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13753                 if (tg3_power_up(tp)) {
13754                         etest->flags |= ETH_TEST_FL_FAILED;
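                        /* Any nonzero value marks a test as failed; memset()
                         * fills every byte, so each u64 reads 0x0101...01,
                         * which is fine for a pass/fail report.
                         */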
13755                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13756                         return;
13757                 }
13758                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13759         }
13760
13761         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13762
13763         if (tg3_test_nvram(tp) != 0) {
13764                 etest->flags |= ETH_TEST_FL_FAILED;
13765                 data[TG3_NVRAM_TEST] = 1;
13766         }
13767         if (!doextlpbk && tg3_test_link(tp)) {
13768                 etest->flags |= ETH_TEST_FL_FAILED;
13769                 data[TG3_LINK_TEST] = 1;
13770         }
13771         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13772                 int err, err2 = 0, irq_sync = 0;
13773
13774                 if (netif_running(dev)) {
13775                         tg3_phy_stop(tp);
13776                         tg3_netif_stop(tp);
13777                         irq_sync = 1;
13778                 }
13779
13780                 tg3_full_lock(tp, irq_sync);
13781                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13782                 err = tg3_nvram_lock(tp);
13783                 tg3_halt_cpu(tp, RX_CPU_BASE);
13784                 if (!tg3_flag(tp, 5705_PLUS))
13785                         tg3_halt_cpu(tp, TX_CPU_BASE);
13786                 if (!err)
13787                         tg3_nvram_unlock(tp);
13788
13789                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13790                         tg3_phy_reset(tp);
13791
13792                 if (tg3_test_registers(tp) != 0) {
13793                         etest->flags |= ETH_TEST_FL_FAILED;
13794                         data[TG3_REGISTER_TEST] = 1;
13795                 }
13796
13797                 if (tg3_test_memory(tp) != 0) {
13798                         etest->flags |= ETH_TEST_FL_FAILED;
13799                         data[TG3_MEMORY_TEST] = 1;
13800                 }
13801
13802                 if (doextlpbk)
13803                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13804
13805                 if (tg3_test_loopback(tp, data, doextlpbk))
13806                         etest->flags |= ETH_TEST_FL_FAILED;
13807
13808                 tg3_full_unlock(tp);
13809
13810                 if (tg3_test_interrupt(tp) != 0) {
13811                         etest->flags |= ETH_TEST_FL_FAILED;
13812                         data[TG3_INTERRUPT_TEST] = 1;
13813                 }
13814
13815                 tg3_full_lock(tp, 0);
13816
13817                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13818                 if (netif_running(dev)) {
13819                         tg3_flag_set(tp, INIT_COMPLETE);
13820                         err2 = tg3_restart_hw(tp, true);
13821                         if (!err2)
13822                                 tg3_netif_start(tp);
13823                 }
13824
13825                 tg3_full_unlock(tp);
13826
13827                 if (irq_sync && !err2)
13828                         tg3_phy_start(tp);
13829         }
13830         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13831                 tg3_power_down_prepare(tp);
13832
13833 }
13834
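/* SIOCSHWTSTAMP handler: maps the generic hwtstamp_config RX filter onto
 * the chip's RX_PTP_CTL bits (PTP v1/v2, L2 vs. L4 transport, event vs.
 * sync vs. delay-req classes) and latches TX timestamping on or off via
 * the TX_TSTAMP_EN flag.
 */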
13835 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13836 {
13837         struct tg3 *tp = netdev_priv(dev);
13838         struct hwtstamp_config stmpconf;
13839
13840         if (!tg3_flag(tp, PTP_CAPABLE))
13841                 return -EOPNOTSUPP;
13842
13843         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13844                 return -EFAULT;
13845
13846         if (stmpconf.flags)
13847                 return -EINVAL;
13848
13849         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13850             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13851                 return -ERANGE;
13852
13853         switch (stmpconf.rx_filter) {
13854         case HWTSTAMP_FILTER_NONE:
13855                 tp->rxptpctl = 0;
13856                 break;
13857         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13858                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13859                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13860                 break;
13861         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13862                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13863                                TG3_RX_PTP_CTL_SYNC_EVNT;
13864                 break;
13865         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13866                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13867                                TG3_RX_PTP_CTL_DELAY_REQ;
13868                 break;
13869         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13870                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13871                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13872                 break;
13873         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13874                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13875                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13876                 break;
13877         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13878                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13879                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13880                 break;
13881         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13882                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13883                                TG3_RX_PTP_CTL_SYNC_EVNT;
13884                 break;
13885         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13886                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13887                                TG3_RX_PTP_CTL_SYNC_EVNT;
13888                 break;
13889         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13890                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13891                                TG3_RX_PTP_CTL_SYNC_EVNT;
13892                 break;
13893         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13894                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13895                                TG3_RX_PTP_CTL_DELAY_REQ;
13896                 break;
13897         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13898                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13899                                TG3_RX_PTP_CTL_DELAY_REQ;
13900                 break;
13901         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13902                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13903                                TG3_RX_PTP_CTL_DELAY_REQ;
13904                 break;
13905         default:
13906                 return -ERANGE;
13907         }
13908
13909         if (netif_running(dev) && tp->rxptpctl)
13910                 tw32(TG3_RX_PTP_CTL,
13911                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13912
13913         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13914                 tg3_flag_set(tp, TX_TSTAMP_EN);
13915         else
13916                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13917
13918         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13919                 -EFAULT : 0;
13920 }
13921
13922 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13923 {
13924         struct tg3 *tp = netdev_priv(dev);
13925         struct hwtstamp_config stmpconf;
13926
13927         if (!tg3_flag(tp, PTP_CAPABLE))
13928                 return -EOPNOTSUPP;
13929
13930         stmpconf.flags = 0;
13931         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13932                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13933
13934         switch (tp->rxptpctl) {
13935         case 0:
13936                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13937                 break;
13938         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13939                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13940                 break;
13941         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13942                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13943                 break;
13944         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13945                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13946                 break;
13947         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13948                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13949                 break;
13950         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13951                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13952                 break;
13953         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13954                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13955                 break;
13956         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13957                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13958                 break;
13959         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13960                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13961                 break;
13962         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13963                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13964                 break;
13965         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13966                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13967                 break;
13968         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13969                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13970                 break;
13971         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13972                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13973                 break;
13974         default:
13975                 WARN_ON_ONCE(1);
13976                 return -ERANGE;
13977         }
13978
13979         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13980                 -EFAULT : 0;
13981 }
13982
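/* Legacy ioctl entry point.  When phylib owns the PHY, everything is
 * forwarded to phy_mii_ioctl().  Otherwise SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG are serviced with direct MDIO accesses under the driver
 * lock, and the hardware timestamp ioctls are dispatched to the helpers
 * above.
 */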
13983 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13984 {
13985         struct mii_ioctl_data *data = if_mii(ifr);
13986         struct tg3 *tp = netdev_priv(dev);
13987         int err;
13988
13989         if (tg3_flag(tp, USE_PHYLIB)) {
13990                 struct phy_device *phydev;
13991                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13992                         return -EAGAIN;
13993                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13994                 return phy_mii_ioctl(phydev, ifr, cmd);
13995         }
13996
13997         switch (cmd) {
13998         case SIOCGMIIPHY:
13999                 data->phy_id = tp->phy_addr;
14000
14001                 /* fall through */
14002         case SIOCGMIIREG: {
14003                 u32 mii_regval;
14004
14005                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14006                         break;                  /* We have no PHY */
14007
14008                 if (!netif_running(dev))
14009                         return -EAGAIN;
14010
14011                 spin_lock_bh(&tp->lock);
14012                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14013                                     data->reg_num & 0x1f, &mii_regval);
14014                 spin_unlock_bh(&tp->lock);
14015
14016                 data->val_out = mii_regval;
14017
14018                 return err;
14019         }
14020
14021         case SIOCSMIIREG:
14022                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14023                         break;                  /* We have no PHY */
14024
14025                 if (!netif_running(dev))
14026                         return -EAGAIN;
14027
14028                 spin_lock_bh(&tp->lock);
14029                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14030                                      data->reg_num & 0x1f, data->val_in);
14031                 spin_unlock_bh(&tp->lock);
14032
14033                 return err;
14034
14035         case SIOCSHWTSTAMP:
14036                 return tg3_hwtstamp_set(dev, ifr);
14037
14038         case SIOCGHWTSTAMP:
14039                 return tg3_hwtstamp_get(dev, ifr);
14040
14041         default:
14042                 /* do nothing */
14043                 break;
14044         }
14045         return -EOPNOTSUPP;
14046 }
14047
14048 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14049 {
14050         struct tg3 *tp = netdev_priv(dev);
14051
14052         memcpy(ec, &tp->coal, sizeof(*ec));
14053         return 0;
14054 }
14055
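/* Validates and applies interrupt coalescing parameters.  The *_irq and
 * stats-block limits exist only on pre-5705 silicon; on 5705+ parts the
 * bounds stay 0, so any nonzero request for those fields is rejected.
 */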
14056 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14057 {
14058         struct tg3 *tp = netdev_priv(dev);
14059         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14060         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14061
14062         if (!tg3_flag(tp, 5705_PLUS)) {
14063                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14064                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14065                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14066                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14067         }
14068
14069         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14070             (!ec->rx_coalesce_usecs) ||
14071             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14072             (!ec->tx_coalesce_usecs) ||
14073             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14074             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14075             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14076             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14077             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14078             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14079             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14080             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14081                 return -EINVAL;
14082
14083         /* Only copy relevant parameters, ignore all others. */
14084         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14085         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14086         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14087         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14088         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14089         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14090         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14091         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14092         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14093
14094         if (netif_running(dev)) {
14095                 tg3_full_lock(tp, 0);
14096                 __tg3_set_coalesce(tp, &tp->coal);
14097                 tg3_full_unlock(tp);
14098         }
14099         return 0;
14100 }
14101
14102 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14103 {
14104         struct tg3 *tp = netdev_priv(dev);
14105
14106         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14107                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14108                 return -EOPNOTSUPP;
14109         }
14110
14111         if (edata->advertised != tp->eee.advertised) {
14112                 netdev_warn(tp->dev,
14113                             "Direct manipulation of EEE advertisement is not supported\n");
14114                 return -EINVAL;
14115         }
14116
14117         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14118                 netdev_warn(tp->dev,
                            "Maximum supported Tx LPI timer is %#x\n",
14120                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14121                 return -EINVAL;
14122         }
14123
14124         tp->eee = *edata;
14125
14126         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14127         tg3_warn_mgmt_link_flap(tp);
14128
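        /* If the interface is up, apply the new EEE settings now and
         * reset the PHY so the link renegotiates with them.
         */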
14129         if (netif_running(tp->dev)) {
14130                 tg3_full_lock(tp, 0);
14131                 tg3_setup_eee(tp);
14132                 tg3_phy_reset(tp);
14133                 tg3_full_unlock(tp);
14134         }
14135
14136         return 0;
14137 }
14138
14139 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14140 {
14141         struct tg3 *tp = netdev_priv(dev);
14142
14143         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14144                 netdev_warn(tp->dev,
14145                             "Board does not support EEE!\n");
14146                 return -EOPNOTSUPP;
14147         }
14148
14149         *edata = tp->eee;
14150         return 0;
14151 }
14152
14153 static const struct ethtool_ops tg3_ethtool_ops = {
14154         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14155                                      ETHTOOL_COALESCE_MAX_FRAMES |
14156                                      ETHTOOL_COALESCE_USECS_IRQ |
14157                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14158                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14159         .get_drvinfo            = tg3_get_drvinfo,
14160         .get_regs_len           = tg3_get_regs_len,
14161         .get_regs               = tg3_get_regs,
14162         .get_wol                = tg3_get_wol,
14163         .set_wol                = tg3_set_wol,
14164         .get_msglevel           = tg3_get_msglevel,
14165         .set_msglevel           = tg3_set_msglevel,
14166         .nway_reset             = tg3_nway_reset,
14167         .get_link               = ethtool_op_get_link,
14168         .get_eeprom_len         = tg3_get_eeprom_len,
14169         .get_eeprom             = tg3_get_eeprom,
14170         .set_eeprom             = tg3_set_eeprom,
14171         .get_ringparam          = tg3_get_ringparam,
14172         .set_ringparam          = tg3_set_ringparam,
14173         .get_pauseparam         = tg3_get_pauseparam,
14174         .set_pauseparam         = tg3_set_pauseparam,
14175         .self_test              = tg3_self_test,
14176         .get_strings            = tg3_get_strings,
14177         .set_phys_id            = tg3_set_phys_id,
14178         .get_ethtool_stats      = tg3_get_ethtool_stats,
14179         .get_coalesce           = tg3_get_coalesce,
14180         .set_coalesce           = tg3_set_coalesce,
14181         .get_sset_count         = tg3_get_sset_count,
14182         .get_rxnfc              = tg3_get_rxnfc,
14183         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14184         .get_rxfh               = tg3_get_rxfh,
14185         .set_rxfh               = tg3_set_rxfh,
14186         .get_channels           = tg3_get_channels,
14187         .set_channels           = tg3_set_channels,
14188         .get_ts_info            = tg3_get_ts_info,
14189         .get_eee                = tg3_get_eee,
14190         .set_eee                = tg3_set_eee,
14191         .get_link_ksettings     = tg3_get_link_ksettings,
14192         .set_link_ksettings     = tg3_set_link_ksettings,
14193 };
14194
14195 static void tg3_get_stats64(struct net_device *dev,
14196                             struct rtnl_link_stats64 *stats)
14197 {
14198         struct tg3 *tp = netdev_priv(dev);
14199
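        /* With the device down or mid-reset there is no live hardware
         * statistics block, so return the snapshot kept in
         * tp->net_stats_prev instead.
         */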
14200         spin_lock_bh(&tp->lock);
14201         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14202                 *stats = tp->net_stats_prev;
14203                 spin_unlock_bh(&tp->lock);
14204                 return;
14205         }
14206
14207         tg3_get_nstats(tp, stats);
14208         spin_unlock_bh(&tp->lock);
14209 }
14210
14211 static void tg3_set_rx_mode(struct net_device *dev)
14212 {
14213         struct tg3 *tp = netdev_priv(dev);
14214
14215         if (!netif_running(dev))
14216                 return;
14217
14218         tg3_full_lock(tp, 0);
14219         __tg3_set_rx_mode(dev);
14220         tg3_full_unlock(tp);
14221 }
14222
14223 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14224                                int new_mtu)
14225 {
14226         dev->mtu = new_mtu;
14227
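        /* Jumbo frames cost 5780-class chips their TSO capability, so
         * TSO_CAPABLE is toggled and netdev_update_features() is called
         * to recompute the feature set; other chips just enable or
         * disable the dedicated jumbo RX ring.
         */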
14228         if (new_mtu > ETH_DATA_LEN) {
14229                 if (tg3_flag(tp, 5780_CLASS)) {
14230                         netdev_update_features(dev);
14231                         tg3_flag_clear(tp, TSO_CAPABLE);
14232                 } else {
14233                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14234                 }
14235         } else {
14236                 if (tg3_flag(tp, 5780_CLASS)) {
14237                         tg3_flag_set(tp, TSO_CAPABLE);
14238                         netdev_update_features(dev);
14239                 }
14240                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14241         }
14242 }
14243
14244 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14245 {
14246         struct tg3 *tp = netdev_priv(dev);
14247         int err;
14248         bool reset_phy = false;
14249
14250         if (!netif_running(dev)) {
                /* The new MTU will simply take effect the next time
                 * the device is brought up.
                 */
14254                 tg3_set_mtu(dev, tp, new_mtu);
14255                 return 0;
14256         }
14257
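        /* Quiesce the PHY and the data path before reconfiguring. */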
14258         tg3_phy_stop(tp);
14259
14260         tg3_netif_stop(tp);
14261
14262         tg3_set_mtu(dev, tp, new_mtu);
14263
14264         tg3_full_lock(tp, 1);
14265
14266         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14267
14268         /* Reset PHY, otherwise the read DMA engine will be in a mode that
14269          * breaks all requests to 256 bytes.
14270          */
14271         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14272             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14273             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14274             tg3_asic_rev(tp) == ASIC_REV_5720)
14275                 reset_phy = true;
14276
14277         err = tg3_restart_hw(tp, reset_phy);
14278
14279         if (!err)
14280                 tg3_netif_start(tp);
14281
14282         tg3_full_unlock(tp);
14283
14284         if (!err)
14285                 tg3_phy_start(tp);
14286
14287         return err;
14288 }
14289
14290 static const struct net_device_ops tg3_netdev_ops = {
14291         .ndo_open               = tg3_open,
14292         .ndo_stop               = tg3_close,
14293         .ndo_start_xmit         = tg3_start_xmit,
14294         .ndo_get_stats64        = tg3_get_stats64,
14295         .ndo_validate_addr      = eth_validate_addr,
14296         .ndo_set_rx_mode        = tg3_set_rx_mode,
14297         .ndo_set_mac_address    = tg3_set_mac_addr,
14298         .ndo_do_ioctl           = tg3_ioctl,
14299         .ndo_tx_timeout         = tg3_tx_timeout,
14300         .ndo_change_mtu         = tg3_change_mtu,
14301         .ndo_fix_features       = tg3_fix_features,
14302         .ndo_set_features       = tg3_set_features,
14303 #ifdef CONFIG_NET_POLL_CONTROLLER
14304         .ndo_poll_controller    = tg3_poll_controller,
14305 #endif
14306 };
14307
14308 static void tg3_get_eeprom_size(struct tg3 *tp)
14309 {
14310         u32 cursize, val, magic;
14311
14312         tp->nvram_size = EEPROM_CHIP_SIZE;
14313
14314         if (tg3_nvram_read(tp, 0, &magic) != 0)
14315                 return;
14316
14317         if ((magic != TG3_EEPROM_MAGIC) &&
14318             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14319             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14320                 return;
14321
14322         /*
14323          * Size the chip by reading offsets at increasing powers of two.
14324          * When we encounter our validation signature, we know the addressing
14325          * has wrapped around, and thus have our chip size.
14326          */
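        /* For example, reading the magic back at offset 0x20000 indicates
         * a 128 KiB part.
         */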
14327         cursize = 0x10;
14328
14329         while (cursize < tp->nvram_size) {
14330                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14331                         return;
14332
14333                 if (val == magic)
14334                         break;
14335
14336                 cursize <<= 1;
14337         }
14338
14339         tp->nvram_size = cursize;
14340 }
14341
14342 static void tg3_get_nvram_size(struct tg3 *tp)
14343 {
14344         u32 val;
14345
14346         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14347                 return;
14348
14349         /* Selfboot format */
14350         if (val != TG3_EEPROM_MAGIC) {
14351                 tg3_get_eeprom_size(tp);
14352                 return;
14353         }
14354
14355         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14356                 if (val != 0) {
                        /* We want the 16-bit value at offset 0xf2.
                         * tg3_nvram_read() byteswaps what it reads
                         * according to the byteswapping settings used for
                         * all other register accesses, which guarantees
                         * that the value we want always lands in the lower
                         * 16 bits.  NVRAM data, however, is stored
                         * little-endian, so the result of the read is
                         * always opposite the CPU's endianness; the 16-bit
                         * byteswap below brings it back to CPU byte order.
                         */
14368                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14369                         return;
14370                 }
14371         }
14372         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14373 }
14374
14375 static void tg3_get_nvram_info(struct tg3 *tp)
14376 {
14377         u32 nvcfg1;
14378
14379         nvcfg1 = tr32(NVRAM_CFG1);
14380         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14381                 tg3_flag_set(tp, FLASH);
14382         } else {
14383                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14384                 tw32(NVRAM_CFG1, nvcfg1);
14385         }
14386
14387         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14388             tg3_flag(tp, 5780_CLASS)) {
14389                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14390                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14391                         tp->nvram_jedecnum = JEDEC_ATMEL;
14392                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14393                         tg3_flag_set(tp, NVRAM_BUFFERED);
14394                         break;
14395                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14396                         tp->nvram_jedecnum = JEDEC_ATMEL;
14397                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14398                         break;
14399                 case FLASH_VENDOR_ATMEL_EEPROM:
14400                         tp->nvram_jedecnum = JEDEC_ATMEL;
14401                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14402                         tg3_flag_set(tp, NVRAM_BUFFERED);
14403                         break;
14404                 case FLASH_VENDOR_ST:
14405                         tp->nvram_jedecnum = JEDEC_ST;
14406                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14407                         tg3_flag_set(tp, NVRAM_BUFFERED);
14408                         break;
14409                 case FLASH_VENDOR_SAIFUN:
14410                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14411                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14412                         break;
14413                 case FLASH_VENDOR_SST_SMALL:
14414                 case FLASH_VENDOR_SST_LARGE:
14415                         tp->nvram_jedecnum = JEDEC_SST;
14416                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14417                         break;
14418                 }
14419         } else {
14420                 tp->nvram_jedecnum = JEDEC_ATMEL;
14421                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14422                 tg3_flag_set(tp, NVRAM_BUFFERED);
14423         }
14424 }
14425
14426 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14427 {
14428         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14429         case FLASH_5752PAGE_SIZE_256:
14430                 tp->nvram_pagesize = 256;
14431                 break;
14432         case FLASH_5752PAGE_SIZE_512:
14433                 tp->nvram_pagesize = 512;
14434                 break;
14435         case FLASH_5752PAGE_SIZE_1K:
14436                 tp->nvram_pagesize = 1024;
14437                 break;
14438         case FLASH_5752PAGE_SIZE_2K:
14439                 tp->nvram_pagesize = 2048;
14440                 break;
14441         case FLASH_5752PAGE_SIZE_4K:
14442                 tp->nvram_pagesize = 4096;
14443                 break;
14444         case FLASH_5752PAGE_SIZE_264:
14445                 tp->nvram_pagesize = 264;
14446                 break;
14447         case FLASH_5752PAGE_SIZE_528:
14448                 tp->nvram_pagesize = 528;
14449                 break;
14450         }
14451 }
14452
14453 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14454 {
14455         u32 nvcfg1;
14456
14457         nvcfg1 = tr32(NVRAM_CFG1);
14458
14459         /* NVRAM protection for TPM */
14460         if (nvcfg1 & (1 << 27))
14461                 tg3_flag_set(tp, PROTECTED_NVRAM);
14462
14463         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14464         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14465         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14466                 tp->nvram_jedecnum = JEDEC_ATMEL;
14467                 tg3_flag_set(tp, NVRAM_BUFFERED);
14468                 break;
14469         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14470                 tp->nvram_jedecnum = JEDEC_ATMEL;
14471                 tg3_flag_set(tp, NVRAM_BUFFERED);
14472                 tg3_flag_set(tp, FLASH);
14473                 break;
14474         case FLASH_5752VENDOR_ST_M45PE10:
14475         case FLASH_5752VENDOR_ST_M45PE20:
14476         case FLASH_5752VENDOR_ST_M45PE40:
14477                 tp->nvram_jedecnum = JEDEC_ST;
14478                 tg3_flag_set(tp, NVRAM_BUFFERED);
14479                 tg3_flag_set(tp, FLASH);
14480                 break;
14481         }
14482
14483         if (tg3_flag(tp, FLASH)) {
14484                 tg3_nvram_get_pagesize(tp, nvcfg1);
14485         } else {
14486                 /* For eeprom, set pagesize to maximum eeprom size */
14487                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14488
14489                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14490                 tw32(NVRAM_CFG1, nvcfg1);
14491         }
14492 }
14493
14494 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14495 {
14496         u32 nvcfg1, protect = 0;
14497
14498         nvcfg1 = tr32(NVRAM_CFG1);
14499
14500         /* NVRAM protection for TPM */
14501         if (nvcfg1 & (1 << 27)) {
14502                 tg3_flag_set(tp, PROTECTED_NVRAM);
14503                 protect = 1;
14504         }
14505
14506         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14507         switch (nvcfg1) {
14508         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14509         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14510         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14511         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14512                 tp->nvram_jedecnum = JEDEC_ATMEL;
14513                 tg3_flag_set(tp, NVRAM_BUFFERED);
14514                 tg3_flag_set(tp, FLASH);
14515                 tp->nvram_pagesize = 264;
14516                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14517                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14518                         tp->nvram_size = (protect ? 0x3e200 :
14519                                           TG3_NVRAM_SIZE_512KB);
14520                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14521                         tp->nvram_size = (protect ? 0x1f200 :
14522                                           TG3_NVRAM_SIZE_256KB);
14523                 else
14524                         tp->nvram_size = (protect ? 0x1f200 :
14525                                           TG3_NVRAM_SIZE_128KB);
14526                 break;
14527         case FLASH_5752VENDOR_ST_M45PE10:
14528         case FLASH_5752VENDOR_ST_M45PE20:
14529         case FLASH_5752VENDOR_ST_M45PE40:
14530                 tp->nvram_jedecnum = JEDEC_ST;
14531                 tg3_flag_set(tp, NVRAM_BUFFERED);
14532                 tg3_flag_set(tp, FLASH);
14533                 tp->nvram_pagesize = 256;
14534                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14535                         tp->nvram_size = (protect ?
14536                                           TG3_NVRAM_SIZE_64KB :
14537                                           TG3_NVRAM_SIZE_128KB);
14538                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14539                         tp->nvram_size = (protect ?
14540                                           TG3_NVRAM_SIZE_64KB :
14541                                           TG3_NVRAM_SIZE_256KB);
14542                 else
14543                         tp->nvram_size = (protect ?
14544                                           TG3_NVRAM_SIZE_128KB :
14545                                           TG3_NVRAM_SIZE_512KB);
14546                 break;
14547         }
14548 }
14549
14550 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14551 {
14552         u32 nvcfg1;
14553
14554         nvcfg1 = tr32(NVRAM_CFG1);
14555
14556         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14557         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14558         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14559         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14560         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14561                 tp->nvram_jedecnum = JEDEC_ATMEL;
14562                 tg3_flag_set(tp, NVRAM_BUFFERED);
14563                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14564
14565                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14566                 tw32(NVRAM_CFG1, nvcfg1);
14567                 break;
14568         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14569         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14570         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14571         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14572                 tp->nvram_jedecnum = JEDEC_ATMEL;
14573                 tg3_flag_set(tp, NVRAM_BUFFERED);
14574                 tg3_flag_set(tp, FLASH);
14575                 tp->nvram_pagesize = 264;
14576                 break;
14577         case FLASH_5752VENDOR_ST_M45PE10:
14578         case FLASH_5752VENDOR_ST_M45PE20:
14579         case FLASH_5752VENDOR_ST_M45PE40:
14580                 tp->nvram_jedecnum = JEDEC_ST;
14581                 tg3_flag_set(tp, NVRAM_BUFFERED);
14582                 tg3_flag_set(tp, FLASH);
14583                 tp->nvram_pagesize = 256;
14584                 break;
14585         }
14586 }
14587
14588 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14589 {
14590         u32 nvcfg1, protect = 0;
14591
14592         nvcfg1 = tr32(NVRAM_CFG1);
14593
14594         /* NVRAM protection for TPM */
14595         if (nvcfg1 & (1 << 27)) {
14596                 tg3_flag_set(tp, PROTECTED_NVRAM);
14597                 protect = 1;
14598         }
14599
14600         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14601         switch (nvcfg1) {
14602         case FLASH_5761VENDOR_ATMEL_ADB021D:
14603         case FLASH_5761VENDOR_ATMEL_ADB041D:
14604         case FLASH_5761VENDOR_ATMEL_ADB081D:
14605         case FLASH_5761VENDOR_ATMEL_ADB161D:
14606         case FLASH_5761VENDOR_ATMEL_MDB021D:
14607         case FLASH_5761VENDOR_ATMEL_MDB041D:
14608         case FLASH_5761VENDOR_ATMEL_MDB081D:
14609         case FLASH_5761VENDOR_ATMEL_MDB161D:
14610                 tp->nvram_jedecnum = JEDEC_ATMEL;
14611                 tg3_flag_set(tp, NVRAM_BUFFERED);
14612                 tg3_flag_set(tp, FLASH);
14613                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14614                 tp->nvram_pagesize = 256;
14615                 break;
14616         case FLASH_5761VENDOR_ST_A_M45PE20:
14617         case FLASH_5761VENDOR_ST_A_M45PE40:
14618         case FLASH_5761VENDOR_ST_A_M45PE80:
14619         case FLASH_5761VENDOR_ST_A_M45PE16:
14620         case FLASH_5761VENDOR_ST_M_M45PE20:
14621         case FLASH_5761VENDOR_ST_M_M45PE40:
14622         case FLASH_5761VENDOR_ST_M_M45PE80:
14623         case FLASH_5761VENDOR_ST_M_M45PE16:
14624                 tp->nvram_jedecnum = JEDEC_ST;
14625                 tg3_flag_set(tp, NVRAM_BUFFERED);
14626                 tg3_flag_set(tp, FLASH);
14627                 tp->nvram_pagesize = 256;
14628                 break;
14629         }
14630
14631         if (protect) {
14632                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14633         } else {
14634                 switch (nvcfg1) {
14635                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14636                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14637                 case FLASH_5761VENDOR_ST_A_M45PE16:
14638                 case FLASH_5761VENDOR_ST_M_M45PE16:
14639                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14640                         break;
14641                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14642                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14643                 case FLASH_5761VENDOR_ST_A_M45PE80:
14644                 case FLASH_5761VENDOR_ST_M_M45PE80:
14645                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14646                         break;
14647                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14648                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14649                 case FLASH_5761VENDOR_ST_A_M45PE40:
14650                 case FLASH_5761VENDOR_ST_M_M45PE40:
14651                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14652                         break;
14653                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14654                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14655                 case FLASH_5761VENDOR_ST_A_M45PE20:
14656                 case FLASH_5761VENDOR_ST_M_M45PE20:
14657                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14658                         break;
14659                 }
14660         }
14661 }
14662
14663 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14664 {
14665         tp->nvram_jedecnum = JEDEC_ATMEL;
14666         tg3_flag_set(tp, NVRAM_BUFFERED);
14667         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14668 }
14669
14670 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14671 {
14672         u32 nvcfg1;
14673
14674         nvcfg1 = tr32(NVRAM_CFG1);
14675
14676         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14677         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14678         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14679                 tp->nvram_jedecnum = JEDEC_ATMEL;
14680                 tg3_flag_set(tp, NVRAM_BUFFERED);
14681                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14682
14683                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14684                 tw32(NVRAM_CFG1, nvcfg1);
14685                 return;
14686         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14687         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14688         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14689         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14690         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14691         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14692         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14693                 tp->nvram_jedecnum = JEDEC_ATMEL;
14694                 tg3_flag_set(tp, NVRAM_BUFFERED);
14695                 tg3_flag_set(tp, FLASH);
14696
14697                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14698                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14699                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14700                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14701                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14702                         break;
14703                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14704                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14705                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14706                         break;
14707                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14708                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14709                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14710                         break;
14711                 }
14712                 break;
14713         case FLASH_5752VENDOR_ST_M45PE10:
14714         case FLASH_5752VENDOR_ST_M45PE20:
14715         case FLASH_5752VENDOR_ST_M45PE40:
14716                 tp->nvram_jedecnum = JEDEC_ST;
14717                 tg3_flag_set(tp, NVRAM_BUFFERED);
14718                 tg3_flag_set(tp, FLASH);
14719
14720                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14721                 case FLASH_5752VENDOR_ST_M45PE10:
14722                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14723                         break;
14724                 case FLASH_5752VENDOR_ST_M45PE20:
14725                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14726                         break;
14727                 case FLASH_5752VENDOR_ST_M45PE40:
14728                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14729                         break;
14730                 }
14731                 break;
14732         default:
14733                 tg3_flag_set(tp, NO_NVRAM);
14734                 return;
14735         }
14736
14737         tg3_nvram_get_pagesize(tp, nvcfg1);
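        /* Page sizes of 264 and 528 bytes are the Atmel AT45DB "DataFlash"
         * geometries, which need page-based address translation; all other
         * (power-of-two) geometries are addressed linearly.
         */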
14738         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14739                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14740 }
14741
14743 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14744 {
14745         u32 nvcfg1;
14746
14747         nvcfg1 = tr32(NVRAM_CFG1);
14748
14749         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14750         case FLASH_5717VENDOR_ATMEL_EEPROM:
14751         case FLASH_5717VENDOR_MICRO_EEPROM:
14752                 tp->nvram_jedecnum = JEDEC_ATMEL;
14753                 tg3_flag_set(tp, NVRAM_BUFFERED);
14754                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14755
14756                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14757                 tw32(NVRAM_CFG1, nvcfg1);
14758                 return;
14759         case FLASH_5717VENDOR_ATMEL_MDB011D:
14760         case FLASH_5717VENDOR_ATMEL_ADB011B:
14761         case FLASH_5717VENDOR_ATMEL_ADB011D:
14762         case FLASH_5717VENDOR_ATMEL_MDB021D:
14763         case FLASH_5717VENDOR_ATMEL_ADB021B:
14764         case FLASH_5717VENDOR_ATMEL_ADB021D:
14765         case FLASH_5717VENDOR_ATMEL_45USPT:
14766                 tp->nvram_jedecnum = JEDEC_ATMEL;
14767                 tg3_flag_set(tp, NVRAM_BUFFERED);
14768                 tg3_flag_set(tp, FLASH);
14769
14770                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14771                 case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_get_nvram_size() */
14773                         break;
14774                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14775                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14776                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14777                         break;
14778                 default:
14779                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14780                         break;
14781                 }
14782                 break;
14783         case FLASH_5717VENDOR_ST_M_M25PE10:
14784         case FLASH_5717VENDOR_ST_A_M25PE10:
14785         case FLASH_5717VENDOR_ST_M_M45PE10:
14786         case FLASH_5717VENDOR_ST_A_M45PE10:
14787         case FLASH_5717VENDOR_ST_M_M25PE20:
14788         case FLASH_5717VENDOR_ST_A_M25PE20:
14789         case FLASH_5717VENDOR_ST_M_M45PE20:
14790         case FLASH_5717VENDOR_ST_A_M45PE20:
14791         case FLASH_5717VENDOR_ST_25USPT:
14792         case FLASH_5717VENDOR_ST_45USPT:
14793                 tp->nvram_jedecnum = JEDEC_ST;
14794                 tg3_flag_set(tp, NVRAM_BUFFERED);
14795                 tg3_flag_set(tp, FLASH);
14796
14797                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14798                 case FLASH_5717VENDOR_ST_M_M25PE20:
14799                 case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_get_nvram_size() */
14801                         break;
14802                 case FLASH_5717VENDOR_ST_A_M25PE20:
14803                 case FLASH_5717VENDOR_ST_A_M45PE20:
14804                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14805                         break;
14806                 default:
14807                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14808                         break;
14809                 }
14810                 break;
14811         default:
14812                 tg3_flag_set(tp, NO_NVRAM);
14813                 return;
14814         }
14815
14816         tg3_nvram_get_pagesize(tp, nvcfg1);
14817         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14818                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14819 }
14820
14821 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14822 {
14823         u32 nvcfg1, nvmpinstrp, nv_status;
14824
14825         nvcfg1 = tr32(NVRAM_CFG1);
14826         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14827
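        /* 5762 parts share this routine: their vendor straps are first
         * translated to the closest 5720 equivalents, except for the
         * Macronix parts, which are fully handled in the block below.
         */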
14828         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14829                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14830                         tg3_flag_set(tp, NO_NVRAM);
14831                         return;
14832                 }
14833
14834                 switch (nvmpinstrp) {
14835                 case FLASH_5762_MX25L_100:
14836                 case FLASH_5762_MX25L_200:
14837                 case FLASH_5762_MX25L_400:
14838                 case FLASH_5762_MX25L_800:
14839                 case FLASH_5762_MX25L_160_320:
14840                         tp->nvram_pagesize = 4096;
14841                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14842                         tg3_flag_set(tp, NVRAM_BUFFERED);
14843                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14844                         tg3_flag_set(tp, FLASH);
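                        /* Macronix parts autosense their size: the device
                         * ID field of the autosense status register
                         * encodes it as a power of two, in megabytes.
                         */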
14845                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
                        tp->nvram_size =
                                (1 << ((nv_status >> AUTOSENSE_DEVID) &
                                       AUTOSENSE_DEVID_MASK))
                                << AUTOSENSE_SIZE_IN_MB;
14850                         return;
14851
14852                 case FLASH_5762_EEPROM_HD:
14853                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14854                         break;
14855                 case FLASH_5762_EEPROM_LD:
14856                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14857                         break;
14858                 case FLASH_5720VENDOR_M_ST_M45PE20:
14859                         /* This pinstrap supports multiple sizes, so force it
14860                          * to read the actual size from location 0xf0.
14861                          */
14862                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14863                         break;
14864                 }
14865         }
14866
14867         switch (nvmpinstrp) {
14868         case FLASH_5720_EEPROM_HD:
14869         case FLASH_5720_EEPROM_LD:
14870                 tp->nvram_jedecnum = JEDEC_ATMEL;
14871                 tg3_flag_set(tp, NVRAM_BUFFERED);
14872
14873                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14874                 tw32(NVRAM_CFG1, nvcfg1);
14875                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14876                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14877                 else
14878                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14879                 return;
14880         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14881         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14882         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14883         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14884         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14885         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14886         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14887         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14888         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14889         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14890         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14891         case FLASH_5720VENDOR_ATMEL_45USPT:
14892                 tp->nvram_jedecnum = JEDEC_ATMEL;
14893                 tg3_flag_set(tp, NVRAM_BUFFERED);
14894                 tg3_flag_set(tp, FLASH);
14895
14896                 switch (nvmpinstrp) {
14897                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14898                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14899                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14900                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14901                         break;
14902                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14903                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14904                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14905                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14906                         break;
14907                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14908                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14909                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14910                         break;
14911                 default:
14912                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14913                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14914                         break;
14915                 }
14916                 break;
14917         case FLASH_5720VENDOR_M_ST_M25PE10:
14918         case FLASH_5720VENDOR_M_ST_M45PE10:
14919         case FLASH_5720VENDOR_A_ST_M25PE10:
14920         case FLASH_5720VENDOR_A_ST_M45PE10:
14921         case FLASH_5720VENDOR_M_ST_M25PE20:
14922         case FLASH_5720VENDOR_M_ST_M45PE20:
14923         case FLASH_5720VENDOR_A_ST_M25PE20:
14924         case FLASH_5720VENDOR_A_ST_M45PE20:
14925         case FLASH_5720VENDOR_M_ST_M25PE40:
14926         case FLASH_5720VENDOR_M_ST_M45PE40:
14927         case FLASH_5720VENDOR_A_ST_M25PE40:
14928         case FLASH_5720VENDOR_A_ST_M45PE40:
14929         case FLASH_5720VENDOR_M_ST_M25PE80:
14930         case FLASH_5720VENDOR_M_ST_M45PE80:
14931         case FLASH_5720VENDOR_A_ST_M25PE80:
14932         case FLASH_5720VENDOR_A_ST_M45PE80:
14933         case FLASH_5720VENDOR_ST_25USPT:
14934         case FLASH_5720VENDOR_ST_45USPT:
14935                 tp->nvram_jedecnum = JEDEC_ST;
14936                 tg3_flag_set(tp, NVRAM_BUFFERED);
14937                 tg3_flag_set(tp, FLASH);
14938
14939                 switch (nvmpinstrp) {
14940                 case FLASH_5720VENDOR_M_ST_M25PE20:
14941                 case FLASH_5720VENDOR_M_ST_M45PE20:
14942                 case FLASH_5720VENDOR_A_ST_M25PE20:
14943                 case FLASH_5720VENDOR_A_ST_M45PE20:
14944                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14945                         break;
14946                 case FLASH_5720VENDOR_M_ST_M25PE40:
14947                 case FLASH_5720VENDOR_M_ST_M45PE40:
14948                 case FLASH_5720VENDOR_A_ST_M25PE40:
14949                 case FLASH_5720VENDOR_A_ST_M45PE40:
14950                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14951                         break;
14952                 case FLASH_5720VENDOR_M_ST_M25PE80:
14953                 case FLASH_5720VENDOR_M_ST_M45PE80:
14954                 case FLASH_5720VENDOR_A_ST_M25PE80:
14955                 case FLASH_5720VENDOR_A_ST_M45PE80:
14956                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14957                         break;
14958                 default:
14959                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14960                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14961                         break;
14962                 }
14963                 break;
14964         default:
14965                 tg3_flag_set(tp, NO_NVRAM);
14966                 return;
14967         }
14968
14969         tg3_nvram_get_pagesize(tp, nvcfg1);
14970         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14971                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14972
14973         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14974                 u32 val;
14975
14976                 if (tg3_nvram_read(tp, 0, &val))
14977                         return;
14978
14979                 if (val != TG3_EEPROM_MAGIC &&
14980                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14981                         tg3_flag_set(tp, NO_NVRAM);
14982         }
14983 }
14984
14985 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14986 static void tg3_nvram_init(struct tg3 *tp)
14987 {
14988         if (tg3_flag(tp, IS_SSB_CORE)) {
14989                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14990                 tg3_flag_clear(tp, NVRAM);
14991                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14992                 tg3_flag_set(tp, NO_NVRAM);
14993                 return;
14994         }
14995
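        /* Reset the EEPROM address state machine and program the default
         * access clock period before probing the part.
         */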
14996         tw32_f(GRC_EEPROM_ADDR,
14997              (EEPROM_ADDR_FSM_RESET |
14998               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14999                EEPROM_ADDR_CLKPERD_SHIFT)));
15000
15001         msleep(1);
15002
15003         /* Enable seeprom accesses. */
15004         tw32_f(GRC_LOCAL_CTRL,
15005              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15006         udelay(100);
15007
15008         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15009             tg3_asic_rev(tp) != ASIC_REV_5701) {
15010                 tg3_flag_set(tp, NVRAM);
15011
15012                 if (tg3_nvram_lock(tp)) {
15013                         netdev_warn(tp->dev,
15014                                     "Cannot get nvram lock, %s failed\n",
15015                                     __func__);
15016                         return;
15017                 }
15018                 tg3_enable_nvram_access(tp);
15019
15020                 tp->nvram_size = 0;
15021
15022                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15023                         tg3_get_5752_nvram_info(tp);
15024                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15025                         tg3_get_5755_nvram_info(tp);
15026                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15027                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15028                          tg3_asic_rev(tp) == ASIC_REV_5785)
15029                         tg3_get_5787_nvram_info(tp);
15030                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15031                         tg3_get_5761_nvram_info(tp);
15032                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15033                         tg3_get_5906_nvram_info(tp);
15034                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15035                          tg3_flag(tp, 57765_CLASS))
15036                         tg3_get_57780_nvram_info(tp);
15037                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15038                          tg3_asic_rev(tp) == ASIC_REV_5719)
15039                         tg3_get_5717_nvram_info(tp);
15040                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15041                          tg3_asic_rev(tp) == ASIC_REV_5762)
15042                         tg3_get_5720_nvram_info(tp);
15043                 else
15044                         tg3_get_nvram_info(tp);
15045
15046                 if (tp->nvram_size == 0)
15047                         tg3_get_nvram_size(tp);
15048
15049                 tg3_disable_nvram_access(tp);
15050                 tg3_nvram_unlock(tp);
15051
15052         } else {
15053                 tg3_flag_clear(tp, NVRAM);
15054                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15055
15056                 tg3_get_eeprom_size(tp);
15057         }
15058 }
15059
15060 struct subsys_tbl_ent {
15061         u16 subsys_vendor, subsys_devid;
15062         u32 phy_id;
15063 };
15064
15065 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15066         /* Broadcom boards. */
15067         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15068           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15069         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15070           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15071         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15072           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15073         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15074           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15075         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15076           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15077         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15078           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15079         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15080           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15081         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15082           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15083         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15084           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15085         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15086           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15087         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15088           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15089
15090         /* 3com boards. */
15091         { TG3PCI_SUBVENDOR_ID_3COM,
15092           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15093         { TG3PCI_SUBVENDOR_ID_3COM,
15094           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15095         { TG3PCI_SUBVENDOR_ID_3COM,
15096           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15097         { TG3PCI_SUBVENDOR_ID_3COM,
15098           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15099         { TG3PCI_SUBVENDOR_ID_3COM,
15100           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15101
15102         /* DELL boards. */
15103         { TG3PCI_SUBVENDOR_ID_DELL,
15104           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15105         { TG3PCI_SUBVENDOR_ID_DELL,
15106           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15107         { TG3PCI_SUBVENDOR_ID_DELL,
15108           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15109         { TG3PCI_SUBVENDOR_ID_DELL,
15110           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15111
15112         /* Compaq boards. */
15113         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15114           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15115         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15116           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15117         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15118           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15119         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15120           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15121         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15122           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15123
15124         /* IBM boards. */
15125         { TG3PCI_SUBVENDOR_ID_IBM,
15126           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15127 };
15128
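/* Map a board's PCI subsystem IDs to its entry in the table above,
 * which records the PHY the board ships with.
 */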
15129 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15130 {
15131         int i;
15132
15133         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15134                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15135                      tp->pdev->subsystem_vendor) &&
15136                     (subsys_id_to_phy_id[i].subsys_devid ==
15137                      tp->pdev->subsystem_device))
15138                         return &subsys_id_to_phy_id[i];
15139         }
15140         return NULL;
15141 }
15142
15143 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15144 {
15145         u32 val;
15146
15147         tp->phy_id = TG3_PHY_ID_INVALID;
15148         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15149
15150         /* Assume an onboard device and WOL capable by default.  */
15151         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15152         tg3_flag_set(tp, WOL_CAP);
15153
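        /* On 5906 the equivalent configuration lives in the VCPU config
         * shadow register rather than in NIC SRAM.
         */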
15154         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15155                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15156                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15157                         tg3_flag_set(tp, IS_NIC);
15158                 }
15159                 val = tr32(VCPU_CFGSHDW);
15160                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15161                         tg3_flag_set(tp, ASPM_WORKAROUND);
15162                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15163                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15164                         tg3_flag_set(tp, WOL_ENABLE);
15165                         device_set_wakeup_enable(&tp->pdev->dev, true);
15166                 }
15167                 goto done;
15168         }
15169
15170         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15171         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15172                 u32 nic_cfg, led_cfg;
15173                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15174                 u32 nic_phy_id, ver, eeprom_phy_id;
15175                 int eeprom_phy_serdes = 0;
15176
15177                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15178                 tp->nic_sram_data_cfg = nic_cfg;
15179
15180                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15181                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15182                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15183                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15184                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15185                     (ver > 0) && (ver < 0x100))
15186                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15187
15188                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15189                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15190
15191                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15192                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15193                     tg3_asic_rev(tp) == ASIC_REV_5720)
15194                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15195
15196                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15197                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15198                         eeprom_phy_serdes = 1;
15199
15200                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15201                 if (nic_phy_id != 0) {
15202                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15203                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15204
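                        /* Repack the two SRAM ID fields into the same
                         * layout the TG3_PHY_ID_* constants use.
                         */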
15205                         eeprom_phy_id  = (id1 >> 16) << 10;
15206                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15207                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15208                 } else
15209                         eeprom_phy_id = 0;
15210
15211                 tp->phy_id = eeprom_phy_id;
15212                 if (eeprom_phy_serdes) {
15213                         if (!tg3_flag(tp, 5705_PLUS))
15214                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15215                         else
15216                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15217                 }
15218
15219                 if (tg3_flag(tp, 5750_PLUS))
15220                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15221                                     SHASTA_EXT_LED_MODE_MASK);
15222                 else
15223                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15224
15225                 switch (led_cfg) {
15226                 default:
15227                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15228                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15229                         break;
15230
15231                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15232                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15233                         break;
15234
15235                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15236                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15237
15238                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15239                          * read on some older 5700/5701 bootcode.
15240                          */
15241                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15242                             tg3_asic_rev(tp) == ASIC_REV_5701)
15243                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15244
15245                         break;
15246
15247                 case SHASTA_EXT_LED_SHARED:
15248                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15249                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15250                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15251                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15252                                                  LED_CTRL_MODE_PHY_2);
15253
15254                         if (tg3_flag(tp, 5717_PLUS) ||
15255                             tg3_asic_rev(tp) == ASIC_REV_5762)
15256                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15257                                                 LED_CTRL_BLINK_RATE_MASK;
15258
15259                         break;
15260
15261                 case SHASTA_EXT_LED_MAC:
15262                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15263                         break;
15264
15265                 case SHASTA_EXT_LED_COMBO:
15266                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15267                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15268                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15269                                                  LED_CTRL_MODE_PHY_2);
15270                         break;
15271
15272                 }
15273
15274                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15275                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15276                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15277                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15278
15279                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15280                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15281
15282                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15283                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15284                         if ((tp->pdev->subsystem_vendor ==
15285                              PCI_VENDOR_ID_ARIMA) &&
15286                             (tp->pdev->subsystem_device == 0x205a ||
15287                              tp->pdev->subsystem_device == 0x2063))
15288                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15289                 } else {
15290                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15291                         tg3_flag_set(tp, IS_NIC);
15292                 }
15293
15294                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15295                         tg3_flag_set(tp, ENABLE_ASF);
15296                         if (tg3_flag(tp, 5750_PLUS))
15297                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15298                 }
15299
15300                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15301                     tg3_flag(tp, 5750_PLUS))
15302                         tg3_flag_set(tp, ENABLE_APE);
15303
15304                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15305                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15306                         tg3_flag_clear(tp, WOL_CAP);
15307
15308                 if (tg3_flag(tp, WOL_CAP) &&
15309                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15310                         tg3_flag_set(tp, WOL_ENABLE);
15311                         device_set_wakeup_enable(&tp->pdev->dev, true);
15312                 }
15313
15314                 if (cfg2 & (1 << 17))
15315                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15316
                /* SerDes signal pre-emphasis in register 0x590 is set by
                 * the bootcode if bit 18 is set.
                 */
15319                 if (cfg2 & (1 << 18))
15320                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15321
15322                 if ((tg3_flag(tp, 57765_PLUS) ||
15323                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15324                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15325                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15326                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15327
15328                 if (tg3_flag(tp, PCI_EXPRESS)) {
15329                         u32 cfg3;
15330
15331                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15332                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15333                             !tg3_flag(tp, 57765_PLUS) &&
15334                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15335                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15336                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15337                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15338                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15339                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15340                 }
15341
15342                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15343                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15344                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15345                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15346                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15347                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15348
15349                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15350                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15351         }
15352 done:
15353         if (tg3_flag(tp, WOL_CAP))
15354                 device_set_wakeup_enable(&tp->pdev->dev,
15355                                          tg3_flag(tp, WOL_ENABLE));
15356         else
15357                 device_set_wakeup_capable(&tp->pdev->dev, false);
15358 }
15359
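/* Read one 32-bit word from the APE OTP region.  The access is done under
 * the NVRAM lock; after the read command is issued, the status register is
 * polled for completion for up to ~1 ms (100 iterations x 10 us) before
 * giving up with -EBUSY.
 */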
15360 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15361 {
15362         int i, err;
15363         u32 val2, off = offset * 8;
15364
15365         err = tg3_nvram_lock(tp);
15366         if (err)
15367                 return err;
15368
15369         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15370         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15371                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15372         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15373         udelay(10);
15374
15375         for (i = 0; i < 100; i++) {
15376                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15377                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15378                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15379                         break;
15380                 }
15381                 udelay(10);
15382         }
15383
15384         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15385
15386         tg3_nvram_unlock(tp);
15387         if (val2 & APE_OTP_STATUS_CMD_DONE)
15388                 return 0;
15389
15390         return -EBUSY;
15391 }
15392
15393 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15394 {
15395         int i;
15396         u32 val;
15397
15398         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15399         tw32(OTP_CTRL, cmd);
15400
15401         /* Wait for up to 1 ms for command to execute. */
15402         for (i = 0; i < 100; i++) {
15403                 val = tr32(OTP_STATUS);
15404                 if (val & OTP_STATUS_CMD_DONE)
15405                         break;
15406                 udelay(10);
15407         }
15408
15409         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15410 }
15411
15412 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15413  * configuration is a 32-bit value that straddles the alignment boundary.
15414  * We do two 32-bit reads and then shift and merge the results.
15415  */
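/* For example (hypothetical values, purely illustrative): if the first read
 * returns thalf_otp = 0xaaaa1234 and the second returns bhalf_otp =
 * 0x5678bbbb, the merged result is (0x1234 << 16) | (0x5678bbbb >> 16) =
 * 0x12345678.
 */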
15416 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15417 {
15418         u32 bhalf_otp, thalf_otp;
15419
15420         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15421
15422         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15423                 return 0;
15424
15425         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15426
15427         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15428                 return 0;
15429
15430         thalf_otp = tr32(OTP_READ_DATA);
15431
15432         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15433
15434         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15435                 return 0;
15436
15437         bhalf_otp = tr32(OTP_READ_DATA);
15438
15439         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15440 }
15441
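/* Seed the default link configuration: advertise everything the PHY
 * supports (trimmed down for 10/100-only and serdes devices), leave
 * speed/duplex unknown, and enable autonegotiation.
 */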
15442 static void tg3_phy_init_link_config(struct tg3 *tp)
15443 {
15444         u32 adv = ADVERTISED_Autoneg;
15445
15446         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15447                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15448                         adv |= ADVERTISED_1000baseT_Half;
15449                 adv |= ADVERTISED_1000baseT_Full;
15450         }
15451
15452         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15453                 adv |= ADVERTISED_100baseT_Half |
15454                        ADVERTISED_100baseT_Full |
15455                        ADVERTISED_10baseT_Half |
15456                        ADVERTISED_10baseT_Full |
15457                        ADVERTISED_TP;
15458         else
15459                 adv |= ADVERTISED_FIBRE;
15460
15461         tp->link_config.advertising = adv;
15462         tp->link_config.speed = SPEED_UNKNOWN;
15463         tp->link_config.duplex = DUPLEX_UNKNOWN;
15464         tp->link_config.autoneg = AUTONEG_ENABLE;
15465         tp->link_config.active_speed = SPEED_UNKNOWN;
15466         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15467
15468         tp->old_link = -1;
15469 }
15470
15471 static int tg3_phy_probe(struct tg3 *tp)
15472 {
15473         u32 hw_phy_id_1, hw_phy_id_2;
15474         u32 hw_phy_id, hw_phy_id_masked;
15475         int err;
15476
15477         /* flow control autonegotiation is default behavior */
15478         tg3_flag_set(tp, PAUSE_AUTONEG);
15479         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15480
15481         if (tg3_flag(tp, ENABLE_APE)) {
15482                 switch (tp->pci_fn) {
15483                 case 0:
15484                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15485                         break;
15486                 case 1:
15487                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15488                         break;
15489                 case 2:
15490                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15491                         break;
15492                 case 3:
15493                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15494                         break;
15495                 }
15496         }
15497
15498         if (!tg3_flag(tp, ENABLE_ASF) &&
15499             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15500             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15501                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15502                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15503
15504         if (tg3_flag(tp, USE_PHYLIB))
15505                 return tg3_phy_init(tp);
15506
15507         /* Reading the PHY ID register can conflict with ASF
15508          * firmware access to the PHY hardware.
15509          */
15510         err = 0;
15511         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15512                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15513         } else {
15514                 /* Now read the physical PHY_ID from the chip and verify
15515                  * that it is sane.  If it doesn't look good, we fall back
15516                  * to the value found in the eeprom area and, failing that,
15517                  * to the hard-coded, subsystem-ID-based PHY_ID table.
15518                  */
15519                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15520                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15521
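                /* Fold the two MII ID words into tg3's internal PHY ID
                 * encoding; TG3_PHY_ID_MASK below discards the low-order
                 * revision bits before the known-ID check.
                 */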
15522                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15523                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15524                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15525
15526                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15527         }
15528
15529         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15530                 tp->phy_id = hw_phy_id;
15531                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15532                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15533                 else
15534                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15535         } else {
15536                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15537                         /* Do nothing, phy ID already set up in
15538                          * tg3_get_eeprom_hw_cfg().
15539                          */
15540                 } else {
15541                         struct subsys_tbl_ent *p;
15542
15543                         /* No eeprom signature?  Try the hardcoded
15544                          * subsys device table.
15545                          */
15546                         p = tg3_lookup_by_subsys(tp);
15547                         if (p) {
15548                                 tp->phy_id = p->phy_id;
15549                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15550                                 /* So far we have seen the IDs 0xbc050cd0,
15551                                  * 0xbc050f80 and 0xbc050c30 on devices
15552                                  * connected to a BCM4785, and there are
15553                                  * probably more.  For now, just assume
15554                                  * that the phy is supported when it is
15555                                  * connected to an SSB core.
15556                                  */
15557                                 return -ENODEV;
15558                         }
15559
15560                         if (!tp->phy_id ||
15561                             tp->phy_id == TG3_PHY_ID_BCM8002)
15562                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15563                 }
15564         }
15565
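        /* Copper devices from the 5717 generation onward (excluding the
         * 5717 A0 and 57765 A0 steppings) support EEE; default to EEE
         * enabled with a 2047 us link-idle LPI timer.
         */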
15566         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15567             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15568              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15569              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15570              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15571              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15572               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15573              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15574               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15575                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15576
15577                 tp->eee.supported = SUPPORTED_100baseT_Full |
15578                                     SUPPORTED_1000baseT_Full;
15579                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15580                                      ADVERTISED_1000baseT_Full;
15581                 tp->eee.eee_enabled = 1;
15582                 tp->eee.tx_lpi_enabled = 1;
15583                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15584         }
15585
15586         tg3_phy_init_link_config(tp);
15587
15588         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15589             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15590             !tg3_flag(tp, ENABLE_APE) &&
15591             !tg3_flag(tp, ENABLE_ASF)) {
15592                 u32 bmsr, dummy;
15593
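                /* MII_BMSR latches link-down events; read it twice so
                 * the second read reflects the current link state.
                 */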
15594                 tg3_readphy(tp, MII_BMSR, &bmsr);
15595                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15596                     (bmsr & BMSR_LSTATUS))
15597                         goto skip_phy_reset;
15598
15599                 err = tg3_phy_reset(tp);
15600                 if (err)
15601                         return err;
15602
15603                 tg3_phy_set_wirespeed(tp);
15604
15605                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15606                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15607                                             tp->link_config.flowctrl);
15608
15609                         tg3_writephy(tp, MII_BMCR,
15610                                      BMCR_ANENABLE | BMCR_ANRESTART);
15611                 }
15612         }
15613
15614 skip_phy_reset:
15615         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15616                 err = tg3_init_5401phy_dsp(tp);
15617                 if (err)
15618                         return err;
15619
15620                 err = tg3_init_5401phy_dsp(tp);
15621         }
15622
15623         return err;
15624 }
15625
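/* Extract the board part number from the read-only section of the PCI VPD.
 * On Dell boards (manufacturer ID "1028") the VENDOR0 keyword is also folded
 * into tp->fw_ver.  If no usable VPD is found, fall back to a name derived
 * from the PCI device ID.
 */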
15626 static void tg3_read_vpd(struct tg3 *tp)
15627 {
15628         u8 *vpd_data;
15629         unsigned int block_end, rosize, len;
15630         u32 vpdlen;
15631         int j, i = 0;
15632
15633         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15634         if (!vpd_data)
15635                 goto out_no_vpd;
15636
15637         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15638         if (i < 0)
15639                 goto out_not_found;
15640
15641         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15642         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15643         i += PCI_VPD_LRDT_TAG_SIZE;
15644
15645         if (block_end > vpdlen)
15646                 goto out_not_found;
15647
15648         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15649                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15650         if (j > 0) {
15651                 len = pci_vpd_info_field_size(&vpd_data[j]);
15652
15653                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15654                 if (j + len > block_end || len != 4 ||
15655                     memcmp(&vpd_data[j], "1028", 4))
15656                         goto partno;
15657
15658                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15659                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15660                 if (j < 0)
15661                         goto partno;
15662
15663                 len = pci_vpd_info_field_size(&vpd_data[j]);
15664
15665                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15666                 if (j + len > block_end)
15667                         goto partno;
15668
15669                 if (len >= sizeof(tp->fw_ver))
15670                         len = sizeof(tp->fw_ver) - 1;
15671                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15672                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15673                          &vpd_data[j]);
15674         }
15675
15676 partno:
15677         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15678                                       PCI_VPD_RO_KEYWORD_PARTNO);
15679         if (i < 0)
15680                 goto out_not_found;
15681
15682         len = pci_vpd_info_field_size(&vpd_data[i]);
15683
15684         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15685         if (len > TG3_BPN_SIZE ||
15686             (len + i) > vpdlen)
15687                 goto out_not_found;
15688
15689         memcpy(tp->board_part_number, &vpd_data[i], len);
15690
15691 out_not_found:
15692         kfree(vpd_data);
15693         if (tp->board_part_number[0])
15694                 return;
15695
15696 out_no_vpd:
15697         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15698                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15699                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15700                         strcpy(tp->board_part_number, "BCM5717");
15701                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15702                         strcpy(tp->board_part_number, "BCM5718");
15703                 else
15704                         goto nomatch;
15705         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15706                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15707                         strcpy(tp->board_part_number, "BCM57780");
15708                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15709                         strcpy(tp->board_part_number, "BCM57760");
15710                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15711                         strcpy(tp->board_part_number, "BCM57790");
15712                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15713                         strcpy(tp->board_part_number, "BCM57788");
15714                 else
15715                         goto nomatch;
15716         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15717                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15718                         strcpy(tp->board_part_number, "BCM57761");
15719                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15720                         strcpy(tp->board_part_number, "BCM57765");
15721                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15722                         strcpy(tp->board_part_number, "BCM57781");
15723                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15724                         strcpy(tp->board_part_number, "BCM57785");
15725                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15726                         strcpy(tp->board_part_number, "BCM57791");
15727                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15728                         strcpy(tp->board_part_number, "BCM57795");
15729                 else
15730                         goto nomatch;
15731         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15732                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15733                         strcpy(tp->board_part_number, "BCM57762");
15734                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15735                         strcpy(tp->board_part_number, "BCM57766");
15736                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15737                         strcpy(tp->board_part_number, "BCM57782");
15738                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15739                         strcpy(tp->board_part_number, "BCM57786");
15740                 else
15741                         goto nomatch;
15742         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15743                 strcpy(tp->board_part_number, "BCM95906");
15744         } else {
15745 nomatch:
15746                 strcpy(tp->board_part_number, "none");
15747         }
15748 }
15749
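/* A firmware image in NVRAM is considered valid when the first word at
 * @offset carries the 0x0c000000 signature in its top six bits and the
 * second word is zero.
 */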
15750 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15751 {
15752         u32 val;
15753
15754         if (tg3_nvram_read(tp, offset, &val) ||
15755             (val & 0xfc000000) != 0x0c000000 ||
15756             tg3_nvram_read(tp, offset + 4, &val) ||
15757             val != 0)
15758                 return 0;
15759
15760         return 1;
15761 }
15762
15763 static void tg3_read_bc_ver(struct tg3 *tp)
15764 {
15765         u32 val, offset, start, ver_offset;
15766         int i, dst_off;
15767         bool newver = false;
15768
15769         if (tg3_nvram_read(tp, 0xc, &offset) ||
15770             tg3_nvram_read(tp, 0x4, &start))
15771                 return;
15772
15773         offset = tg3_nvram_logical_addr(tp, offset);
15774
15775         if (tg3_nvram_read(tp, offset, &val))
15776                 return;
15777
15778         if ((val & 0xfc000000) == 0x0c000000) {
15779                 if (tg3_nvram_read(tp, offset + 4, &val))
15780                         return;
15781
15782                 if (val == 0)
15783                         newver = true;
15784         }
15785
15786         dst_off = strlen(tp->fw_ver);
15787
15788         if (newver) {
15789                 if (TG3_VER_SIZE - dst_off < 16 ||
15790                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15791                         return;
15792
15793                 offset = offset + ver_offset - start;
15794                 for (i = 0; i < 16; i += 4) {
15795                         __be32 v;
15796                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15797                                 return;
15798
15799                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15800                 }
15801         } else {
15802                 u32 major, minor;
15803
15804                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15805                         return;
15806
15807                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15808                         TG3_NVM_BCVER_MAJSFT;
15809                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15810                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15811                          "v%d.%02d", major, minor);
15812         }
15813 }
15814
15815 static void tg3_read_hwsb_ver(struct tg3 *tp)
15816 {
15817         u32 val, major, minor;
15818
15819         /* Use native endian representation */
15820         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15821                 return;
15822
15823         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15824                 TG3_NVM_HWSB_CFG1_MAJSFT;
15825         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15826                 TG3_NVM_HWSB_CFG1_MINSFT;
15827
15828         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15829 }
15830
15831 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15832 {
15833         u32 offset, major, minor, build;
15834
15835         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15836
15837         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15838                 return;
15839
15840         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15841         case TG3_EEPROM_SB_REVISION_0:
15842                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15843                 break;
15844         case TG3_EEPROM_SB_REVISION_2:
15845                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15846                 break;
15847         case TG3_EEPROM_SB_REVISION_3:
15848                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15849                 break;
15850         case TG3_EEPROM_SB_REVISION_4:
15851                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15852                 break;
15853         case TG3_EEPROM_SB_REVISION_5:
15854                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15855                 break;
15856         case TG3_EEPROM_SB_REVISION_6:
15857                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15858                 break;
15859         default:
15860                 return;
15861         }
15862
15863         if (tg3_nvram_read(tp, offset, &val))
15864                 return;
15865
15866         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15867                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15868         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15869                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15870         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15871
15872         if (minor > 99 || build > 26)
15873                 return;
15874
15875         offset = strlen(tp->fw_ver);
15876         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15877                  " v%d.%02d", major, minor);
15878
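        /* A non-zero build number (1..26) is encoded as a single suffix
         * letter, 'a' for build 1 through 'z' for build 26.
         */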
15879         if (build > 0) {
15880                 offset = strlen(tp->fw_ver);
15881                 if (offset < TG3_VER_SIZE - 1)
15882                         tp->fw_ver[offset] = 'a' + build - 1;
15883         }
15884 }
15885
15886 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15887 {
15888         u32 val, offset, start;
15889         int i, vlen;
15890
15891         for (offset = TG3_NVM_DIR_START;
15892              offset < TG3_NVM_DIR_END;
15893              offset += TG3_NVM_DIRENT_SIZE) {
15894                 if (tg3_nvram_read(tp, offset, &val))
15895                         return;
15896
15897                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15898                         break;
15899         }
15900
15901         if (offset == TG3_NVM_DIR_END)
15902                 return;
15903
15904         if (!tg3_flag(tp, 5705_PLUS))
15905                 start = 0x08000000;
15906         else if (tg3_nvram_read(tp, offset - 4, &start))
15907                 return;
15908
15909         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15910             !tg3_fw_img_is_valid(tp, offset) ||
15911             tg3_nvram_read(tp, offset + 8, &val))
15912                 return;
15913
15914         offset += val - start;
15915
15916         vlen = strlen(tp->fw_ver);
15917
15918         tp->fw_ver[vlen++] = ',';
15919         tp->fw_ver[vlen++] = ' ';
15920
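        /* Copy up to 16 bytes of the ASF firmware version string,
         * truncating the final word if tp->fw_ver is nearly full.
         */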
15921         for (i = 0; i < 4; i++) {
15922                 __be32 v;
15923                 if (tg3_nvram_read_be32(tp, offset, &v))
15924                         return;
15925
15926                 offset += sizeof(v);
15927
15928                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15929                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15930                         break;
15931                 }
15932
15933                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15934                 vlen += sizeof(v);
15935         }
15936 }
15937
15938 static void tg3_probe_ncsi(struct tg3 *tp)
15939 {
15940         u32 apedata;
15941
15942         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15943         if (apedata != APE_SEG_SIG_MAGIC)
15944                 return;
15945
15946         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15947         if (!(apedata & APE_FW_STATUS_READY))
15948                 return;
15949
15950         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15951                 tg3_flag_set(tp, APE_HAS_NCSI);
15952 }
15953
15954 static void tg3_read_dash_ver(struct tg3 *tp)
15955 {
15956         int vlen;
15957         u32 apedata;
15958         char *fwtype;
15959
15960         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15961
15962         if (tg3_flag(tp, APE_HAS_NCSI))
15963                 fwtype = "NCSI";
15964         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15965                 fwtype = "SMASH";
15966         else
15967                 fwtype = "DASH";
15968
15969         vlen = strlen(tp->fw_ver);
15970
15971         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15972                  fwtype,
15973                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15974                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15975                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15976                  (apedata & APE_FW_VERSION_BLDMSK));
15977 }
15978
15979 static void tg3_read_otp_ver(struct tg3 *tp)
15980 {
15981         u32 val, val2;
15982
15983         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15984                 return;
15985
15986         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15987             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15988             TG3_OTP_MAGIC0_VALID(val)) {
15989                 u64 val64 = (u64) val << 32 | val2;
15990                 u32 ver = 0;
15991                 int i, vlen;
15992
15993                 for (i = 0; i < 7; i++) {
15994                         if ((val64 & 0xff) == 0)
15995                                 break;
15996                         ver = val64 & 0xff;
15997                         val64 >>= 8;
15998                 }
15999                 vlen = strlen(tp->fw_ver);
16000                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
16001         }
16002 }
16003
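/* Assemble tp->fw_ver from whatever firmware images are present: the VPD
 * version (if already read), then the bootcode, self-boot, or hardware
 * self-boot version from NVRAM, and finally the management firmware
 * (DASH/SMASH/NCSI or ASF) version.
 */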
16004 static void tg3_read_fw_ver(struct tg3 *tp)
16005 {
16006         u32 val;
16007         bool vpd_vers = false;
16008
16009         if (tp->fw_ver[0] != 0)
16010                 vpd_vers = true;
16011
16012         if (tg3_flag(tp, NO_NVRAM)) {
16013                 strcat(tp->fw_ver, "sb");
16014                 tg3_read_otp_ver(tp);
16015                 return;
16016         }
16017
16018         if (tg3_nvram_read(tp, 0, &val))
16019                 return;
16020
16021         if (val == TG3_EEPROM_MAGIC)
16022                 tg3_read_bc_ver(tp);
16023         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16024                 tg3_read_sb_ver(tp, val);
16025         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16026                 tg3_read_hwsb_ver(tp);
16027
16028         if (tg3_flag(tp, ENABLE_ASF)) {
16029                 if (tg3_flag(tp, ENABLE_APE)) {
16030                         tg3_probe_ncsi(tp);
16031                         if (!vpd_vers)
16032                                 tg3_read_dash_ver(tp);
16033                 } else if (!vpd_vers) {
16034                         tg3_read_mgmtfw_ver(tp);
16035                 }
16036         }
16037
16038         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16039 }
16040
16041 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16042 {
16043         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16044                 return TG3_RX_RET_MAX_SIZE_5717;
16045         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16046                 return TG3_RX_RET_MAX_SIZE_5700;
16047         else
16048                 return TG3_RX_RET_MAX_SIZE_5705;
16049 }
16050
16051 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16052         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16053         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16054         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16055         { },
16056 };
16057
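/* The 5704 and 5714 are dual-port devices; the mate shares our PCI device
 * number, so walk the slot's eight functions looking for it.  If no peer
 * is found (single-port configuration), return tp->pdev itself.
 */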
16058 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16059 {
16060         struct pci_dev *peer;
16061         unsigned int func, devnr = tp->pdev->devfn & ~7;
16062
16063         for (func = 0; func < 8; func++) {
16064                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16065                 if (peer && peer != tp->pdev)
16066                         break;
16067                 pci_dev_put(peer);
16068         }
16069         /* 5704 can be configured in single-port mode; set peer to
16070          * tp->pdev in that case.
16071          */
16072         if (!peer) {
16073                 peer = tp->pdev;
16074                 return peer;
16075         }
16076
16077         /*
16078          * We don't need to keep the refcount elevated; there's no way
16079          * to remove one half of this device without removing the other
16080          */
16081         pci_dev_put(peer);
16082
16083         return peer;
16084 }
16085
16086 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16087 {
16088         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16089         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16090                 u32 reg;
16091
16092                 /* All devices that use the alternate
16093                  * ASIC REV location have a CPMU.
16094                  */
16095                 tg3_flag_set(tp, CPMU_PRESENT);
16096
16097                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16098                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16099                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16100                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16101                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16102                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16103                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16104                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16105                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16106                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16107                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16108                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16109                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16110                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16111                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16112                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16113                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16114                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16115                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16116                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16117                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16118                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16119                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16120                 else
16121                         reg = TG3PCI_PRODID_ASICREV;
16122
16123                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16124         }
16125
16126         /* Wrong chip ID in 5752 A0. This code can be removed later
16127          * as A0 is not in production.
16128          */
16129         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16130                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16131
16132         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16133                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16134
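        /* The *_PLUS flags form a strict superset chain: 5717_PLUS and the
         * 57765 class fold into 57765_PLUS, which folds into 5755_PLUS,
         * then 5750_PLUS, then 5705_PLUS.  Later code tests the broadest
         * flag that applies.
         */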
16135         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16136             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16137             tg3_asic_rev(tp) == ASIC_REV_5720)
16138                 tg3_flag_set(tp, 5717_PLUS);
16139
16140         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16141             tg3_asic_rev(tp) == ASIC_REV_57766)
16142                 tg3_flag_set(tp, 57765_CLASS);
16143
16144         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16145              tg3_asic_rev(tp) == ASIC_REV_5762)
16146                 tg3_flag_set(tp, 57765_PLUS);
16147
16148         /* Intentionally exclude ASIC_REV_5906 */
16149         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16150             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16151             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16152             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16153             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16154             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16155             tg3_flag(tp, 57765_PLUS))
16156                 tg3_flag_set(tp, 5755_PLUS);
16157
16158         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16159             tg3_asic_rev(tp) == ASIC_REV_5714)
16160                 tg3_flag_set(tp, 5780_CLASS);
16161
16162         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16163             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16164             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16165             tg3_flag(tp, 5755_PLUS) ||
16166             tg3_flag(tp, 5780_CLASS))
16167                 tg3_flag_set(tp, 5750_PLUS);
16168
16169         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16170             tg3_flag(tp, 5750_PLUS))
16171                 tg3_flag_set(tp, 5705_PLUS);
16172 }
16173
16174 static bool tg3_10_100_only_device(struct tg3 *tp,
16175                                    const struct pci_device_id *ent)
16176 {
16177         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16178
16179         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16180              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16181             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16182                 return true;
16183
16184         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16185                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16186                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16187                                 return true;
16188                 } else {
16189                         return true;
16190                 }
16191         }
16192
16193         return false;
16194 }
16195
16196 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16197 {
16198         u32 misc_ctrl_reg;
16199         u32 pci_state_reg, grc_misc_cfg;
16200         u32 val;
16201         u16 pci_cmd;
16202         int err;
16203
16204         /* Force memory write invalidate off.  If we leave it on,
16205          * then on 5700_BX chips we have to enable a workaround.
16206          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16207          * to match the cacheline size.  The Broadcom driver has this
16208          * workaround but always turns MWI off, so it never actually
16209          * uses it.  This suggests that the workaround is insufficient.
16210          */
16211         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16212         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16213         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16214
16215         /* Important! -- Make sure register accesses are byteswapped
16216          * correctly.  Also, for those chips that require it, make
16217          * sure that indirect register accesses are enabled before
16218          * the first operation.
16219          */
16220         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16221                               &misc_ctrl_reg);
16222         tp->misc_host_ctrl |= (misc_ctrl_reg &
16223                                MISC_HOST_CTRL_CHIPREV);
16224         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16225                                tp->misc_host_ctrl);
16226
16227         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16228
16229         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16230          * we need to disable memory and use config. cycles
16231          * only to access all registers. The 5702/03 chips
16232          * can mistakenly decode the special cycles from the
16233          * ICH chipsets as memory write cycles, causing corruption
16234          * of register and memory space. Only certain ICH bridges
16235          * will drive special cycles with non-zero data during the
16236          * address phase which can fall within the 5703's address
16237          * range. This is not an ICH bug as the PCI spec allows
16238          * non-zero address during special cycles. However, only
16239          * these ICH bridges are known to drive non-zero addresses
16240          * during special cycles.
16241          *
16242          * Since special cycles do not cross PCI bridges, we only
16243          * enable this workaround if the 5703 is on the secondary
16244          * bus of these ICH bridges.
16245          */
16246         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16247             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16248                 static struct tg3_dev_id {
16249                         u32     vendor;
16250                         u32     device;
16251                         u32     rev;
16252                 } ich_chipsets[] = {
16253                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16254                           PCI_ANY_ID },
16255                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16256                           PCI_ANY_ID },
16257                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16258                           0xa },
16259                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16260                           PCI_ANY_ID },
16261                         { },
16262                 };
16263                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16264                 struct pci_dev *bridge = NULL;
16265
16266                 while (pci_id->vendor != 0) {
16267                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16268                                                 bridge);
16269                         if (!bridge) {
16270                                 pci_id++;
16271                                 continue;
16272                         }
16273                         if (pci_id->rev != PCI_ANY_ID) {
16274                                 if (bridge->revision > pci_id->rev)
16275                                         continue;
16276                         }
16277                         if (bridge->subordinate &&
16278                             (bridge->subordinate->number ==
16279                              tp->pdev->bus->number)) {
16280                                 tg3_flag_set(tp, ICH_WORKAROUND);
16281                                 pci_dev_put(bridge);
16282                                 break;
16283                         }
16284                 }
16285         }
16286
16287         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16288                 static struct tg3_dev_id {
16289                         u32     vendor;
16290                         u32     device;
16291                 } bridge_chipsets[] = {
16292                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16293                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16294                         { },
16295                 };
16296                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16297                 struct pci_dev *bridge = NULL;
16298
16299                 while (pci_id->vendor != 0) {
16300                         bridge = pci_get_device(pci_id->vendor,
16301                                                 pci_id->device,
16302                                                 bridge);
16303                         if (!bridge) {
16304                                 pci_id++;
16305                                 continue;
16306                         }
16307                         if (bridge->subordinate &&
16308                             (bridge->subordinate->number <=
16309                              tp->pdev->bus->number) &&
16310                             (bridge->subordinate->busn_res.end >=
16311                              tp->pdev->bus->number)) {
16312                                 tg3_flag_set(tp, 5701_DMA_BUG);
16313                                 pci_dev_put(bridge);
16314                                 break;
16315                         }
16316                 }
16317         }
16318
16319         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16320          * DMA addresses > 40-bit.  This bridge may have additional 57xx
16321          * devices behind it in some 4-port NIC designs, for example.
16322          * Any tg3 device found behind the bridge will also need the
16323          * 40-bit DMA workaround.
16324          */
16325         if (tg3_flag(tp, 5780_CLASS)) {
16326                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16327                 tp->msi_cap = tp->pdev->msi_cap;
16328         } else {
16329                 struct pci_dev *bridge = NULL;
16330
16331                 do {
16332                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16333                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16334                                                 bridge);
16335                         if (bridge && bridge->subordinate &&
16336                             (bridge->subordinate->number <=
16337                              tp->pdev->bus->number) &&
16338                             (bridge->subordinate->busn_res.end >=
16339                              tp->pdev->bus->number)) {
16340                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16341                                 pci_dev_put(bridge);
16342                                 break;
16343                         }
16344                 } while (bridge);
16345         }
16346
16347         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16348             tg3_asic_rev(tp) == ASIC_REV_5714)
16349                 tp->pdev_peer = tg3_find_peer(tp);
16350
16351         /* Determine TSO capabilities */
16352         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16353                 ; /* Do nothing. HW bug. */
16354         else if (tg3_flag(tp, 57765_PLUS))
16355                 tg3_flag_set(tp, HW_TSO_3);
16356         else if (tg3_flag(tp, 5755_PLUS) ||
16357                  tg3_asic_rev(tp) == ASIC_REV_5906)
16358                 tg3_flag_set(tp, HW_TSO_2);
16359         else if (tg3_flag(tp, 5750_PLUS)) {
16360                 tg3_flag_set(tp, HW_TSO_1);
16361                 tg3_flag_set(tp, TSO_BUG);
16362                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16363                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16364                         tg3_flag_clear(tp, TSO_BUG);
16365         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16366                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16367                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16368                 tg3_flag_set(tp, FW_TSO);
16369                 tg3_flag_set(tp, TSO_BUG);
16370                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16371                         tp->fw_needed = FIRMWARE_TG3TSO5;
16372                 else
16373                         tp->fw_needed = FIRMWARE_TG3TSO;
16374         }
16375
16376         /* Selectively allow TSO based on operating conditions */
16377         if (tg3_flag(tp, HW_TSO_1) ||
16378             tg3_flag(tp, HW_TSO_2) ||
16379             tg3_flag(tp, HW_TSO_3) ||
16380             tg3_flag(tp, FW_TSO)) {
16381                 /* For firmware TSO, assume ASF is disabled.
16382                  * We'll disable TSO later if we discover ASF
16383                  * is enabled in tg3_get_eeprom_hw_cfg().
16384                  */
16385                 tg3_flag_set(tp, TSO_CAPABLE);
16386         } else {
16387                 tg3_flag_clear(tp, TSO_CAPABLE);
16388                 tg3_flag_clear(tp, TSO_BUG);
16389                 tp->fw_needed = NULL;
16390         }
16391
16392         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16393                 tp->fw_needed = FIRMWARE_TG3;
16394
16395         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16396                 tp->fw_needed = FIRMWARE_TG357766;
16397
16398         tp->irq_max = 1;
16399
16400         if (tg3_flag(tp, 5750_PLUS)) {
16401                 tg3_flag_set(tp, SUPPORT_MSI);
16402                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16403                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16404                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16405                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16406                      tp->pdev_peer == tp->pdev))
16407                         tg3_flag_clear(tp, SUPPORT_MSI);
16408
16409                 if (tg3_flag(tp, 5755_PLUS) ||
16410                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16411                         tg3_flag_set(tp, 1SHOT_MSI);
16412                 }
16413
16414                 if (tg3_flag(tp, 57765_PLUS)) {
16415                         tg3_flag_set(tp, SUPPORT_MSIX);
16416                         tp->irq_max = TG3_IRQ_MAX_VECS;
16417                 }
16418         }
16419
16420         tp->txq_max = 1;
16421         tp->rxq_max = 1;
16422         if (tp->irq_max > 1) {
16423                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16424                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16425
16426                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16427                     tg3_asic_rev(tp) == ASIC_REV_5720)
16428                         tp->txq_max = tp->irq_max - 1;
16429         }
16430
16431         if (tg3_flag(tp, 5755_PLUS) ||
16432             tg3_asic_rev(tp) == ASIC_REV_5906)
16433                 tg3_flag_set(tp, SHORT_DMA_BUG);
16434
16435         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16436                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16437
16438         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16439             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16440             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16441             tg3_asic_rev(tp) == ASIC_REV_5762)
16442                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16443
16444         if (tg3_flag(tp, 57765_PLUS) &&
16445             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16446                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16447
16448         if (!tg3_flag(tp, 5705_PLUS) ||
16449             tg3_flag(tp, 5780_CLASS) ||
16450             tg3_flag(tp, USE_JUMBO_BDFLAG))
16451                 tg3_flag_set(tp, JUMBO_CAPABLE);
16452
16453         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16454                               &pci_state_reg);
16455
16456         if (pci_is_pcie(tp->pdev)) {
16457                 u16 lnkctl;
16458
16459                 tg3_flag_set(tp, PCI_EXPRESS);
16460
16461                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16462                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16463                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16464                                 tg3_flag_clear(tp, HW_TSO_2);
16465                                 tg3_flag_clear(tp, TSO_CAPABLE);
16466                         }
16467                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16468                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16469                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16470                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16471                                 tg3_flag_set(tp, CLKREQ_BUG);
16472                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16473                         tg3_flag_set(tp, L1PLLPD_EN);
16474                 }
16475         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16476                 /* BCM5785 devices are effectively PCIe devices, and should
16477                  * follow PCIe codepaths, but do not have a PCIe capabilities
16478                  * section.
16479                  */
16480                 tg3_flag_set(tp, PCI_EXPRESS);
16481         } else if (!tg3_flag(tp, 5705_PLUS) ||
16482                    tg3_flag(tp, 5780_CLASS)) {
16483                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16484                 if (!tp->pcix_cap) {
16485                         dev_err(&tp->pdev->dev,
16486                                 "Cannot find PCI-X capability, aborting\n");
16487                         return -EIO;
16488                 }
16489
16490                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16491                         tg3_flag_set(tp, PCIX_MODE);
16492         }
16493
16494         /* If we have an AMD 762 or VIA K8T800 chipset, write
16495          * reordering to the mailbox registers done by the host
16496          * controller can cause major troubles.  We read back from
16497          * every mailbox register write to force the writes to be
16498          * posted to the chip in order.
16499          */
16500         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16501             !tg3_flag(tp, PCI_EXPRESS))
16502                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
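        /* A minimal sketch of that read-back flush, as done by
         * tg3_write_flush_reg32() in this driver: the read cannot
         * complete until the preceding posted write has reached the chip.
         *
         *      writel(val, tp->regs + off);
         *      readl(tp->regs + off);
         */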
16503
16504         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16505                              &tp->pci_cacheline_sz);
16506         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16507                              &tp->pci_lat_timer);
16508         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16509             tp->pci_lat_timer < 64) {
16510                 tp->pci_lat_timer = 64;
16511                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16512                                       tp->pci_lat_timer);
16513         }
16514
16515         /* Important! -- The PCI-X hw workaround situation must be
16516          * decided before the first MMIO register access.
16517          */
16518         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16519                 /* 5700 BX chips need to have their TX producer index
16520                  * mailboxes written twice to work around a bug.
16521                  */
16522                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16523
16524                 /* If we are in PCI-X mode, enable the register write
16525                  * workaround.
16526                  * The workaround is to use indirect register accesses
16527                  * for all chip writes except those to mailbox registers.
16528                  */
16529                 if (tg3_flag(tp, PCIX_MODE)) {
16530                         u32 pm_reg;
16531
16532                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16533
16534                         /* The chip can have its power management PCI config
16535                          * space registers clobbered due to this bug.
16536                          * So explicitly force the chip into D0 here.
16537                          */
16538                         pci_read_config_dword(tp->pdev,
16539                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16540                                               &pm_reg);
16541                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16542                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16543                         pci_write_config_dword(tp->pdev,
16544                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16545                                                pm_reg);
16546
16547                         /* Also, force SERR#/PERR# in PCI command. */
16548                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16549                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16550                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16551                 }
16552         }
16553
16554         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16555                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16556         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16557                 tg3_flag_set(tp, PCI_32BIT);
16558
16559         /* Chip-specific fixup from Broadcom driver */
16560         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16561             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16562                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16563                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16564         }
16565
16566         /* Default fast path register access methods */
16567         tp->read32 = tg3_read32;
16568         tp->write32 = tg3_write32;
16569         tp->read32_mbox = tg3_read32;
16570         tp->write32_mbox = tg3_write32;
16571         tp->write32_tx_mbox = tg3_write32;
16572         tp->write32_rx_mbox = tg3_write32;
16573
16574         /* Various workaround register access methods */
16575         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16576                 tp->write32 = tg3_write_indirect_reg32;
16577         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16578                  (tg3_flag(tp, PCI_EXPRESS) &&
16579                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16580                 /*
16581                  * Back-to-back register writes can cause problems on these
16582                  * chips; the workaround is to read back all reg writes
16583                  * except those to mailbox regs.
16584                  *
16585                  * See tg3_write_indirect_reg32().
16586                  */
16587                 tp->write32 = tg3_write_flush_reg32;
16588         }
16589
16590         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16591                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16592                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16593                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16594         }
16595
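        /* With the ICH workaround active, abandon MMIO entirely: route
         * every register and mailbox access through PCI config cycles,
         * unmap the registers, and disable memory-space decoding.
         */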
16596         if (tg3_flag(tp, ICH_WORKAROUND)) {
16597                 tp->read32 = tg3_read_indirect_reg32;
16598                 tp->write32 = tg3_write_indirect_reg32;
16599                 tp->read32_mbox = tg3_read_indirect_mbox;
16600                 tp->write32_mbox = tg3_write_indirect_mbox;
16601                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16602                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16603
16604                 iounmap(tp->regs);
16605                 tp->regs = NULL;
16606
16607                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16608                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16609                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16610         }
16611         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16612                 tp->read32_mbox = tg3_read32_mbox_5906;
16613                 tp->write32_mbox = tg3_write32_mbox_5906;
16614                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16615                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16616         }
16617
16618         if (tp->write32 == tg3_write_indirect_reg32 ||
16619             (tg3_flag(tp, PCIX_MODE) &&
16620              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16621               tg3_asic_rev(tp) == ASIC_REV_5701)))
16622                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16623
16624         /* The memory arbiter has to be enabled in order for SRAM accesses
16625          * to succeed.  Normally on powerup the tg3 chip firmware will make
16626          * sure it is enabled, but other entities such as system netboot
16627          * code might disable it.
16628          */
16629         val = tr32(MEMARB_MODE);
16630         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16631
16632         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16633         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16634             tg3_flag(tp, 5780_CLASS)) {
16635                 if (tg3_flag(tp, PCIX_MODE)) {
16636                         pci_read_config_dword(tp->pdev,
16637                                               tp->pcix_cap + PCI_X_STATUS,
16638                                               &val);
16639                         tp->pci_fn = val & 0x7;
16640                 }
16641         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16642                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16643                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16644                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16645                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16646                         val = tr32(TG3_CPMU_STATUS);
16647
16648                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16649                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16650                 else
16651                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16652                                      TG3_CPMU_STATUS_FSHFT_5719;
16653         }
16654
16655         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16656                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16657                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16658         }
16659
16660         /* Get eeprom hw config before calling tg3_set_power_state().
16661          * In particular, the TG3_FLAG_IS_NIC flag must be
16662          * determined before calling tg3_set_power_state() so that
16663          * we know whether or not to switch out of Vaux power.
16664          * When the flag is set, it means that GPIO1 is used for eeprom
16665          * write protect and also implies that it is a LOM where GPIOs
16666          * are not used to switch power.
16667          */
16668         tg3_get_eeprom_hw_cfg(tp);
16669
16670         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16671                 tg3_flag_clear(tp, TSO_CAPABLE);
16672                 tg3_flag_clear(tp, TSO_BUG);
16673                 tp->fw_needed = NULL;
16674         }
16675
16676         if (tg3_flag(tp, ENABLE_APE)) {
16677                 /* Allow reads and writes to the
16678                  * APE register and memory space.
16679                  */
16680                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16681                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16682                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16683                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16684                                        pci_state_reg);
16685
16686                 tg3_ape_lock_init(tp);
16687                 tp->ape_hb_interval =
16688                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16689         }
16690
16691         /* Set up tp->grc_local_ctrl before calling
16692          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16693          * will bring 5700's external PHY out of reset.
16694          * It is also used as eeprom write protect on LOMs.
16695          */
16696         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16697         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16698             tg3_flag(tp, EEPROM_WRITE_PROT))
16699                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16700                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16701         /* Unused GPIO3 must be driven as output on 5752 because there
16702          * are no pull-up resistors on unused GPIO pins.
16703          */
16704         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16705                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16706
16707         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16708             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16709             tg3_flag(tp, 57765_CLASS))
16710                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16711
16712         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16713             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16714                 /* Turn off the debug UART. */
16715                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16716                 if (tg3_flag(tp, IS_NIC))
16717                         /* Keep VMain power. */
16718                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16719                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16720         }
16721
16722         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16723                 tp->grc_local_ctrl |=
16724                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16725
16726         /* Switch out of Vaux if it is a NIC */
16727         tg3_pwrsrc_switch_to_vmain(tp);
16728
16729         /* Derive initial jumbo mode from MTU assigned in
16730          * ether_setup() via the alloc_etherdev() call
16731          */
16732         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16733                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16734
16735         /* Determine WakeOnLan speed to use. */
16736         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16737             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16738             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16739             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16740                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16741         } else {
16742                 tg3_flag_set(tp, WOL_SPEED_100MB);
16743         }
16744
16745         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16746                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16747
16748         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16749         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16750             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16751              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16752              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16753             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16754             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16755                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16756
16757         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16758             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16759                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16760         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16761                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16762
16763         if (tg3_flag(tp, 5705_PLUS) &&
16764             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16765             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16766             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16767             !tg3_flag(tp, 57765_PLUS)) {
16768                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16769                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16770                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16771                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16772                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16773                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16774                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16775                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16776                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16777                 } else
16778                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16779         }
16780
16781         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16782             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16783                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16784                 if (tp->phy_otp == 0)
16785                         tp->phy_otp = TG3_OTP_DEFAULT;
16786         }
16787
16788         if (tg3_flag(tp, CPMU_PRESENT))
16789                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16790         else
16791                 tp->mi_mode = MAC_MI_MODE_BASE;
16792
16793         tp->coalesce_mode = 0;
16794         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16795             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16796                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16797
16798         /* Set these bits to enable statistics workaround. */
16799         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16800             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16801             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16802             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16803                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16804                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16805         }
16806
16807         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16808             tg3_asic_rev(tp) == ASIC_REV_57780)
16809                 tg3_flag_set(tp, USE_PHYLIB);
16810
16811         err = tg3_mdio_init(tp);
16812         if (err)
16813                 return err;
16814
16815         /* Initialize data/descriptor byte/word swapping. */
16816         val = tr32(GRC_MODE);
16817         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16818             tg3_asic_rev(tp) == ASIC_REV_5762)
16819                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16820                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16821                         GRC_MODE_B2HRX_ENABLE |
16822                         GRC_MODE_HTX2B_ENABLE |
16823                         GRC_MODE_HOST_STACKUP);
16824         else
16825                 val &= GRC_MODE_HOST_STACKUP;
16826
16827         tw32(GRC_MODE, val | tp->grc_mode);
16828
16829         tg3_switch_clocks(tp);
16830
16831         /* Clear this out for sanity. */
16832         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16833
16834         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16835         tw32(TG3PCI_REG_BASE_ADDR, 0);
16836
16837         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16838                               &pci_state_reg);
16839         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16840             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16841                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16842                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16843                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16844                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16845                         void __iomem *sram_base;
16846
16847                         /* Write some dummy words into the SRAM status block
16848                          * area and see if they read back correctly.  If the
16849                          * read-back value is bad, force-enable the PCIX workaround.
16850                          */
16851                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16852
16853                         writel(0x00000000, sram_base);
16854                         writel(0x00000000, sram_base + 4);
16855                         writel(0xffffffff, sram_base + 4);
16856                         if (readl(sram_base) != 0x00000000)
16857                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16858                 }
16859         }
16860
16861         udelay(50);
16862         tg3_nvram_init(tp);
16863
16864         /* If the device has an NVRAM, no need to load patch firmware */
16865         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16866             !tg3_flag(tp, NO_NVRAM))
16867                 tp->fw_needed = NULL;
16868
16869         grc_misc_cfg = tr32(GRC_MISC_CFG);
16870         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16871
16872         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16873             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16874              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16875                 tg3_flag_set(tp, IS_5788);
16876
16877         if (!tg3_flag(tp, IS_5788) &&
16878             tg3_asic_rev(tp) != ASIC_REV_5700)
16879                 tg3_flag_set(tp, TAGGED_STATUS);
16880         if (tg3_flag(tp, TAGGED_STATUS)) {
16881                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16882                                       HOSTCC_MODE_CLRTICK_TXBD);
16883
16884                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16885                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16886                                        tp->misc_host_ctrl);
16887         }
16888
16889         /* Preserve the APE MAC_MODE bits */
16890         if (tg3_flag(tp, ENABLE_APE))
16891                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16892         else
16893                 tp->mac_mode = 0;
16894
16895         if (tg3_10_100_only_device(tp, ent))
16896                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16897
16898         err = tg3_phy_probe(tp);
16899         if (err) {
16900                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16901                 /* ... but do not return immediately ... */
16902                 tg3_mdio_fini(tp);
16903         }
16904
16905         tg3_read_vpd(tp);
16906         tg3_read_fw_ver(tp);
16907
16908         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16909                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16910         } else {
16911                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16912                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16913                 else
16914                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16915         }
16916
16917         /* 5700 {AX,BX} chips have a broken status block link
16918          * change bit implementation, so we must use the
16919          * status register in those cases.
16920          */
16921         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16922                 tg3_flag_set(tp, USE_LINKCHG_REG);
16923         else
16924                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16925
16926         /* The led_ctrl is set during tg3_phy_probe; here we might
16927          * have to force the link status polling mechanism based
16928          * upon subsystem IDs.
16929          */
16930         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16931             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16932             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16933                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16934                 tg3_flag_set(tp, USE_LINKCHG_REG);
16935         }
16936
16937         /* For all SERDES we poll the MAC status register. */
16938         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16939                 tg3_flag_set(tp, POLL_SERDES);
16940         else
16941                 tg3_flag_clear(tp, POLL_SERDES);
16942
16943         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16944                 tg3_flag_set(tp, POLL_CPMU_LINK);
16945
16946         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16947         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16948         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16949             tg3_flag(tp, PCIX_MODE)) {
16950                 tp->rx_offset = NET_SKB_PAD;
16951 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16952                 tp->rx_copy_thresh = ~(u16)0;
16953 #endif
16954         }
16955
16956         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16957         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16958         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16959
16960         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16961
16962         /* Increment the rx prod index on the rx std ring by at most
16963          * 8 for these chips to work around hw errata.
16964          */
16965         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16966             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16967             tg3_asic_rev(tp) == ASIC_REV_5755)
16968                 tp->rx_std_max_post = 8;
16969
16970         if (tg3_flag(tp, ASPM_WORKAROUND))
16971                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16972                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16973
16974         return err;
16975 }
16976
16977 static int tg3_get_device_address(struct tg3 *tp)
16978 {
16979         struct net_device *dev = tp->dev;
16980         u32 hi, lo, mac_offset;
16981         int addr_ok = 0;
16982         int err;
16983
16984         if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16985                 return 0;
16986
16987         if (tg3_flag(tp, IS_SSB_CORE)) {
16988                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16989                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16990                         return 0;
16991         }
16992
16993         mac_offset = 0x7c;
16994         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16995             tg3_flag(tp, 5780_CLASS)) {
16996                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16997                         mac_offset = 0xcc;
16998                 if (tg3_nvram_lock(tp))
16999                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
17000                 else
17001                         tg3_nvram_unlock(tp);
17002         } else if (tg3_flag(tp, 5717_PLUS)) {
17003                 if (tp->pci_fn & 1)
17004                         mac_offset = 0xcc;
17005                 if (tp->pci_fn > 1)
17006                         mac_offset += 0x18c;
17007         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17008                 mac_offset = 0x10;
17009
17010         /* First try to get it from MAC address mailbox. */
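        /* The check below looks for 0x484b (ASCII "HK") in the top 16 bits
         * of the high word; this appears to be a validity signature left by
         * the bootcode.  The remaining six bytes are the station address,
         * most significant byte first, as the unpacking shows.
         */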
17011         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
17012         if ((hi >> 16) == 0x484b) {
17013                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17014                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17015
17016                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17017                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17018                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17019                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17020                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17021
17022                 /* Some old bootcode may report a 0 MAC address in SRAM */
17023                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17024         }
17025         if (!addr_ok) {
17026                 /* Next, try NVRAM. */
17027                 if (!tg3_flag(tp, NO_NVRAM) &&
17028                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17029                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
17030                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17031                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17032                 }
17033                 /* Finally just fetch it out of the MAC control regs. */
17034                 else {
17035                         hi = tr32(MAC_ADDR_0_HIGH);
17036                         lo = tr32(MAC_ADDR_0_LOW);
17037
17038                         dev->dev_addr[5] = lo & 0xff;
17039                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17040                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17041                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17042                         dev->dev_addr[1] = hi & 0xff;
17043                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17044                 }
17045         }
17046
17047         if (!is_valid_ether_addr(&dev->dev_addr[0]))
17048                 return -EINVAL;
17049         return 0;
17050 }
17051
17052 #define BOUNDARY_SINGLE_CACHELINE       1
17053 #define BOUNDARY_MULTI_CACHELINE        2
17054
17055 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17056 {
17057         int cacheline_size;
17058         u8 byte;
17059         int goal;
17060
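        /* PCI_CACHE_LINE_SIZE is encoded in 32-bit words, hence the
         * multiply-by-4 below to get bytes.  A value of 0 (the register's
         * reset default) is treated as a 1024-byte worst case.
         */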
17061         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17062         if (byte == 0)
17063                 cacheline_size = 1024;
17064         else
17065                 cacheline_size = (int) byte * 4;
17066
17067         /* On 5703 and later chips, the boundary bits have no
17068          * effect.
17069          */
17070         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17071             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17072             !tg3_flag(tp, PCI_EXPRESS))
17073                 goto out;
17074
17075 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17076         goal = BOUNDARY_MULTI_CACHELINE;
17077 #else
17078 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17079         goal = BOUNDARY_SINGLE_CACHELINE;
17080 #else
17081         goal = 0;
17082 #endif
17083 #endif
17084
17085         if (tg3_flag(tp, 57765_PLUS)) {
17086                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17087                 goto out;
17088         }
17089
17090         if (!goal)
17091                 goto out;
17092
17093         /* PCI controllers on most RISC systems tend to disconnect
17094          * when a device tries to burst across a cache-line boundary.
17095          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17096          *
17097          * Unfortunately, for PCI-E there are only limited
17098          * write-side controls for this, and thus for reads
17099          * we will still get the disconnects.  We'll also waste
17100          * these PCI cycles for both read and write on chips
17101          * other than the 5700 and 5701, which do not implement
17102          * the boundary bits.
17103          */
17104         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17105                 switch (cacheline_size) {
17106                 case 16:
17107                 case 32:
17108                 case 64:
17109                 case 128:
17110                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17111                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17112                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17113                         } else {
17114                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17115                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17116                         }
17117                         break;
17118
17119                 case 256:
17120                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17121                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17122                         break;
17123
17124                 default:
17125                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17126                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17127                         break;
17128                 }
17129         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17130                 switch (cacheline_size) {
17131                 case 16:
17132                 case 32:
17133                 case 64:
17134                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17135                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17136                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17137                                 break;
17138                         }
17139                         /* fallthrough */
17140                 case 128:
17141                 default:
17142                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17143                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17144                         break;
17145                 }
17146         } else {
17147                 switch (cacheline_size) {
17148                 case 16:
17149                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17150                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17151                                         DMA_RWCTRL_WRITE_BNDRY_16);
17152                                 break;
17153                         }
17154                         /* fallthrough */
17155                 case 32:
17156                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17157                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17158                                         DMA_RWCTRL_WRITE_BNDRY_32);
17159                                 break;
17160                         }
17161                         /* fallthrough */
17162                 case 64:
17163                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17164                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17165                                         DMA_RWCTRL_WRITE_BNDRY_64);
17166                                 break;
17167                         }
17168                         /* fallthrough */
17169                 case 128:
17170                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17171                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17172                                         DMA_RWCTRL_WRITE_BNDRY_128);
17173                                 break;
17174                         }
17175                         /* fallthrough */
17176                 case 256:
17177                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17178                                 DMA_RWCTRL_WRITE_BNDRY_256);
17179                         break;
17180                 case 512:
17181                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17182                                 DMA_RWCTRL_WRITE_BNDRY_512);
17183                         break;
17184                 case 1024:
17185                 default:
17186                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17187                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17188                         break;
17189                 }
17190         }
17191
17192 out:
17193         return val;
17194 }
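
/* Usage sketch, mirroring what tg3_test_dma() does further below: the caller
 * folds the boundary bits into its DMA read/write control word and then
 * programs the register:
 *
 *     tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 *     tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
 */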
17195
17196 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17197                            int size, bool to_device)
17198 {
17199         struct tg3_internal_buffer_desc test_desc;
17200         u32 sram_dma_descs;
17201         int i, ret;
17202
17203         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17204
17205         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17206         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17207         tw32(RDMAC_STATUS, 0);
17208         tw32(WDMAC_STATUS, 0);
17209
17210         tw32(BUFMGR_MODE, 0);
17211         tw32(FTQ_RESET, 0);
17212
17213         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17214         test_desc.addr_lo = buf_dma & 0xffffffff;
17215         test_desc.nic_mbuf = 0x00002100;
17216         test_desc.len = size;
17217
17218         /*
17219          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17220          * the *second* time the tg3 driver was loaded after an
17221          * initial scan.
17222          *
17223          * Broadcom tells me:
17224          *   ...the DMA engine is connected to the GRC block and a DMA
17225          *   reset may affect the GRC block in some unpredictable way...
17226          *   The behavior of resets to individual blocks has not been tested.
17227          *
17228          * Broadcom noted that the GRC reset will also reset all sub-components.
17229          */
17230         if (to_device) {
17231                 test_desc.cqid_sqid = (13 << 8) | 2;
17232
17233                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17234                 udelay(40);
17235         } else {
17236                 test_desc.cqid_sqid = (16 << 8) | 7;
17237
17238                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17239                 udelay(40);
17240         }
17241         test_desc.flags = 0x00000005;
17242
17243         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17244                 u32 val;
17245
17246                 val = *(((u32 *)&test_desc) + i);
17247                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17248                                        sram_dma_descs + (i * sizeof(u32)));
17249                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17250         }
17251         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17252
17253         if (to_device)
17254                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17255         else
17256                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17257
17258         ret = -ENODEV;
17259         for (i = 0; i < 40; i++) {
17260                 u32 val;
17261
17262                 if (to_device)
17263                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17264                 else
17265                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17266                 if ((val & 0xffff) == sram_dma_descs) {
17267                         ret = 0;
17268                         break;
17269                 }
17270
17271                 udelay(100);
17272         }
17273
17274         return ret;
17275 }
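
/* Note on the descriptor setup above: the test descriptor is pushed into NIC
 * SRAM through the PCI config-space memory window rather than through BAR 0,
 * one 32-bit word at a time:
 *
 *     pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, sram_off);
 *     pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
 *
 * and the window base is reset to 0 afterwards, matching the "clear this out
 * for sanity" handling of the same window elsewhere in this file.
 */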
17276
17277 #define TEST_BUFFER_SIZE        0x2000
17278
17279 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17280         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17281         { },
17282 };
17283
17284 static int tg3_test_dma(struct tg3 *tp)
17285 {
17286         dma_addr_t buf_dma;
17287         u32 *buf, saved_dma_rwctrl;
17288         int ret = 0;
17289
17290         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17291                                  &buf_dma, GFP_KERNEL);
17292         if (!buf) {
17293                 ret = -ENOMEM;
17294                 goto out_nofree;
17295         }
17296
17297         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17298                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17299
17300         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17301
17302         if (tg3_flag(tp, 57765_PLUS))
17303                 goto out;
17304
17305         if (tg3_flag(tp, PCI_EXPRESS)) {
17306                 /* DMA read watermark not used on PCIE */
17307                 tp->dma_rwctrl |= 0x00180000;
17308         } else if (!tg3_flag(tp, PCIX_MODE)) {
17309                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17310                     tg3_asic_rev(tp) == ASIC_REV_5750)
17311                         tp->dma_rwctrl |= 0x003f0000;
17312                 else
17313                         tp->dma_rwctrl |= 0x003f000f;
17314         } else {
17315                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17316                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17317                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17318                         u32 read_water = 0x7;
17319
17320                         /* If the 5704 is behind the EPB bridge, we can
17321                          * do the less restrictive ONE_DMA workaround for
17322                          * better performance.
17323                          */
17324                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17325                             tg3_asic_rev(tp) == ASIC_REV_5704)
17326                                 tp->dma_rwctrl |= 0x8000;
17327                         else if (ccval == 0x6 || ccval == 0x7)
17328                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17329
17330                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17331                                 read_water = 4;
17332                         /* Set bit 23 to enable PCIX hw bug fix */
17333                         tp->dma_rwctrl |=
17334                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17335                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17336                                 (1 << 23);
17337                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17338                         /* 5780 always in PCIX mode */
17339                         tp->dma_rwctrl |= 0x00144000;
17340                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17341                         /* 5714 always in PCIX mode */
17342                         tp->dma_rwctrl |= 0x00148000;
17343                 } else {
17344                         tp->dma_rwctrl |= 0x001b000f;
17345                 }
17346         }
17347         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17348                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17349
17350         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17351             tg3_asic_rev(tp) == ASIC_REV_5704)
17352                 tp->dma_rwctrl &= 0xfffffff0;
17353
17354         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17355             tg3_asic_rev(tp) == ASIC_REV_5701) {
17356                 /* Remove this if it causes problems for some boards. */
17357                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17358
17359                 /* On 5700/5701 chips, we need to set this bit.
17360                  * Otherwise the chip will issue cacheline transactions
17361                  * to streamable DMA memory without all of the byte
17362                  * enables asserted.  This is an error on several
17363                  * RISC PCI controllers, in particular sparc64.
17364                  *
17365                  * On 5703/5704 chips, this bit has been reassigned
17366                  * a different meaning.  In particular, it is used
17367                  * on those chips to enable a PCI-X workaround.
17368                  */
17369                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17370         }
17371
17372         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17373
17374
17375         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17376             tg3_asic_rev(tp) != ASIC_REV_5701)
17377                 goto out;
17378
17379         /* It is best to perform the DMA test with the maximum write
17380          * burst size to expose the 5700/5701 write DMA bug.
17381          */
17382         saved_dma_rwctrl = tp->dma_rwctrl;
17383         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17384         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17385
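        /* Retry strategy: run the round trip at the unrestricted burst size
         * first; if the read-back data is corrupted, clamp the write boundary
         * to 16 bytes and run the whole test once more before giving up.
         */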
17386         while (1) {
17387                 u32 *p = buf, i;
17388
17389                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17390                         p[i] = i;
17391
17392                 /* Send the buffer to the chip. */
17393                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17394                 if (ret) {
17395                         dev_err(&tp->pdev->dev,
17396                                 "%s: Buffer write failed. err = %d\n",
17397                                 __func__, ret);
17398                         break;
17399                 }
17400
17401                 /* Now read it back. */
17402                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17403                 if (ret) {
17404                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17405                                 "err = %d\n", __func__, ret);
17406                         break;
17407                 }
17408
17409                 /* Verify it. */
17410                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17411                         if (p[i] == i)
17412                                 continue;
17413
17414                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17415                             DMA_RWCTRL_WRITE_BNDRY_16) {
17416                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17417                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17418                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17419                                 break;
17420                         } else {
17421                                 dev_err(&tp->pdev->dev,
17422                                         "%s: Buffer corrupted on read back! "
17423                                         "(%d != %d)\n", __func__, p[i], i);
17424                                 ret = -ENODEV;
17425                                 goto out;
17426                         }
17427                 }
17428
17429                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17430                         /* Success. */
17431                         ret = 0;
17432                         break;
17433                 }
17434         }
17435         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17436             DMA_RWCTRL_WRITE_BNDRY_16) {
17437                 /* DMA test passed without adjusting DMA boundary;
17438                  * now look for chipsets that are known to expose the
17439                  * DMA bug without failing the test.
17440                  */
17441                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17442                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17443                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17444                 } else {
17445                         /* Safe to use the calculated DMA boundary. */
17446                         tp->dma_rwctrl = saved_dma_rwctrl;
17447                 }
17448
17449                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17450         }
17451
17452 out:
17453         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17454 out_nofree:
17455         return ret;
17456 }
17457
17458 static void tg3_init_bufmgr_config(struct tg3 *tp)
17459 {
17460         if (tg3_flag(tp, 57765_PLUS)) {
17461                 tp->bufmgr_config.mbuf_read_dma_low_water =
17462                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17463                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17464                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17465                 tp->bufmgr_config.mbuf_high_water =
17466                         DEFAULT_MB_HIGH_WATER_57765;
17467
17468                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17469                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17470                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17471                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17472                 tp->bufmgr_config.mbuf_high_water_jumbo =
17473                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17474         } else if (tg3_flag(tp, 5705_PLUS)) {
17475                 tp->bufmgr_config.mbuf_read_dma_low_water =
17476                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17477                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17478                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17479                 tp->bufmgr_config.mbuf_high_water =
17480                         DEFAULT_MB_HIGH_WATER_5705;
17481                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17482                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17483                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17484                         tp->bufmgr_config.mbuf_high_water =
17485                                 DEFAULT_MB_HIGH_WATER_5906;
17486                 }
17487
17488                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17489                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17490                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17491                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17492                 tp->bufmgr_config.mbuf_high_water_jumbo =
17493                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17494         } else {
17495                 tp->bufmgr_config.mbuf_read_dma_low_water =
17496                         DEFAULT_MB_RDMA_LOW_WATER;
17497                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17498                         DEFAULT_MB_MACRX_LOW_WATER;
17499                 tp->bufmgr_config.mbuf_high_water =
17500                         DEFAULT_MB_HIGH_WATER;
17501
17502                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17503                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17504                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17505                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17506                 tp->bufmgr_config.mbuf_high_water_jumbo =
17507                         DEFAULT_MB_HIGH_WATER_JUMBO;
17508         }
17509
17510         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17511         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17512 }
17513
17514 static char *tg3_phy_string(struct tg3 *tp)
17515 {
17516         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17517         case TG3_PHY_ID_BCM5400:        return "5400";
17518         case TG3_PHY_ID_BCM5401:        return "5401";
17519         case TG3_PHY_ID_BCM5411:        return "5411";
17520         case TG3_PHY_ID_BCM5701:        return "5701";
17521         case TG3_PHY_ID_BCM5703:        return "5703";
17522         case TG3_PHY_ID_BCM5704:        return "5704";
17523         case TG3_PHY_ID_BCM5705:        return "5705";
17524         case TG3_PHY_ID_BCM5750:        return "5750";
17525         case TG3_PHY_ID_BCM5752:        return "5752";
17526         case TG3_PHY_ID_BCM5714:        return "5714";
17527         case TG3_PHY_ID_BCM5780:        return "5780";
17528         case TG3_PHY_ID_BCM5755:        return "5755";
17529         case TG3_PHY_ID_BCM5787:        return "5787";
17530         case TG3_PHY_ID_BCM5784:        return "5784";
17531         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17532         case TG3_PHY_ID_BCM5906:        return "5906";
17533         case TG3_PHY_ID_BCM5761:        return "5761";
17534         case TG3_PHY_ID_BCM5718C:       return "5718C";
17535         case TG3_PHY_ID_BCM5718S:       return "5718S";
17536         case TG3_PHY_ID_BCM57765:       return "57765";
17537         case TG3_PHY_ID_BCM5719C:       return "5719C";
17538         case TG3_PHY_ID_BCM5720C:       return "5720C";
17539         case TG3_PHY_ID_BCM5762:        return "5762C";
17540         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17541         case 0:                 return "serdes";
17542         default:                return "unknown";
17543         }
17544 }
17545
17546 static char *tg3_bus_string(struct tg3 *tp, char *str)
17547 {
17548         if (tg3_flag(tp, PCI_EXPRESS)) {
17549                 strcpy(str, "PCI Express");
17550                 return str;
17551         } else if (tg3_flag(tp, PCIX_MODE)) {
17552                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17553
17554                 strcpy(str, "PCIX:");
17555
17556                 if ((clock_ctrl == 7) ||
17557                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17558                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17559                         strcat(str, "133MHz");
17560                 else if (clock_ctrl == 0)
17561                         strcat(str, "33MHz");
17562                 else if (clock_ctrl == 2)
17563                         strcat(str, "50MHz");
17564                 else if (clock_ctrl == 4)
17565                         strcat(str, "66MHz");
17566                 else if (clock_ctrl == 6)
17567                         strcat(str, "100MHz");
17568         } else {
17569                 strcpy(str, "PCI:");
17570                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17571                         strcat(str, "66MHz");
17572                 else
17573                         strcat(str, "33MHz");
17574         }
17575         if (tg3_flag(tp, PCI_32BIT))
17576                 strcat(str, ":32-bit");
17577         else
17578                 strcat(str, ":64-bit");
17579         return str;
17580 }
17581
17582 static void tg3_init_coal(struct tg3 *tp)
17583 {
17584         struct ethtool_coalesce *ec = &tp->coal;
17585
17586         memset(ec, 0, sizeof(*ec));
17587         ec->cmd = ETHTOOL_GCOALESCE;
17588         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17589         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17590         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17591         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17592         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17593         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17594         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17595         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17596         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17597
17598         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17599                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17600                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17601                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17602                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17603                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17604         }
17605
17606         if (tg3_flag(tp, 5705_PLUS)) {
17607                 ec->rx_coalesce_usecs_irq = 0;
17608                 ec->tx_coalesce_usecs_irq = 0;
17609                 ec->stats_block_coalesce_usecs = 0;
17610         }
17611 }
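
/* These defaults are what "ethtool -c <iface>" reports before any user
 * tuning; user updates come back through the driver's set_coalesce ethtool
 * hook, where they are range-checked.
 */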
17612
17613 static int tg3_init_one(struct pci_dev *pdev,
17614                                   const struct pci_device_id *ent)
17615 {
17616         struct net_device *dev;
17617         struct tg3 *tp;
17618         int i, err;
17619         u32 sndmbx, rcvmbx, intmbx;
17620         char str[40];
17621         u64 dma_mask, persist_dma_mask;
17622         netdev_features_t features = 0;
17623
17624         err = pci_enable_device(pdev);
17625         if (err) {
17626                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17627                 return err;
17628         }
17629
17630         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17631         if (err) {
17632                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17633                 goto err_out_disable_pdev;
17634         }
17635
17636         pci_set_master(pdev);
17637
17638         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17639         if (!dev) {
17640                 err = -ENOMEM;
17641                 goto err_out_free_res;
17642         }
17643
17644         SET_NETDEV_DEV(dev, &pdev->dev);
17645
17646         tp = netdev_priv(dev);
17647         tp->pdev = pdev;
17648         tp->dev = dev;
17649         tp->rx_mode = TG3_DEF_RX_MODE;
17650         tp->tx_mode = TG3_DEF_TX_MODE;
17651         tp->irq_sync = 1;
17652         tp->pcierr_recovery = false;
17653
17654         if (tg3_debug > 0)
17655                 tp->msg_enable = tg3_debug;
17656         else
17657                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17658
17659         if (pdev_is_ssb_gige_core(pdev)) {
17660                 tg3_flag_set(tp, IS_SSB_CORE);
17661                 if (ssb_gige_must_flush_posted_writes(pdev))
17662                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17663                 if (ssb_gige_one_dma_at_once(pdev))
17664                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17665                 if (ssb_gige_have_roboswitch(pdev)) {
17666                         tg3_flag_set(tp, USE_PHYLIB);
17667                         tg3_flag_set(tp, ROBOSWITCH);
17668                 }
17669                 if (ssb_gige_is_rgmii(pdev))
17670                         tg3_flag_set(tp, RGMII_MODE);
17671         }
17672
17673         /* The word/byte swap controls here control register access byte
17674          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17675          * setting below.
17676          */
17677         tp->misc_host_ctrl =
17678                 MISC_HOST_CTRL_MASK_PCI_INT |
17679                 MISC_HOST_CTRL_WORD_SWAP |
17680                 MISC_HOST_CTRL_INDIR_ACCESS |
17681                 MISC_HOST_CTRL_PCISTATE_RW;
17682
17683         /* The NONFRM (non-frame) byte/word swap controls take effect
17684          * on descriptor entries, i.e. anything which isn't packet data.
17685          *
17686          * The StrongARM chips on the board (one for tx, one for rx)
17687          * are running in big-endian mode.
17688          */
17689         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17690                         GRC_MODE_WSWAP_NONFRM_DATA);
17691 #ifdef __BIG_ENDIAN
17692         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17693 #endif
17694         spin_lock_init(&tp->lock);
17695         spin_lock_init(&tp->indirect_lock);
17696         INIT_WORK(&tp->reset_task, tg3_reset_task);
17697
17698         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17699         if (!tp->regs) {
17700                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17701                 err = -ENOMEM;
17702                 goto err_out_free_dev;
17703         }
17704
17705         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17706             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17707             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17708             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17709             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17710             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17711             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17712             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17713             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17714             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17715             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17716             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17717             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17718             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17719             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17720                 tg3_flag_set(tp, ENABLE_APE);
17721                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17722                 if (!tp->aperegs) {
17723                         dev_err(&pdev->dev,
17724                                 "Cannot map APE registers, aborting\n");
17725                         err = -ENOMEM;
17726                         goto err_out_iounmap;
17727                 }
17728         }
17729
17730         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17731         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17732
17733         dev->ethtool_ops = &tg3_ethtool_ops;
17734         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17735         dev->netdev_ops = &tg3_netdev_ops;
17736         dev->irq = pdev->irq;
17737
17738         err = tg3_get_invariants(tp, ent);
17739         if (err) {
17740                 dev_err(&pdev->dev,
17741                         "Problem fetching invariants of chip, aborting\n");
17742                 goto err_out_apeunmap;
17743         }
17744
17745         /* The EPB bridge inside 5714, 5715, and 5780 and any
17746          * device behind the EPB cannot support DMA addresses > 40-bit.
17747          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17748          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17749          * do DMA address check in tg3_start_xmit().
17750          */
17751         if (tg3_flag(tp, IS_5788))
17752                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17753         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17754                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17755 #ifdef CONFIG_HIGHMEM
17756                 dma_mask = DMA_BIT_MASK(64);
17757 #endif
17758         } else
17759                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17760
17761         /* Configure DMA attributes. */
17762         if (dma_mask > DMA_BIT_MASK(32)) {
17763                 err = pci_set_dma_mask(pdev, dma_mask);
17764                 if (!err) {
17765                         features |= NETIF_F_HIGHDMA;
17766                         err = pci_set_consistent_dma_mask(pdev,
17767                                                           persist_dma_mask);
17768                         if (err < 0) {
17769                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17770                                         "DMA for consistent allocations\n");
17771                                 goto err_out_apeunmap;
17772                         }
17773                 }
17774         }
17775         if (err || dma_mask == DMA_BIT_MASK(32)) {
17776                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17777                 if (err) {
17778                         dev_err(&pdev->dev,
17779                                 "No usable DMA configuration, aborting\n");
17780                         goto err_out_apeunmap;
17781                 }
17782         }
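        /* Aside: pci_set_dma_mask()/pci_set_consistent_dma_mask() are thin
         * wrappers around dma_set_mask()/dma_set_coherent_mask() on
         * &pdev->dev.  Two masks are kept because coherent allocations must
         * stay below the persistent limit for the life of the device, while
         * streaming mappings may use the wider mask when highmem is present.
         */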
17783
17784         tg3_init_bufmgr_config(tp);
17785
17786         /* 5700 B0 chips do not support checksumming correctly due
17787          * to hardware bugs.
17788          */
17789         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17790                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17791
17792                 if (tg3_flag(tp, 5755_PLUS))
17793                         features |= NETIF_F_IPV6_CSUM;
17794         }
17795
17796         /* TSO is on by default on chips that support hardware TSO.
17797          * Firmware TSO on older chips gives lower performance, so it
17798          * is off by default, but can be enabled using ethtool.
17799          */
17800         if ((tg3_flag(tp, HW_TSO_1) ||
17801              tg3_flag(tp, HW_TSO_2) ||
17802              tg3_flag(tp, HW_TSO_3)) &&
17803             (features & NETIF_F_IP_CSUM))
17804                 features |= NETIF_F_TSO;
17805         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17806                 if (features & NETIF_F_IPV6_CSUM)
17807                         features |= NETIF_F_TSO6;
17808                 if (tg3_flag(tp, HW_TSO_3) ||
17809                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17810                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17811                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17812                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17813                     tg3_asic_rev(tp) == ASIC_REV_57780)
17814                         features |= NETIF_F_TSO_ECN;
17815         }
17816
17817         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17818                          NETIF_F_HW_VLAN_CTAG_RX;
17819         dev->vlan_features |= features;
17820
17821         /*
17822          * Add loopback capability only for a subset of devices that support
17823          * MAC-LOOPBACK.  Eventually this needs to be enhanced to allow INT-PHY
17824          * loopback for the remaining devices.
17825          */
17826         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17827             !tg3_flag(tp, CPMU_PRESENT))
17828                 /* Add the loopback capability */
17829                 features |= NETIF_F_LOOPBACK;
17830
17831         dev->hw_features |= features;
17832         dev->priv_flags |= IFF_UNICAST_FLT;
17833
17834         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17835         dev->min_mtu = TG3_MIN_MTU;
17836         dev->max_mtu = TG3_MAX_MTU(tp);
17837
17838         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17839             !tg3_flag(tp, TSO_CAPABLE) &&
17840             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17841                 tg3_flag_set(tp, MAX_RXPEND_64);
17842                 tp->rx_pending = 63;
17843         }
17844
17845         err = tg3_get_device_address(tp);
17846         if (err) {
17847                 dev_err(&pdev->dev,
17848                         "Could not obtain valid ethernet address, aborting\n");
17849                 goto err_out_apeunmap;
17850         }
17851
17852         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17853         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17854         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17855         for (i = 0; i < tp->irq_max; i++) {
17856                 struct tg3_napi *tnapi = &tp->napi[i];
17857
17858                 tnapi->tp = tp;
17859                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17860
17861                 tnapi->int_mbox = intmbx;
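                /* The first five interrupt mailboxes are spaced 8 bytes
                 * apart (64-bit registers, per TG3_64BIT_REG_LOW); later
                 * vectors fall back to a packed 4-byte stride, hence the
                 * split below.
                 */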
17862                 if (i <= 4)
17863                         intmbx += 0x8;
17864                 else
17865                         intmbx += 0x4;
17866
17867                 tnapi->consmbox = rcvmbx;
17868                 tnapi->prodmbox = sndmbx;
17869
17870                 if (i)
17871                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17872                 else
17873                         tnapi->coal_now = HOSTCC_MODE_NOW;
17874
17875                 if (!tg3_flag(tp, SUPPORT_MSIX))
17876                         break;
17877
17878                 /*
17879                  * If we support MSIX, we'll be using RSS.  If we're using
17880                  * RSS, the first vector only handles link interrupts and the
17881                  * remaining vectors handle rx and tx interrupts.  Reuse the
17882                  * mailbox values for the next iteration.  The values we set up
17883                  * above are still useful for the single vectored mode.
17884                  */
17885                 if (!i)
17886                         continue;
17887
17888                 rcvmbx += 0x8;
17889
17890                 if (sndmbx & 0x4)
17891                         sndmbx -= 0x4;
17892                 else
17893                         sndmbx += 0xc;
17894         }
17895
17896         /*
17897          * Reset the chip in case a UNDI or EFI driver did not shut it
17898          * down cleanly; otherwise the DMA self test will enable WDMAC and
17899          * we'll see (spurious) pending DMA on the PCI bus at that point.
17900          */
17901         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17902             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17903                 tg3_full_lock(tp, 0);
17904                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17905                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17906                 tg3_full_unlock(tp);
17907         }
17908
17909         err = tg3_test_dma(tp);
17910         if (err) {
17911                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17912                 goto err_out_apeunmap;
17913         }
17914
17915         tg3_init_coal(tp);
17916
17917         pci_set_drvdata(pdev, dev);
17918
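              /*
               * These are the ASIC families the driver treats as having
               * PTP hardware; the clock itself is registered only after
               * register_netdev() succeeds below.
               */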
17919         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17920             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17921             tg3_asic_rev(tp) == ASIC_REV_5762)
17922                 tg3_flag_set(tp, PTP_CAPABLE);
17923
17924         tg3_timer_init(tp);
17925
17926         tg3_carrier_off(tp);
17927
17928         err = register_netdev(dev);
17929         if (err) {
17930                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17931                 goto err_out_apeunmap;
17932         }
17933
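              /*
               * PTP registration is best-effort: ptp_clock_register()
               * returns an ERR_PTR on failure, which is dropped to NULL
               * so the rest of the driver sees "no PTP clock" instead of
               * the probe failing.
               */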
17934         if (tg3_flag(tp, PTP_CAPABLE)) {
17935                 tg3_ptp_init(tp);
17936                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17937                                                    &tp->pdev->dev);
17938                 if (IS_ERR(tp->ptp_clock))
17939                         tp->ptp_clock = NULL;
17940         }
17941
17942         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17943                     tp->board_part_number,
17944                     tg3_chip_rev_id(tp),
17945                     tg3_bus_string(tp, str),
17946                     dev->dev_addr);
17947
17948         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17949                 char *ethtype;
17950
17951                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17952                         ethtype = "10/100Base-TX";
17953                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17954                         ethtype = "1000Base-SX";
17955                 else
17956                         ethtype = "10/100/1000Base-T";
17957
17958                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17959                             "(WireSpeed[%d], EEE[%d])\n",
17960                             tg3_phy_string(tp), ethtype,
17961                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17962                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17963         }
17964
17965         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17966                     (dev->features & NETIF_F_RXCSUM) != 0,
17967                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17968                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17969                     tg3_flag(tp, ENABLE_ASF) != 0,
17970                     tg3_flag(tp, TSO_CAPABLE) != 0);
17971         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17972                     tp->dma_rwctrl,
17973                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17974                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17975
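              /*
               * Snapshot config space now that the device is fully set
               * up; tg3_io_slot_reset() restores this saved state after
               * an EEH/AER-induced bus reset.
               */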
17976         pci_save_state(pdev);
17977
17978         return 0;
17979
17980 err_out_apeunmap:
17981         if (tp->aperegs) {
17982                 iounmap(tp->aperegs);
17983                 tp->aperegs = NULL;
17984         }
17985
17986 err_out_iounmap:
17987         if (tp->regs) {
17988                 iounmap(tp->regs);
17989                 tp->regs = NULL;
17990         }
17991
17992 err_out_free_dev:
17993         free_netdev(dev);
17994
17995 err_out_free_res:
17996         pci_release_regions(pdev);
17997
17998 err_out_disable_pdev:
17999         if (pci_is_enabled(pdev))
18000                 pci_disable_device(pdev);
18001         return err;
18002 }
18003
18004 static void tg3_remove_one(struct pci_dev *pdev)
18005 {
18006         struct net_device *dev = pci_get_drvdata(pdev);
18007
18008         if (dev) {
18009                 struct tg3 *tp = netdev_priv(dev);
18010
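                      /*
                       * Tear down roughly in reverse probe order: PTP and
                       * firmware first, cancel any pending reset work,
                       * detach the PHY/MDIO layer, then release the netdev
                       * and PCI resources.
                       */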
18011                 tg3_ptp_fini(tp);
18012
18013                 release_firmware(tp->fw);
18014
18015                 tg3_reset_task_cancel(tp);
18016
18017                 if (tg3_flag(tp, USE_PHYLIB)) {
18018                         tg3_phy_fini(tp);
18019                         tg3_mdio_fini(tp);
18020                 }
18021
18022                 unregister_netdev(dev);
18023                 if (tp->aperegs) {
18024                         iounmap(tp->aperegs);
18025                         tp->aperegs = NULL;
18026                 }
18027                 if (tp->regs) {
18028                         iounmap(tp->regs);
18029                         tp->regs = NULL;
18030                 }
18031                 free_netdev(dev);
18032                 pci_release_regions(pdev);
18033                 pci_disable_device(pdev);
18034         }
18035 }
18036
18037 #ifdef CONFIG_PM_SLEEP
18038 static int tg3_suspend(struct device *device)
18039 {
18040         struct net_device *dev = dev_get_drvdata(device);
18041         struct tg3 *tp = netdev_priv(dev);
18042         int err = 0;
18043
18044         rtnl_lock();
18045
18046         if (!netif_running(dev))
18047                 goto unlock;
18048
18049         tg3_reset_task_cancel(tp);
18050         tg3_phy_stop(tp);
18051         tg3_netif_stop(tp);
18052
18053         tg3_timer_stop(tp);
18054
18055         tg3_full_lock(tp, 1);
18056         tg3_disable_ints(tp);
18057         tg3_full_unlock(tp);
18058
18059         netif_device_detach(dev);
18060
18061         tg3_full_lock(tp, 0);
18062         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18063         tg3_flag_clear(tp, INIT_COMPLETE);
18064         tg3_full_unlock(tp);
18065
18066         err = tg3_power_down_prepare(tp);
18067         if (err) {
18068                 int err2;
18069
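                      /*
                       * Power-down preparation failed; undo the halt above
                       * and bring the interface back up rather than leave
                       * the device half-stopped.
                       */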
18070                 tg3_full_lock(tp, 0);
18071
18072                 tg3_flag_set(tp, INIT_COMPLETE);
18073                 err2 = tg3_restart_hw(tp, true);
18074                 if (err2)
18075                         goto out;
18076
18077                 tg3_timer_start(tp);
18078
18079                 netif_device_attach(dev);
18080                 tg3_netif_start(tp);
18081
18082 out:
18083                 tg3_full_unlock(tp);
18084
18085                 if (!err2)
18086                         tg3_phy_start(tp);
18087         }
18088
18089 unlock:
18090         rtnl_unlock();
18091         return err;
18092 }
18093
18094 static int tg3_resume(struct device *device)
18095 {
18096         struct net_device *dev = dev_get_drvdata(device);
18097         struct tg3 *tp = netdev_priv(dev);
18098         int err = 0;
18099
18100         rtnl_lock();
18101
18102         if (!netif_running(dev))
18103                 goto unlock;
18104
18105         netif_device_attach(dev);
18106
18107         tg3_full_lock(tp, 0);
18108
18109         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18110
18111         tg3_flag_set(tp, INIT_COMPLETE);
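              /*
               * The second argument of tg3_restart_hw() is reset_phy:
               * skip the PHY reset when the link was deliberately kept
               * up across the power-down.
               */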
18112         err = tg3_restart_hw(tp,
18113                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18114         if (err)
18115                 goto out;
18116
18117         tg3_timer_start(tp);
18118
18119         tg3_netif_start(tp);
18120
18121 out:
18122         tg3_full_unlock(tp);
18123
18124         if (!err)
18125                 tg3_phy_start(tp);
18126
18127 unlock:
18128         rtnl_unlock();
18129         return err;
18130 }
18131 #endif /* CONFIG_PM_SLEEP */
18132
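      /*
       * SIMPLE_DEV_PM_OPS compiles the references to tg3_suspend and
       * tg3_resume out when CONFIG_PM_SLEEP is unset, matching the
       * #ifdef around the callbacks above.
       */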
18133 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18134
18135 static void tg3_shutdown(struct pci_dev *pdev)
18136 {
18137         struct net_device *dev = pci_get_drvdata(pdev);
18138         struct tg3 *tp = netdev_priv(dev);
18139
18140         rtnl_lock();
18141         netif_device_detach(dev);
18142
18143         if (netif_running(dev))
18144                 dev_close(dev);
18145
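              /*
               * Only drop the chip to its low-power state (arming
               * wake-on-LAN if configured) on a real power-off; on a
               * reboot the device is left powered, presumably so the
               * next driver instance finds it responsive.
               */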
18146         if (system_state == SYSTEM_POWER_OFF)
18147                 tg3_power_down(tp);
18148
18149         rtnl_unlock();
18150 }
18151
18152 /**
18153  * tg3_io_error_detected - called when PCI error is detected
18154  * @pdev: Pointer to PCI device
18155  * @state: The current pci connection state
18156  *
18157  * This function is called after a PCI bus error affecting
18158  * this device has been detected.
18159  */
18160 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18161                                               pci_channel_state_t state)
18162 {
18163         struct net_device *netdev = pci_get_drvdata(pdev);
18164         struct tg3 *tp = netdev_priv(netdev);
18165         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18166
18167         netdev_info(netdev, "PCI I/O error detected\n");
18168
18169         rtnl_lock();
18170
18171         /* Could be a second call, or maybe we don't have the netdev yet */
18172         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18173                 goto done;
18174
18175         /* A frozen error is recoverable; flag recovery in progress so
               * a repeat notification skips this teardown.  We needn't
               * recover from a permanent error. */
18176         if (state == pci_channel_io_frozen)
18177                 tp->pcierr_recovery = true;
18178
18179         tg3_phy_stop(tp);
18180
18181         tg3_netif_stop(tp);
18182
18183         tg3_timer_stop(tp);
18184
18185         /* Want to make sure that the reset task doesn't run */
18186         tg3_reset_task_cancel(tp);
18187
18188         netif_device_detach(netdev);
18189
18190         /* Clean up software state, even if MMIO is blocked */
18191         tg3_full_lock(tp, 0);
18192         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18193         tg3_full_unlock(tp);
18194
18195 done:
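              /*
               * On a permanent failure, re-enable NAPI (stopped above)
               * before dev_close() so the close path does not sleep
               * indefinitely, then tell the PCI error core to
               * disconnect.  For recoverable errors, disable the device
               * and wait for tg3_io_slot_reset().
               */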
18196         if (state == pci_channel_io_perm_failure) {
18197                 if (netdev) {
18198                         tg3_napi_enable(tp);
18199                         dev_close(netdev);
18200                 }
18201                 err = PCI_ERS_RESULT_DISCONNECT;
18202         } else {
18203                 pci_disable_device(pdev);
18204         }
18205
18206         rtnl_unlock();
18207
18208         return err;
18209 }
18210
18211 /**
18212  * tg3_io_slot_reset - called after the pci bus has been reset.
18213  * @pdev: Pointer to PCI device
18214  *
18215  * Restart the card from scratch, as if from a cold-boot.
18216  * At this point, the card has experienced a hard reset,
18217  * followed by fixups by BIOS, and has its config space
18218  * set up identically to what it was at cold boot.
18219  */
18220 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18221 {
18222         struct net_device *netdev = pci_get_drvdata(pdev);
18223         struct tg3 *tp = netdev_priv(netdev);
18224         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18225         int err;
18226
18227         rtnl_lock();
18228
18229         if (pci_enable_device(pdev)) {
18230                 dev_err(&pdev->dev,
18231                         "Cannot re-enable PCI device after reset.\n");
18232                 goto done;
18233         }
18234
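              /*
               * Re-apply the config space saved earlier, then save it
               * again so a later slot reset also starts from this
               * known-good state.
               */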
18235         pci_set_master(pdev);
18236         pci_restore_state(pdev);
18237         pci_save_state(pdev);
18238
18239         if (!netdev || !netif_running(netdev)) {
18240                 rc = PCI_ERS_RESULT_RECOVERED;
18241                 goto done;
18242         }
18243
18244         err = tg3_power_up(tp);
18245         if (err)
18246                 goto done;
18247
18248         rc = PCI_ERS_RESULT_RECOVERED;
18249
18250 done:
18251         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18252                 tg3_napi_enable(tp);
18253                 dev_close(netdev);
18254         }
18255         rtnl_unlock();
18256
18257         return rc;
18258 }
18259
18260 /**
18261  * tg3_io_resume - called when traffic can start flowing again.
18262  * @pdev: Pointer to PCI device
18263  *
18264  * This callback is called when the error recovery driver tells
18265  * us that it's OK to resume normal operation.
18266  */
18267 static void tg3_io_resume(struct pci_dev *pdev)
18268 {
18269         struct net_device *netdev = pci_get_drvdata(pdev);
18270         struct tg3 *tp = netdev_priv(netdev);
18271         int err;
18272
18273         rtnl_lock();
18274
18275         if (!netdev || !netif_running(netdev))
18276                 goto done;
18277
18278         tg3_full_lock(tp, 0);
18279         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18280         tg3_flag_set(tp, INIT_COMPLETE);
18281         err = tg3_restart_hw(tp, true);
18282         if (err) {
18283                 tg3_full_unlock(tp);
18284                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18285                 goto done;
18286         }
18287
18288         netif_device_attach(netdev);
18289
18290         tg3_timer_start(tp);
18291
18292         tg3_netif_start(tp);
18293
18294         tg3_full_unlock(tp);
18295
18296         tg3_phy_start(tp);
18297
18298 done:
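              /*
               * Recovery is complete (or was never started); clear the
               * flag so a future EEH event runs the full error_detected
               * path again.
               */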
18299         tp->pcierr_recovery = false;
18300         rtnl_unlock();
18301 }
18302
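      /*
       * EEH/AER entry points: error_detected quiesces the device and
       * decides whether recovery is possible, slot_reset re-enables it
       * after the bus reset, and resume restarts traffic.
       */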
18303 static const struct pci_error_handlers tg3_err_handler = {
18304         .error_detected = tg3_io_error_detected,
18305         .slot_reset     = tg3_io_slot_reset,
18306         .resume         = tg3_io_resume
18307 };
18308
18309 static struct pci_driver tg3_driver = {
18310         .name           = DRV_MODULE_NAME,
18311         .id_table       = tg3_pci_tbl,
18312         .probe          = tg3_init_one,
18313         .remove         = tg3_remove_one,
18314         .err_handler    = &tg3_err_handler,
18315         .driver.pm      = &tg3_pm_ops,
18316         .shutdown       = tg3_shutdown,
18317 };
18318
18319 module_pci_driver(tg3_driver);