/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2016 Broadcom Corporation.
 *	Copyright (C) 2016-2017 Broadcom Ltd.
 *	Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *	refers to Broadcom Inc. and/or its subsidiaries.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
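
/* Illustrative usage, not part of the original source: the flag name is
 * token-pasted onto TG3_FLAG_, so callers test and set bits as, e.g.,
 *
 *	if (tg3_flag(tp, ENABLE_ASF))
 *		tg3_flag_set(tp, WOL_CAP);
 *
 * where TG3_FLAG_ENABLE_ASF and TG3_FLAG_WOL_CAP are defined in tg3.h.
 */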

#define DRV_MODULE_NAME		"tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			137

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			ETH_ZLEN
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
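/* NEXT_TX is one instance of the '& (foo - 1)' form noted above: since
 * TG3_TX_RING_SIZE (512) is a power of two, the mask is equivalent to
 * ((N) + 1) % TG3_TX_RING_SIZE.
 */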

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7


static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
	[TG3_LINK_TEST]		= { "link test         (online) " },
	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

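/* Some chips also expose a register window in PCI config space: the
 * target offset is written to TG3PCI_REG_BASE_ADDR and the data is
 * moved through TG3PCI_REG_DATA.  indirect_lock serializes users of
 * this shared window.
 */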
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
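
/* Illustrative call, not part of the original source: per the usec_wait
 * comment above, GPIO power switching through GRC_LOCAL_CTRL uses the
 * delayed flush variant, e.g.
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */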

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver isn't holding any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
		fallthrough;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
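
/* Illustrative pairing, not part of the original source: callers bracket
 * access to a shared resource with the lock/unlock helpers, e.g.
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		... touch APE shared memory ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */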

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 20 msec for APE to service the previous event. */
	err = tg3_ape_event_lock(tp, 20000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
				   unsigned long interval)
{
	/* Check whether the heartbeat interval has elapsed */
	if (!tg3_flag(tp, ENABLE_APE) ||
	    time_before(jiffies, tp->ape_hb_jiffies + interval))
		return;

	tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
	tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  that reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

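/* MII management access: the PHY address, register address, and command
 * are packed into a single frame written to MAC_MI_COM, and MI_COM_BUSY
 * is then polled until the PHY completes the transaction or we give up.
 */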
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

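/* Clause-45 MMD registers are reached indirectly through the clause-22
 * MMD control and address registers: select the device address, latch
 * the register address, switch to no-increment data mode, then move the
 * data through the address register.
 */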
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
1550          * accessing the PHY ID registers.  A chip reset is the
1551          * quickest way to bring the device back to an operational state.
1552          */
1553         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554                 tg3_bmcr_reset(tp);
1555
1556         i = mdiobus_register(tp->mdio_bus);
1557         if (i) {
1558                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1559                 mdiobus_free(tp->mdio_bus);
1560                 return i;
1561         }
1562
1563         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564
1565         if (!phydev || !phydev->drv) {
1566                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567                 mdiobus_unregister(tp->mdio_bus);
1568                 mdiobus_free(tp->mdio_bus);
1569                 return -ENODEV;
1570         }
1571
1572         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573         case PHY_ID_BCM57780:
1574                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576                 break;
1577         case PHY_ID_BCM50610:
1578         case PHY_ID_BCM50610M:
1579                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580                                      PHY_BRCM_RX_REFCLK_UNUSED |
1581                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 fallthrough;
1584         case PHY_ID_RTL8211C:
1585                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1586                 break;
1587         case PHY_ID_RTL8201E:
1588         case PHY_ID_BCMAC131:
1589                 phydev->interface = PHY_INTERFACE_MODE_MII;
1590                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1592                 break;
1593         }
1594
1595         tg3_flag_set(tp, MDIOBUS_INITED);
1596
1597         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598                 tg3_mdio_config_5785(tp);
1599
1600         return 0;
1601 }
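/* Note on the registration above: phy_mask = ~(1 << tp->phy_addr)
 * hides every MDIO address except the one this port owns, so
 * mdiobus_register() probes exactly one PHY on the bus.
 */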
1602
1603 static void tg3_mdio_fini(struct tg3 *tp)
1604 {
1605         if (tg3_flag(tp, MDIOBUS_INITED)) {
1606                 tg3_flag_clear(tp, MDIOBUS_INITED);
1607                 mdiobus_unregister(tp->mdio_bus);
1608                 mdiobus_free(tp->mdio_bus);
1609         }
1610 }
1611
1612 /* tp->lock is held. */
1613 static inline void tg3_generate_fw_event(struct tg3 *tp)
1614 {
1615         u32 val;
1616
1617         val = tr32(GRC_RX_CPU_EVENT);
1618         val |= GRC_RX_CPU_DRIVER_EVENT;
1619         tw32_f(GRC_RX_CPU_EVENT, val);
1620
1621         tp->last_event_jiffies = jiffies;
1622 }
1623
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1625
1626 /* tp->lock is held. */
1627 static void tg3_wait_for_event_ack(struct tg3 *tp)
1628 {
1629         int i;
1630         unsigned int delay_cnt;
1631         long time_remain;
1632
1633         /* If enough time has passed, no wait is necessary. */
1634         time_remain = (long)(tp->last_event_jiffies + 1 +
1635                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1636                       (long)jiffies;
1637         if (time_remain < 0)
1638                 return;
1639
1640         /* Check if we can shorten the wait time. */
1641         delay_cnt = jiffies_to_usecs(time_remain);
1642         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1644         delay_cnt = (delay_cnt >> 3) + 1;
1645
1646         for (i = 0; i < delay_cnt; i++) {
1647                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1648                         break;
1649                 if (pci_channel_offline(tp->pdev))
1650                         break;
1651
1652                 udelay(8);
1653         }
1654 }
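/* Illustrative worst case for the loop above: with delay_cnt capped at
 * TG3_FW_EVENT_TIMEOUT_USEC = 2500, it runs (2500 >> 3) + 1 = 313
 * iterations of udelay(8), i.e. ~2.5 msec, matching the firmware event
 * timeout.
 */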
1655
1656 /* tp->lock is held. */
1657 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1658 {
1659         u32 reg, val;
1660
1661         val = 0;
1662         if (!tg3_readphy(tp, MII_BMCR, &reg))
1663                 val = reg << 16;
1664         if (!tg3_readphy(tp, MII_BMSR, &reg))
1665                 val |= (reg & 0xffff);
1666         *data++ = val;
1667
1668         val = 0;
1669         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1670                 val = reg << 16;
1671         if (!tg3_readphy(tp, MII_LPA, &reg))
1672                 val |= (reg & 0xffff);
1673         *data++ = val;
1674
1675         val = 0;
1676         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1677                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1678                         val = reg << 16;
1679                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1680                         val |= (reg & 0xffff);
1681         }
1682         *data++ = val;
1683
1684         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1685                 val = reg << 16;
1686         else
1687                 val = 0;
1688         *data++ = val;
1689 }
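/* Layout of the data[] block filled in above; each word carries two
 * 16-bit MII registers (high half / low half):
 *
 *	data[0] = BMCR      << 16 | BMSR
 *	data[1] = ADVERTISE << 16 | LPA
 *	data[2] = CTRL1000  << 16 | STAT1000   (zero for MII serdes)
 *	data[3] = PHYADDR   << 16
 */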
1690
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1693 {
1694         u32 data[4];
1695
1696         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1697                 return;
1698
1699         tg3_phy_gather_ump_data(tp, data);
1700
1701         tg3_wait_for_event_ack(tp);
1702
1703         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1709
1710         tg3_generate_fw_event(tp);
1711 }
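/* The firmware mailbox handshake used above (and by tg3_stop_fw()
 * below) is: wait for the RX CPU to ack any earlier event, write the
 * command, length and payload into NIC SRAM, then ring the doorbell
 * with tg3_generate_fw_event().
 */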
1712
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1715 {
1716         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717                 /* Wait for RX cpu to ACK the previous event. */
1718                 tg3_wait_for_event_ack(tp);
1719
1720                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1721
1722                 tg3_generate_fw_event(tp);
1723
1724                 /* Wait for RX cpu to ACK this event. */
1725                 tg3_wait_for_event_ack(tp);
1726         }
1727 }
1728
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1731 {
1732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1734
1735         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1736                 switch (kind) {
1737                 case RESET_KIND_INIT:
1738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739                                       DRV_STATE_START);
1740                         break;
1741
1742                 case RESET_KIND_SHUTDOWN:
1743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744                                       DRV_STATE_UNLOAD);
1745                         break;
1746
1747                 case RESET_KIND_SUSPEND:
1748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749                                       DRV_STATE_SUSPEND);
1750                         break;
1751
1752                 default:
1753                         break;
1754                 }
1755         }
1756 }
1757
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1760 {
1761         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1762                 switch (kind) {
1763                 case RESET_KIND_INIT:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_START_DONE);
1766                         break;
1767
1768                 case RESET_KIND_SHUTDOWN:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_UNLOAD_DONE);
1771                         break;
1772
1773                 default:
1774                         break;
1775                 }
1776         }
1777 }
1778
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 {
1782         if (tg3_flag(tp, ENABLE_ASF)) {
1783                 switch (kind) {
1784                 case RESET_KIND_INIT:
1785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786                                       DRV_STATE_START);
1787                         break;
1788
1789                 case RESET_KIND_SHUTDOWN:
1790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791                                       DRV_STATE_UNLOAD);
1792                         break;
1793
1794                 case RESET_KIND_SUSPEND:
1795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796                                       DRV_STATE_SUSPEND);
1797                         break;
1798
1799                 default:
1800                         break;
1801                 }
1802         }
1803 }
1804
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807         int i;
1808         u32 val;
1809
1810         if (tg3_flag(tp, NO_FWARE_REPORTED))
1811                 return 0;
1812
1813         if (tg3_flag(tp, IS_SSB_CORE)) {
1814                 /* We don't use firmware. */
1815                 return 0;
1816         }
1817
1818         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819                 /* Wait up to 20ms for init done. */
1820                 for (i = 0; i < 200; i++) {
1821                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822                                 return 0;
1823                         if (pci_channel_offline(tp->pdev))
1824                                 return -ENODEV;
1825
1826                         udelay(100);
1827                 }
1828                 return -ENODEV;
1829         }
1830
1831         /* Wait for firmware initialization to complete. */
1832         for (i = 0; i < 100000; i++) {
1833                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835                         break;
1836                 if (pci_channel_offline(tp->pdev)) {
1837                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1839                                 netdev_info(tp->dev, "No firmware running\n");
1840                         }
1841
1842                         break;
1843                 }
1844
1845                 udelay(10);
1846         }
1847
1848         /* Chip might not be fitted with firmware.  Some Sun onboard
1849          * parts are configured like that.  So don't signal the timeout
1850          * of the above loop as an error, but do report the lack of
1851          * running firmware once.
1852          */
1853         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1855
1856                 netdev_info(tp->dev, "No firmware running\n");
1857         }
1858
1859         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860                 /* The 57765 A0 needs a little more
1861                  * time to do some important work.
1862                  */
1863                 mdelay(10);
1864         }
1865
1866         return 0;
1867 }
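/* The value polled for above completes a two-sided handshake: the
 * driver deposits NIC_SRAM_FIRMWARE_MBOX_MAGIC1 in the mailbox before a
 * reset (see tg3_write_sig_pre_reset() above), and the bootcode is
 * expected to write back the one's complement of that magic once its
 * own initialization is finished.
 */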
1868
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871         if (!netif_carrier_ok(tp->dev)) {
1872                 netif_info(tp, link, tp->dev, "Link is down\n");
1873                 tg3_ump_link_report(tp);
1874         } else if (netif_msg_link(tp)) {
1875                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876                             (tp->link_config.active_speed == SPEED_1000 ?
1877                              1000 :
1878                              (tp->link_config.active_speed == SPEED_100 ?
1879                               100 : 10)),
1880                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1881                              "full" : "half"));
1882
1883                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885                             "on" : "off",
1886                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887                             "on" : "off");
1888
1889                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890                         netdev_info(tp->dev, "EEE is %s\n",
1891                                     tp->setlpicnt ? "enabled" : "disabled");
1892
1893                 tg3_ump_link_report(tp);
1894         }
1895
1896         tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901         u32 flowctrl = 0;
1902
1903         if (adv & ADVERTISE_PAUSE_CAP) {
1904                 flowctrl |= FLOW_CTRL_RX;
1905                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1906                         flowctrl |= FLOW_CTRL_TX;
1907         } else if (adv & ADVERTISE_PAUSE_ASYM)
1908                 flowctrl |= FLOW_CTRL_TX;
1909
1910         return flowctrl;
1911 }
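/* Decode table implemented above (IEEE 802.3 annex 28B pause bits):
 *
 *	ADVERTISE_PAUSE_CAP  ADVERTISE_PAUSE_ASYM  decoded flowctrl
 *	        0                    0             none
 *	        0                    1             FLOW_CTRL_TX
 *	        1                    0             FLOW_CTRL_RX | FLOW_CTRL_TX
 *	        1                    1             FLOW_CTRL_RX
 *
 * tg3_decode_flowctrl_1000X() below applies the same mapping to the
 * 1000BASE-X ADVERTISE_1000XPAUSE/ADVERTISE_1000XPSE_ASYM bits.
 */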
1912
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915         u16 miireg;
1916
1917         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918                 miireg = ADVERTISE_1000XPAUSE;
1919         else if (flow_ctrl & FLOW_CTRL_TX)
1920                 miireg = ADVERTISE_1000XPSE_ASYM;
1921         else if (flow_ctrl & FLOW_CTRL_RX)
1922                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923         else
1924                 miireg = 0;
1925
1926         return miireg;
1927 }
1928
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931         u32 flowctrl = 0;
1932
1933         if (adv & ADVERTISE_1000XPAUSE) {
1934                 flowctrl |= FLOW_CTRL_RX;
1935                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936                         flowctrl |= FLOW_CTRL_TX;
1937         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1938                 flowctrl |= FLOW_CTRL_TX;
1939
1940         return flowctrl;
1941 }
1942
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945         u8 cap = 0;
1946
1947         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950                 if (lcladv & ADVERTISE_1000XPAUSE)
1951                         cap = FLOW_CTRL_RX;
1952                 if (rmtadv & ADVERTISE_1000XPAUSE)
1953                         cap = FLOW_CTRL_TX;
1954         }
1955
1956         return cap;
1957 }
1958
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961         u8 autoneg;
1962         u8 flowctrl = 0;
1963         u32 old_rx_mode = tp->rx_mode;
1964         u32 old_tx_mode = tp->tx_mode;
1965
1966         if (tg3_flag(tp, USE_PHYLIB))
1967                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968         else
1969                 autoneg = tp->link_config.autoneg;
1970
1971         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974                 else
1975                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976         } else
1977                 flowctrl = tp->link_config.flowctrl;
1978
1979         tp->link_config.active_flowctrl = flowctrl;
1980
1981         if (flowctrl & FLOW_CTRL_RX)
1982                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983         else
1984                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985
1986         if (old_rx_mode != tp->rx_mode)
1987                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1988
1989         if (flowctrl & FLOW_CTRL_TX)
1990                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991         else
1992                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993
1994         if (old_tx_mode != tp->tx_mode)
1995                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000         u8 oldflowctrl, linkmesg = 0;
2001         u32 mac_mode, lcl_adv, rmt_adv;
2002         struct tg3 *tp = netdev_priv(dev);
2003         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004
2005         spin_lock_bh(&tp->lock);
2006
2007         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008                                     MAC_MODE_HALF_DUPLEX);
2009
2010         oldflowctrl = tp->link_config.active_flowctrl;
2011
2012         if (phydev->link) {
2013                 lcl_adv = 0;
2014                 rmt_adv = 0;
2015
2016                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2018                 else if (phydev->speed == SPEED_1000 ||
2019                          tg3_asic_rev(tp) != ASIC_REV_5785)
2020                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021                 else
2022                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2023
2024                 if (phydev->duplex == DUPLEX_HALF)
2025                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2026                 else {
2027                         lcl_adv = mii_advertise_flowctrl(
2028                                   tp->link_config.flowctrl);
2029
2030                         if (phydev->pause)
2031                                 rmt_adv = LPA_PAUSE_CAP;
2032                         if (phydev->asym_pause)
2033                                 rmt_adv |= LPA_PAUSE_ASYM;
2034                 }
2035
2036                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037         } else
2038                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039
2040         if (mac_mode != tp->mac_mode) {
2041                 tp->mac_mode = mac_mode;
2042                 tw32_f(MAC_MODE, tp->mac_mode);
2043                 udelay(40);
2044         }
2045
2046         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047                 if (phydev->speed == SPEED_10)
2048                         tw32(MAC_MI_STAT,
2049                              MAC_MI_STAT_10MBPS_MODE |
2050                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051                 else
2052                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053         }
2054
2055         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056                 tw32(MAC_TX_LENGTHS,
2057                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058                       (6 << TX_LENGTHS_IPG_SHIFT) |
2059                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060         else
2061                 tw32(MAC_TX_LENGTHS,
2062                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063                       (6 << TX_LENGTHS_IPG_SHIFT) |
2064                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065
2066         if (phydev->link != tp->old_link ||
2067             phydev->speed != tp->link_config.active_speed ||
2068             phydev->duplex != tp->link_config.active_duplex ||
2069             oldflowctrl != tp->link_config.active_flowctrl)
2070                 linkmesg = 1;
2071
2072         tp->old_link = phydev->link;
2073         tp->link_config.active_speed = phydev->speed;
2074         tp->link_config.active_duplex = phydev->duplex;
2075
2076         spin_unlock_bh(&tp->lock);
2077
2078         if (linkmesg)
2079                 tg3_link_report(tp);
2080 }
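/* tg3_adjust_link() is the link-change callback handed to phylib via
 * phy_connect() in tg3_phy_init() below; phylib calls it whenever the
 * negotiated speed, duplex or link state of the attached PHY changes.
 */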
2081
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084         struct phy_device *phydev;
2085
2086         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087                 return 0;
2088
2089         /* Bring the PHY back to a known state. */
2090         tg3_bmcr_reset(tp);
2091
2092         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093
2094         /* Attach the MAC to the PHY. */
2095         phydev = phy_connect(tp->dev, phydev_name(phydev),
2096                              tg3_adjust_link, phydev->interface);
2097         if (IS_ERR(phydev)) {
2098                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099                 return PTR_ERR(phydev);
2100         }
2101
2102         /* Mask with MAC supported features. */
2103         switch (phydev->interface) {
2104         case PHY_INTERFACE_MODE_GMII:
2105         case PHY_INTERFACE_MODE_RGMII:
2106                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107                         phy_set_max_speed(phydev, SPEED_1000);
2108                         phy_support_asym_pause(phydev);
2109                         break;
2110                 }
2111                 fallthrough;
2112         case PHY_INTERFACE_MODE_MII:
2113                 phy_set_max_speed(phydev, SPEED_100);
2114                 phy_support_asym_pause(phydev);
2115                 break;
2116         default:
2117                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2118                 return -EINVAL;
2119         }
2120
2121         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2122
2123         phy_attached_info(phydev);
2124
2125         return 0;
2126 }
2127
2128 static void tg3_phy_start(struct tg3 *tp)
2129 {
2130         struct phy_device *phydev;
2131
2132         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133                 return;
2134
2135         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2136
2137         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2138                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2139                 phydev->speed = tp->link_config.speed;
2140                 phydev->duplex = tp->link_config.duplex;
2141                 phydev->autoneg = tp->link_config.autoneg;
2142                 ethtool_convert_legacy_u32_to_link_mode(
2143                         phydev->advertising, tp->link_config.advertising);
2144         }
2145
2146         phy_start(phydev);
2147
2148         phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154                 return;
2155
2156         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164         }
2165 }
2166
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169         int err;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173                 return 0;
2174
2175         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176                 /* Cannot do read-modify-write on 5401 */
2177                 err = tg3_phy_auxctl_write(tp,
2178                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180                                            0x4c20);
2181                 goto done;
2182         }
2183
2184         err = tg3_phy_auxctl_read(tp,
2185                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186         if (err)
2187                 return err;
2188
2189         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190         err = tg3_phy_auxctl_write(tp,
2191                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194         return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199         u32 phytest;
2200
2201         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202                 u32 phy;
2203
2204                 tg3_writephy(tp, MII_TG3_FET_TEST,
2205                              phytest | MII_TG3_FET_SHADOW_EN);
2206                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207                         if (enable)
2208                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209                         else
2210                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212                 }
2213                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214         }
2215 }
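/* FET shadow-register access pattern used above: setting
 * MII_TG3_FET_SHADOW_EN in MII_TG3_FET_TEST exposes the shadow bank,
 * the shadow register is read-modify-written, and the original
 * MII_TG3_FET_TEST value is restored to hide the bank again.
 */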
2216
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219         u32 reg;
2220
2221         if (!tg3_flag(tp, 5705_PLUS) ||
2222             (tg3_flag(tp, 5717_PLUS) &&
2223              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224                 return;
2225
2226         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227                 tg3_phy_fet_toggle_apd(tp, enable);
2228                 return;
2229         }
2230
2231         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2232               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2233               MII_TG3_MISC_SHDW_SCR5_SDTL |
2234               MII_TG3_MISC_SHDW_SCR5_C125OE;
2235         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2236                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2237
2238         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2239
2240
2241         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2242         if (enable)
2243                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2244
2245         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2246 }
2247
2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2249 {
2250         u32 phy;
2251
2252         if (!tg3_flag(tp, 5705_PLUS) ||
2253             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2254                 return;
2255
2256         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2257                 u32 ephy;
2258
2259                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2260                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2261
2262                         tg3_writephy(tp, MII_TG3_FET_TEST,
2263                                      ephy | MII_TG3_FET_SHADOW_EN);
2264                         if (!tg3_readphy(tp, reg, &phy)) {
2265                                 if (enable)
2266                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2267                                 else
2268                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269                                 tg3_writephy(tp, reg, phy);
2270                         }
2271                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2272                 }
2273         } else {
2274                 int ret;
2275
2276                 ret = tg3_phy_auxctl_read(tp,
2277                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2278                 if (!ret) {
2279                         if (enable)
2280                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2281                         else
2282                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283                         tg3_phy_auxctl_write(tp,
2284                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2285                 }
2286         }
2287 }
2288
2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2290 {
2291         int ret;
2292         u32 val;
2293
2294         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2295                 return;
2296
2297         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2298         if (!ret)
2299                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2301 }
2302
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2304 {
2305         u32 otp, phy;
2306
2307         if (!tp->phy_otp)
2308                 return;
2309
2310         otp = tp->phy_otp;
2311
2312         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2313                 return;
2314
2315         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2318
2319         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2322
2323         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2326
2327         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2329
2330         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2332
2333         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2336
2337         tg3_phy_toggle_auxctl_smdsp(tp, false);
2338 }
2339
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2341 {
2342         u32 val;
2343         struct ethtool_eee *dest = &tp->eee;
2344
2345         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2346                 return;
2347
2348         if (eee)
2349                 dest = eee;
2350
2351         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2352                 return;
2353
2354         /* Pull eee_active */
2355         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356             val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2357                 dest->eee_active = 1;
2358         } else
2359                 dest->eee_active = 0;
2360
2361         /* Pull lp advertised settings */
2362         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2363                 return;
2364         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365
2366         /* Pull advertised and eee_enabled settings */
2367         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2368                 return;
2369         dest->eee_enabled = !!val;
2370         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372         /* Pull tx_lpi_enabled */
2373         val = tr32(TG3_CPMU_EEE_MODE);
2374         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2375
2376         /* Pull lpi timer value */
2377         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2378 }
2379
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2381 {
2382         u32 val;
2383
2384         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2385                 return;
2386
2387         tp->setlpicnt = 0;
2388
2389         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2390             current_link_up &&
2391             tp->link_config.active_duplex == DUPLEX_FULL &&
2392             (tp->link_config.active_speed == SPEED_100 ||
2393              tp->link_config.active_speed == SPEED_1000)) {
2394                 u32 eeectl;
2395
2396                 if (tp->link_config.active_speed == SPEED_1000)
2397                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2398                 else
2399                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2400
2401                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2402
2403                 tg3_eee_pull_config(tp, NULL);
2404                 if (tp->eee.eee_active)
2405                         tp->setlpicnt = 2;
2406         }
2407
2408         if (!tp->setlpicnt) {
2409                 if (current_link_up &&
2410                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2413                 }
2414
2415                 val = tr32(TG3_CPMU_EEE_MODE);
2416                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2417         }
2418 }
2419
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2421 {
2422         u32 val;
2423
2424         if (tp->link_config.active_speed == SPEED_1000 &&
2425             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427              tg3_flag(tp, 57765_CLASS)) &&
2428             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429                 val = MII_TG3_DSP_TAP26_ALNOKO |
2430                       MII_TG3_DSP_TAP26_RMRXSTO;
2431                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2433         }
2434
2435         val = tr32(TG3_CPMU_EEE_MODE);
2436         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2437 }
2438
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2440 {
2441         int limit = 100;
2442
2443         while (limit--) {
2444                 u32 tmp32;
2445
2446                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447                         if ((tmp32 & 0x1000) == 0)
2448                                 break;
2449                 }
2450         }
2451         if (limit < 0)
2452                 return -EBUSY;
2453
2454         return 0;
2455 }
2456
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2458 {
2459         static const u32 test_pat[4][6] = {
2460         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2464         };
2465         int chan;
2466
2467         for (chan = 0; chan < 4; chan++) {
2468                 int i;
2469
2470                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471                              (chan * 0x2000) | 0x0200);
2472                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2473
2474                 for (i = 0; i < 6; i++)
2475                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2476                                      test_pat[chan][i]);
2477
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479                 if (tg3_wait_macro_done(tp)) {
2480                         *resetp = 1;
2481                         return -EBUSY;
2482                 }
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485                              (chan * 0x2000) | 0x0200);
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487                 if (tg3_wait_macro_done(tp)) {
2488                         *resetp = 1;
2489                         return -EBUSY;
2490                 }
2491
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 for (i = 0; i < 6; i += 2) {
2499                         u32 low, high;
2500
2501                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503                             tg3_wait_macro_done(tp)) {
2504                                 *resetp = 1;
2505                                 return -EBUSY;
2506                         }
2507                         low &= 0x7fff;
2508                         high &= 0x000f;
2509                         if (low != test_pat[chan][i] ||
2510                             high != test_pat[chan][i+1]) {
2511                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2514
2515                                 return -EBUSY;
2516                         }
2517                 }
2518         }
2519
2520         return 0;
2521 }
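/* The loop above exercises each of the four DSP channels (0x2000
 * address stride): it loads six test words per channel, latches them
 * with the 0x0202/0x0082/0x0802 control writes, then reads the pattern
 * back and compares only the bits the hardware retains (low 15 bits of
 * the even words, low 4 bits of the odd ones).  On any mismatch it
 * issues a fixup write sequence and returns -EBUSY so the caller can
 * reset the PHY and retry.
 */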
2522
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2524 {
2525         int chan;
2526
2527         for (chan = 0; chan < 4; chan++) {
2528                 int i;
2529
2530                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531                              (chan * 0x2000) | 0x0200);
2532                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533                 for (i = 0; i < 6; i++)
2534                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536                 if (tg3_wait_macro_done(tp))
2537                         return -EBUSY;
2538         }
2539
2540         return 0;
2541 }
2542
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2544 {
2545         u32 reg32, phy9_orig;
2546         int retries, do_phy_reset, err;
2547
2548         retries = 10;
2549         do_phy_reset = 1;
2550         do {
2551                 if (do_phy_reset) {
2552                         err = tg3_bmcr_reset(tp);
2553                         if (err)
2554                                 return err;
2555                         do_phy_reset = 0;
2556                 }
2557
2558                 /* Disable transmitter and interrupt.  */
2559                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2560                         continue;
2561
2562                 reg32 |= 0x3000;
2563                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2564
2565                 /* Set full-duplex, 1000 Mbps.  */
2566                 tg3_writephy(tp, MII_BMCR,
2567                              BMCR_FULLDPLX | BMCR_SPEED1000);
2568
2569                 /* Set to master mode.  */
2570                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2571                         continue;
2572
2573                 tg3_writephy(tp, MII_CTRL1000,
2574                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2575
2576                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2577                 if (err)
2578                         return err;
2579
2580                 /* Block the PHY control access.  */
2581                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2582
2583                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2584                 if (!err)
2585                         break;
2586         } while (--retries);
2587
2588         err = tg3_phy_reset_chanpat(tp);
2589         if (err)
2590                 return err;
2591
2592         tg3_phydsp_write(tp, 0x8005, 0x0000);
2593
2594         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2596
2597         tg3_phy_toggle_auxctl_smdsp(tp, false);
2598
2599         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2600
2601         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2602         if (err)
2603                 return err;
2604
2605         reg32 &= ~0x3000;
2606         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2607
2608         return 0;
2609 }
2610
2611 static void tg3_carrier_off(struct tg3 *tp)
2612 {
2613         netif_carrier_off(tp->dev);
2614         tp->link_up = false;
2615 }
2616
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2618 {
2619         if (tg3_flag(tp, ENABLE_ASF))
2620                 netdev_warn(tp->dev,
2621                             "Management side-band traffic will be interrupted during phy settings change\n");
2622 }
2623
2624 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2625  * workarounds (OTP, APD, auto-MDIX and wirespeed) afterwards.
2626  */
2627 static int tg3_phy_reset(struct tg3 *tp)
2628 {
2629         u32 val, cpmuctrl;
2630         int err;
2631
2632         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633                 val = tr32(GRC_MISC_CFG);
2634                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2635                 udelay(40);
2636         }
2637         err  = tg3_readphy(tp, MII_BMSR, &val);
2638         err |= tg3_readphy(tp, MII_BMSR, &val);
2639         if (err != 0)
2640                 return -EBUSY;
2641
2642         if (netif_running(tp->dev) && tp->link_up) {
2643                 netif_carrier_off(tp->dev);
2644                 tg3_link_report(tp);
2645         }
2646
2647         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649             tg3_asic_rev(tp) == ASIC_REV_5705) {
2650                 err = tg3_phy_reset_5703_4_5(tp);
2651                 if (err)
2652                         return err;
2653                 goto out;
2654         }
2655
2656         cpmuctrl = 0;
2657         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2660                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661                         tw32(TG3_CPMU_CTRL,
2662                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2663         }
2664
2665         err = tg3_bmcr_reset(tp);
2666         if (err)
2667                 return err;
2668
2669         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2672
2673                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2674         }
2675
2676         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2681                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2682                         udelay(40);
2683                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2684                 }
2685         }
2686
2687         if (tg3_flag(tp, 5717_PLUS) &&
2688             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2689                 return 0;
2690
2691         tg3_phy_apply_otp(tp);
2692
2693         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694                 tg3_phy_toggle_apd(tp, true);
2695         else
2696                 tg3_phy_toggle_apd(tp, false);
2697
2698 out:
2699         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2703                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2704         }
2705
2706         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709         }
2710
2711         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2714                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2715                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2717                 }
2718         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723                                 tg3_writephy(tp, MII_TG3_TEST1,
2724                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2725                         } else
2726                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2727
2728                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2729                 }
2730         }
2731
2732         /* Set the extended packet length bit (bit 14) on all chips
2733          * that support jumbo frames. */
2734         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735                 /* Cannot do read-modify-write on 5401 */
2736                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738                 /* Set bit 14 with read-modify-write to preserve other bits */
2739                 err = tg3_phy_auxctl_read(tp,
2740                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2741                 if (!err)
2742                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2744         }
2745
2746         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2747          * jumbo frames transmission.
2748          */
2749         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2753         }
2754
2755         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756                 /* adjust output voltage */
2757                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2758         }
2759
2760         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2762
2763         tg3_phy_toggle_automdix(tp, true);
2764         tg3_phy_set_wirespeed(tp);
2765         return 0;
2766 }
2767
2768 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2770 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2771                                           TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776          (TG3_GPIO_MSG_DRVR_PRES << 12))
2777
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782          (TG3_GPIO_MSG_NEED_VAUX << 12))
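
/* The GPIO message word packs one TG3_GPIO_MSG_* nibble per PCI
 * function: function n owns bits 4n..4n+3, so e.g. a driver-present
 * announcement from function 2 is (TG3_GPIO_MSG_DRVR_PRES << 8).
 */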
2783
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2785 {
2786         u32 status, shift;
2787
2788         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789             tg3_asic_rev(tp) == ASIC_REV_5719)
2790                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2791         else
2792                 status = tr32(TG3_CPMU_DRV_STATUS);
2793
2794         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795         status &= ~(TG3_GPIO_MSG_MASK << shift);
2796         status |= (newstat << shift);
2797
2798         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799             tg3_asic_rev(tp) == ASIC_REV_5719)
2800                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2801         else
2802                 tw32(TG3_CPMU_DRV_STATUS, status);
2803
2804         return status >> TG3_APE_GPIO_MSG_SHIFT;
2805 }
2806
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2808 {
2809         if (!tg3_flag(tp, IS_NIC))
2810                 return 0;
2811
2812         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814             tg3_asic_rev(tp) == ASIC_REV_5720) {
2815                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2816                         return -EIO;
2817
2818                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2819
2820                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2822
2823                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2824         } else {
2825                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2827         }
2828
2829         return 0;
2830 }
2831
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2833 {
2834         u32 grc_local_ctrl;
2835
2836         if (!tg3_flag(tp, IS_NIC) ||
2837             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838             tg3_asic_rev(tp) == ASIC_REV_5701)
2839                 return;
2840
2841         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2842
2843         tw32_wait_f(GRC_LOCAL_CTRL,
2844                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2846
2847         tw32_wait_f(GRC_LOCAL_CTRL,
2848                     grc_local_ctrl,
2849                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2850
2851         tw32_wait_f(GRC_LOCAL_CTRL,
2852                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 }
2855
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2857 {
2858         if (!tg3_flag(tp, IS_NIC))
2859                 return;
2860
2861         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862             tg3_asic_rev(tp) == ASIC_REV_5701) {
2863                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864                             (GRC_LCLCTRL_GPIO_OE0 |
2865                              GRC_LCLCTRL_GPIO_OE1 |
2866                              GRC_LCLCTRL_GPIO_OE2 |
2867                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2868                              GRC_LCLCTRL_GPIO_OUTPUT1),
2869                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2870         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874                                      GRC_LCLCTRL_GPIO_OE1 |
2875                                      GRC_LCLCTRL_GPIO_OE2 |
2876                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2877                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2878                                      tp->grc_local_ctrl;
2879                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2881
2882                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2885
2886                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889         } else {
2890                 u32 no_gpio2;
2891                 u32 grc_local_ctrl = 0;
2892
2893                 /* Workaround to prevent drawing excessive current. */
2894                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2897                                     grc_local_ctrl,
2898                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2899                 }
2900
2901                 /* On 5753 and variants, GPIO2 cannot be used. */
2902                 no_gpio2 = tp->nic_sram_data_cfg &
2903                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2904
2905                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906                                   GRC_LCLCTRL_GPIO_OE1 |
2907                                   GRC_LCLCTRL_GPIO_OE2 |
2908                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2909                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2910                 if (no_gpio2) {
2911                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2913                 }
2914                 tw32_wait_f(GRC_LOCAL_CTRL,
2915                             tp->grc_local_ctrl | grc_local_ctrl,
2916                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2917
2918                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2919
2920                 tw32_wait_f(GRC_LOCAL_CTRL,
2921                             tp->grc_local_ctrl | grc_local_ctrl,
2922                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924                 if (!no_gpio2) {
2925                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926                         tw32_wait_f(GRC_LOCAL_CTRL,
2927                                     tp->grc_local_ctrl | grc_local_ctrl,
2928                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2929                 }
2930         }
2931 }
2932
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2934 {
2935         u32 msg = 0;
2936
2937         /* Serialize power state transitions */
2938         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2939                 return;
2940
2941         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942                 msg = TG3_GPIO_MSG_NEED_VAUX;
2943
2944         msg = tg3_set_function_status(tp, msg);
2945
2946         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2947                 goto done;
2948
2949         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950                 tg3_pwrsrc_switch_to_vaux(tp);
2951         else
2952                 tg3_pwrsrc_die_with_vmain(tp);
2953
2954 done:
2955         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2956 }
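
/* Editor's summary of the handshake above (illustrative, not driver
 * code): each PCI function ORs TG3_GPIO_MSG_NEED_VAUX into the shared
 * function status when ASF, APE or WOL requires auxiliary power. If
 * any function still reports a driver present, the power source is
 * left untouched; otherwise the merged NEED_VAUX bits decide between
 * tg3_pwrsrc_switch_to_vaux() and tg3_pwrsrc_die_with_vmain().
 */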
2957
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2959 {
2960         bool need_vaux = false;
2961
2962         /* The GPIOs do something completely different on 57765. */
2963         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2964                 return;
2965
2966         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968             tg3_asic_rev(tp) == ASIC_REV_5720) {
2969                 tg3_frob_aux_power_5717(tp, include_wol ?
2970                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2971                 return;
2972         }
2973
2974         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975                 struct net_device *dev_peer;
2976
2977                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2978
2979                 /* remove_one() may have been run on the peer. */
2980                 if (dev_peer) {
2981                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2982
2983                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2984                                 return;
2985
2986                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987                             tg3_flag(tp_peer, ENABLE_ASF))
2988                                 need_vaux = true;
2989                 }
2990         }
2991
2992         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993             tg3_flag(tp, ENABLE_ASF))
2994                 need_vaux = true;
2995
2996         if (need_vaux)
2997                 tg3_pwrsrc_switch_to_vaux(tp);
2998         else
2999                 tg3_pwrsrc_die_with_vmain(tp);
3000 }
3001
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3003 {
3004         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3005                 return 1;
3006         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007                 if (speed != SPEED_10)
3008                         return 1;
3009         } else if (speed == SPEED_10)
3010                 return 1;
3011
3012         return 0;
3013 }
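
/* The mapping above, in table form (editor's illustration):
 *
 *   led_ctrl == LED_CTRL_MODE_PHY_2   ->  inverted at any speed
 *   PHY is a BCM5411                  ->  inverted except at 10 Mbps
 *   any other PHY                     ->  inverted only at 10 Mbps
 *
 * The nonzero return feeds MAC_MODE_LINK_POLARITY in the callers
 * later in this file.
 */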
3014
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3016 {
3017         switch (tg3_asic_rev(tp)) {
3018         case ASIC_REV_5700:
3019         case ASIC_REV_5704:
3020                 return true;
3021         case ASIC_REV_5780:
3022                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3023                         return true;
3024                 return false;
3025         case ASIC_REV_5717:
3026                 if (!tp->pci_fn)
3027                         return true;
3028                 return false;
3029         case ASIC_REV_5719:
3030         case ASIC_REV_5720:
3031                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3032                     !tp->pci_fn)
3033                         return true;
3034                 return false;
3035         }
3036
3037         return false;
3038 }
3039
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3041 {
3042         switch (tg3_asic_rev(tp)) {
3043         case ASIC_REV_5719:
3044         case ASIC_REV_5720:
3045                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3046                     !tp->pci_fn)
3047                         return true;
3048                 return false;
3049         }
3050
3051         return false;
3052 }
3053
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3055 {
3056         u32 val;
3057
3058         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3059                 return;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3065
3066                         sg_dig_ctrl |=
3067                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3070                 }
3071                 return;
3072         }
3073
3074         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3075                 tg3_bmcr_reset(tp);
3076                 val = tr32(GRC_MISC_CFG);
3077                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3078                 udelay(40);
3079                 return;
3080         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3081                 u32 phytest;
3082                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3083                         u32 phy;
3084
3085                         tg3_writephy(tp, MII_ADVERTISE, 0);
3086                         tg3_writephy(tp, MII_BMCR,
3087                                      BMCR_ANENABLE | BMCR_ANRESTART);
3088
3089                         tg3_writephy(tp, MII_TG3_FET_TEST,
3090                                      phytest | MII_TG3_FET_SHADOW_EN);
3091                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093                                 tg3_writephy(tp,
3094                                              MII_TG3_FET_SHDW_AUXMODE4,
3095                                              phy);
3096                         }
3097                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3098                 }
3099                 return;
3100         } else if (do_low_power) {
3101                 if (!tg3_phy_led_bug(tp))
3102                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3104
3105                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3108                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3109         }
3110
3111         /* On some chips the PHY must not be powered down
3112          * because of hardware bugs.
3113          */
3114         if (tg3_phy_power_bug(tp))
3115                 return;
3116
3117         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3123         }
3124
3125         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3126 }
3127
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3130 {
3131         if (tg3_flag(tp, NVRAM)) {
3132                 int i;
3133
3134                 if (tp->nvram_lock_cnt == 0) {
3135                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136                         for (i = 0; i < 8000; i++) {
3137                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3138                                         break;
3139                                 udelay(20);
3140                         }
3141                         if (i == 8000) {
3142                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3143                                 return -ENODEV;
3144                         }
3145                 }
3146                 tp->nvram_lock_cnt++;
3147         }
3148         return 0;
3149 }
3150
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3153 {
3154         if (tg3_flag(tp, NVRAM)) {
3155                 if (tp->nvram_lock_cnt > 0)
3156                         tp->nvram_lock_cnt--;
3157                 if (tp->nvram_lock_cnt == 0)
3158                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3159         }
3160 }
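
/* Usage sketch (illustrative): the lock nests via nvram_lock_cnt, so
 * callers that may already hold the arbitration can still pair the
 * calls symmetrically:
 *
 *	if (tg3_nvram_lock(tp))
 *		return -ENODEV;
 *	... access NVRAM registers ...
 *	tg3_nvram_unlock(tp);
 *
 * The NVRAM_SWARB request is only issued on the 0 -> 1 transition of
 * the count and released again on the 1 -> 0 transition.
 */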
3161
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3164 {
3165         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166                 u32 nvaccess = tr32(NVRAM_ACCESS);
3167
3168                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3169         }
3170 }
3171
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3174 {
3175         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176                 u32 nvaccess = tr32(NVRAM_ACCESS);
3177
3178                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3179         }
3180 }
3181
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183                                         u32 offset, u32 *val)
3184 {
3185         u32 tmp;
3186         int i;
3187
3188         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3189                 return -EINVAL;
3190
3191         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192                                         EEPROM_ADDR_DEVID_MASK |
3193                                         EEPROM_ADDR_READ);
3194         tw32(GRC_EEPROM_ADDR,
3195              tmp |
3196              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198               EEPROM_ADDR_ADDR_MASK) |
3199              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3200
3201         for (i = 0; i < 1000; i++) {
3202                 tmp = tr32(GRC_EEPROM_ADDR);
3203
3204                 if (tmp & EEPROM_ADDR_COMPLETE)
3205                         break;
3206                 msleep(1);
3207         }
3208         if (!(tmp & EEPROM_ADDR_COMPLETE))
3209                 return -EBUSY;
3210
3211         tmp = tr32(GRC_EEPROM_DATA);
3212
3213         /*
3214          * The data will always be opposite the native endian
3215          * format.  Perform a blind byteswap to compensate.
3216          */
3217         *val = swab32(tmp);
3218
3219         return 0;
3220 }
3221
3222 #define NVRAM_CMD_TIMEOUT 10000
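
/* Editor's note: each of the NVRAM_CMD_TIMEOUT iterations in
 * tg3_nvram_exec_cmd() below sleeps 10-40 us, so the worst-case wait
 * is roughly 0.1-0.4 seconds.
 */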
3223
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3225 {
3226         int i;
3227
3228         tw32(NVRAM_CMD, nvram_cmd);
3229         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230                 usleep_range(10, 40);
3231                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3232                         udelay(10);
3233                         break;
3234                 }
3235         }
3236
3237         if (i == NVRAM_CMD_TIMEOUT)
3238                 return -EBUSY;
3239
3240         return 0;
3241 }
3242
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3244 {
3245         if (tg3_flag(tp, NVRAM) &&
3246             tg3_flag(tp, NVRAM_BUFFERED) &&
3247             tg3_flag(tp, FLASH) &&
3248             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249             (tp->nvram_jedecnum == JEDEC_ATMEL))
3250
3251                 addr = ((addr / tp->nvram_pagesize) <<
3252                         ATMEL_AT45DB0X1B_PAGE_POS) +
3253                        (addr % tp->nvram_pagesize);
3254
3255         return addr;
3256 }
3257
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3259 {
3260         if (tg3_flag(tp, NVRAM) &&
3261             tg3_flag(tp, NVRAM_BUFFERED) &&
3262             tg3_flag(tp, FLASH) &&
3263             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264             (tp->nvram_jedecnum == JEDEC_ATMEL))
3265
3266                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267                         tp->nvram_pagesize) +
3268                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3269
3270         return addr;
3271 }
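
/* Worked example (editor's illustration, assuming the 264-byte pages
 * of an Atmel AT45DB011B part and ATMEL_AT45DB0X1B_PAGE_POS == 9):
 *
 *   linear addr 1000  ->  page 3, offset 208
 *   physical addr     =  (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() performs the inverse mapping, so the two
 * functions round-trip any in-range address.
 */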
3272
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274  * the byteswapping settings for all other register accesses.
3275  * tg3 devices are BE devices, so on a BE machine, the data
3276  * returned will be exactly as it is seen in NVRAM.  On a LE
3277  * machine, the 32-bit value will be byteswapped.
3278  */
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3280 {
3281         int ret;
3282
3283         if (!tg3_flag(tp, NVRAM))
3284                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3285
3286         offset = tg3_nvram_phys_addr(tp, offset);
3287
3288         if (offset > NVRAM_ADDR_MSK)
3289                 return -EINVAL;
3290
3291         ret = tg3_nvram_lock(tp);
3292         if (ret)
3293                 return ret;
3294
3295         tg3_enable_nvram_access(tp);
3296
3297         tw32(NVRAM_ADDR, offset);
3298         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3300
3301         if (ret == 0)
3302                 *val = tr32(NVRAM_RDDATA);
3303
3304         tg3_disable_nvram_access(tp);
3305
3306         tg3_nvram_unlock(tp);
3307
3308         return ret;
3309 }
3310
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3313 {
3314         u32 v;
3315         int res = tg3_nvram_read(tp, offset, &v);
3316         if (!res)
3317                 *val = cpu_to_be32(v);
3318         return res;
3319 }
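
/* Usage sketch (illustrative, not part of the driver): reading a
 * dword-aligned region into a bytestream buffer, much like the
 * ethtool EEPROM read path later in this file:
 *
 *	__be32 *buf = data;
 *	u32 i;
 *
 *	for (i = 0; i < len; i += 4)
 *		if (tg3_nvram_read_be32(tp, offset + i, &buf[i / 4]))
 *			return -EIO;
 */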
3320
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322                                     u32 offset, u32 len, u8 *buf)
3323 {
3324         int i, j, rc = 0;
3325         u32 val;
3326
3327         for (i = 0; i < len; i += 4) {
3328                 u32 addr;
3329                 __be32 data;
3330
3331                 addr = offset + i;
3332
3333                 memcpy(&data, buf + i, 4);
3334
3335                 /*
3336                  * The SEEPROM interface expects the data to always be opposite
3337                  * the native endian format.  We accomplish this by reversing
3338                  * all the operations that would have been performed on the
3339                  * data from a call to tg3_nvram_read_be32().
3340                  */
3341                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3342
3343                 val = tr32(GRC_EEPROM_ADDR);
3344                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3345
3346                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3347                         EEPROM_ADDR_READ);
3348                 tw32(GRC_EEPROM_ADDR, val |
3349                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3350                         (addr & EEPROM_ADDR_ADDR_MASK) |
3351                         EEPROM_ADDR_START |
3352                         EEPROM_ADDR_WRITE);
3353
3354                 for (j = 0; j < 1000; j++) {
3355                         val = tr32(GRC_EEPROM_ADDR);
3356
3357                         if (val & EEPROM_ADDR_COMPLETE)
3358                                 break;
3359                         msleep(1);
3360                 }
3361                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3362                         rc = -EBUSY;
3363                         break;
3364                 }
3365         }
3366
3367         return rc;
3368 }
3369
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3372                 u8 *buf)
3373 {
3374         int ret = 0;
3375         u32 pagesize = tp->nvram_pagesize;
3376         u32 pagemask = pagesize - 1;
3377         u32 nvram_cmd;
3378         u8 *tmp;
3379
3380         tmp = kmalloc(pagesize, GFP_KERNEL);
3381         if (tmp == NULL)
3382                 return -ENOMEM;
3383
3384         while (len) {
3385                 int j;
3386                 u32 phy_addr, page_off, size;
3387
3388                 phy_addr = offset & ~pagemask;
3389
3390                 for (j = 0; j < pagesize; j += 4) {
3391                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392                                                   (__be32 *) (tmp + j));
3393                         if (ret)
3394                                 break;
3395                 }
3396                 if (ret)
3397                         break;
3398
3399                 page_off = offset & pagemask;
3400                 size = pagesize;
3401                 if (len < size)
3402                         size = len;
3403
3404                 len -= size;
3405
3406                 memcpy(tmp + page_off, buf, size);
3407
3408                 offset = offset + (pagesize - page_off);
3409
3410                 tg3_enable_nvram_access(tp);
3411
3412                 /*
3413                  * Before we can erase the flash page, we need
3414                  * to issue a special "write enable" command.
3415                  */
3416                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3417
3418                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3419                         break;
3420
3421                 /* Erase the target page */
3422                 tw32(NVRAM_ADDR, phy_addr);
3423
3424                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3426
3427                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428                         break;
3429
3430                 /* Issue another write enable to start the write. */
3431                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432
3433                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434                         break;
3435
3436                 for (j = 0; j < pagesize; j += 4) {
3437                         __be32 data;
3438
3439                         data = *((__be32 *) (tmp + j));
3440
3441                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3442
3443                         tw32(NVRAM_ADDR, phy_addr + j);
3444
3445                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3446                                 NVRAM_CMD_WR;
3447
3448                         if (j == 0)
3449                                 nvram_cmd |= NVRAM_CMD_FIRST;
3450                         else if (j == (pagesize - 4))
3451                                 nvram_cmd |= NVRAM_CMD_LAST;
3452
3453                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3454                         if (ret)
3455                                 break;
3456                 }
3457                 if (ret)
3458                         break;
3459         }
3460
3461         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462         tg3_nvram_exec_cmd(tp, nvram_cmd);
3463
3464         kfree(tmp);
3465
3466         return ret;
3467 }
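
/* Editor's summary of the loop above: unbuffered flash is written with
 * a read-modify-write cycle per page. The full page is read back via
 * tg3_nvram_read_be32(), the new bytes are merged in at page_off, and
 * the device is then sent WREN, a page ERASE, WREN again, and a
 * word-by-word rewrite with NVRAM_CMD_FIRST on the first word and
 * NVRAM_CMD_LAST on the final one, finishing with a WRDI.
 */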
3468
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3471                 u8 *buf)
3472 {
3473         int i, ret = 0;
3474
3475         for (i = 0; i < len; i += 4, offset += 4) {
3476                 u32 page_off, phy_addr, nvram_cmd;
3477                 __be32 data;
3478
3479                 memcpy(&data, buf + i, 4);
3480                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3481
3482                 page_off = offset % tp->nvram_pagesize;
3483
3484                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3485
3486                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3487
3488                 if (page_off == 0 || i == 0)
3489                         nvram_cmd |= NVRAM_CMD_FIRST;
3490                 if (page_off == (tp->nvram_pagesize - 4))
3491                         nvram_cmd |= NVRAM_CMD_LAST;
3492
3493                 if (i == (len - 4))
3494                         nvram_cmd |= NVRAM_CMD_LAST;
3495
3496                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497                     !tg3_flag(tp, FLASH) ||
3498                     !tg3_flag(tp, 57765_PLUS))
3499                         tw32(NVRAM_ADDR, phy_addr);
3500
3501                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502                     !tg3_flag(tp, 5755_PLUS) &&
3503                     (tp->nvram_jedecnum == JEDEC_ST) &&
3504                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3505                         u32 cmd;
3506
3507                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508                         ret = tg3_nvram_exec_cmd(tp, cmd);
3509                         if (ret)
3510                                 break;
3511                 }
3512                 if (!tg3_flag(tp, FLASH)) {
3513                         /* We always do complete word writes to the EEPROM. */
3514                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3515                 }
3516
3517                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3518                 if (ret)
3519                         break;
3520         }
3521         return ret;
3522 }
3523
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3526 {
3527         int ret;
3528
3529         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3532                 udelay(40);
3533         }
3534
3535         if (!tg3_flag(tp, NVRAM)) {
3536                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3537         } else {
3538                 u32 grc_mode;
3539
3540                 ret = tg3_nvram_lock(tp);
3541                 if (ret)
3542                         return ret;
3543
3544                 tg3_enable_nvram_access(tp);
3545                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546                         tw32(NVRAM_WRITE1, 0x406);
3547
3548                 grc_mode = tr32(GRC_MODE);
3549                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3550
3551                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3553                                 buf);
3554                 } else {
3555                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3556                                 buf);
3557                 }
3558
3559                 grc_mode = tr32(GRC_MODE);
3560                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3561
3562                 tg3_disable_nvram_access(tp);
3563                 tg3_nvram_unlock(tp);
3564         }
3565
3566         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3568                 udelay(40);
3569         }
3570
3571         return ret;
3572 }
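
/* Caller sketch (illustrative): offset and len must be dword aligned
 * and the buffer must already be in bytestream (big-endian) order, the
 * same format tg3_nvram_read_be32() returns:
 *
 *	__be32 word = cpu_to_be32(0xdeadbeef);
 *	int err;
 *
 *	err = tg3_nvram_write_block(tp, offset, sizeof(word), (u8 *)&word);
 *
 * When EEPROM_WRITE_PROT is set, the function drops the GPIO-based
 * write protection around the write and restores it before returning.
 */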
3573
3574 #define RX_CPU_SCRATCH_BASE     0x30000
3575 #define RX_CPU_SCRATCH_SIZE     0x04000
3576 #define TX_CPU_SCRATCH_BASE     0x34000
3577 #define TX_CPU_SCRATCH_SIZE     0x04000
3578
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3581 {
3582         int i;
3583         const int iters = 10000;
3584
3585         for (i = 0; i < iters; i++) {
3586                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3587                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3588                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3589                         break;
3590                 if (pci_channel_offline(tp->pdev))
3591                         return -EBUSY;
3592         }
3593
3594         return (i == iters) ? -EBUSY : 0;
3595 }
3596
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3599 {
3600         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3601
3602         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3604         udelay(10);
3605
3606         return rc;
3607 }
3608
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3611 {
3612         return tg3_pause_cpu(tp, TX_CPU_BASE);
3613 }
3614
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3617 {
3618         tw32(cpu_base + CPU_STATE, 0xffffffff);
3619         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3620 }
3621
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3624 {
3625         tg3_resume_cpu(tp, RX_CPU_BASE);
3626 }
3627
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631         int rc;
3632
3633         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3634
3635         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3637
3638                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3639                 return 0;
3640         }
3641         if (cpu_base == RX_CPU_BASE) {
3642                 rc = tg3_rxcpu_pause(tp);
3643         } else {
3644                 /*
3645                  * There is only an Rx CPU for the 5750 derivative in the
3646                  * BCM4785.
3647                  */
3648                 if (tg3_flag(tp, IS_SSB_CORE))
3649                         return 0;
3650
3651                 rc = tg3_txcpu_pause(tp);
3652         }
3653
3654         if (rc) {
3655                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3657                 return -ENODEV;
3658         }
3659
3660         /* Clear firmware's nvram arbitration. */
3661         if (tg3_flag(tp, NVRAM))
3662                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3663         return 0;
3664 }
3665
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667                            const struct tg3_firmware_hdr *fw_hdr)
3668 {
3669         int fw_len;
3670
3671         /* Non-fragmented firmware has one firmware header followed by a
3672          * contiguous chunk of data to be written. The length field in that
3673          * header is not the length of the data to be written but the complete
3674          * length of the bss. The data length is therefore derived from
3675          * tp->fw->size minus the headers.
3676          *
3677          * Fragmented firmware has a main header followed by multiple
3678          * fragments. Each fragment is identical to non-fragmented firmware:
3679          * a firmware header followed by a contiguous chunk of data. In
3680          * the main header, the length field is unused and set to 0xffffffff.
3681          * In each fragment header the length is the entire size of that
3682          * fragment, i.e. fragment data plus header length. The data length is
3683          * therefore the length field in the header minus TG3_FW_HDR_LEN.
3684          */
3685         if (tp->fw_len == 0xffffffff)
3686                 fw_len = be32_to_cpu(fw_hdr->len);
3687         else
3688                 fw_len = tp->fw->size;
3689
3690         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3691 }
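
/* Layout sketch of the two formats described above (editor's
 * illustration):
 *
 *   non-fragmented:  [ hdr | data ............................. ]
 *                      hdr.len = complete bss length, not data length
 *
 *   fragmented:      [ main hdr | hdr | data | hdr | data | ... ]
 *                      main hdr.len     = 0xffffffff
 *                      fragment hdr.len = TG3_FW_HDR_LEN + data bytes
 */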
3692
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695                                  u32 cpu_scratch_base, int cpu_scratch_size,
3696                                  const struct tg3_firmware_hdr *fw_hdr)
3697 {
3698         int err, i;
3699         void (*write_op)(struct tg3 *, u32, u32);
3700         int total_len = tp->fw->size;
3701
3702         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3703                 netdev_err(tp->dev,
3704                            "%s: trying to load TX CPU firmware on a 5705-class device\n",
3705                            __func__);
3706                 return -EINVAL;
3707         }
3708
3709         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710                 write_op = tg3_write_mem;
3711         else
3712                 write_op = tg3_write_indirect_reg32;
3713
3714         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715                 /* It is possible that bootcode is still loading at this point.
3716                  * Get the nvram lock first before halting the cpu.
3717                  */
3718                 int lock_err = tg3_nvram_lock(tp);
3719                 err = tg3_halt_cpu(tp, cpu_base);
3720                 if (!lock_err)
3721                         tg3_nvram_unlock(tp);
3722                 if (err)
3723                         goto out;
3724
3725                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726                         write_op(tp, cpu_scratch_base + i, 0);
3727                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3728                 tw32(cpu_base + CPU_MODE,
3729                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3730         } else {
3731                 /* Subtract additional main header for fragmented firmware and
3732                  * advance to the first fragment.
3733                  */
3734                 total_len -= TG3_FW_HDR_LEN;
3735                 fw_hdr++;
3736         }
3737
3738         do {
3739                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3740                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741                         write_op(tp, cpu_scratch_base +
3742                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3743                                      (i * sizeof(u32)),
3744                                  be32_to_cpu(fw_data[i]));
3745
3746                 total_len -= be32_to_cpu(fw_hdr->len);
3747
3748                 /* Advance to next fragment */
3749                 fw_hdr = (struct tg3_firmware_hdr *)
3750                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751         } while (total_len > 0);
3752
3753         err = 0;
3754
3755 out:
3756         return err;
3757 }
3758
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3761 {
3762         int i;
3763         const int iters = 5;
3764
3765         tw32(cpu_base + CPU_STATE, 0xffffffff);
3766         tw32_f(cpu_base + CPU_PC, pc);
3767
3768         for (i = 0; i < iters; i++) {
3769                 if (tr32(cpu_base + CPU_PC) == pc)
3770                         break;
3771                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3773                 tw32_f(cpu_base + CPU_PC, pc);
3774                 udelay(1000);
3775         }
3776
3777         return (i == iters) ? -EBUSY : 0;
3778 }
3779
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3782 {
3783         const struct tg3_firmware_hdr *fw_hdr;
3784         int err;
3785
3786         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3787
3788         /* The firmware blob starts with version numbers, followed by the
3789          * start address and length. We set the complete length:
3790          * length = end_address_of_bss - start_address_of_text.
3791          * The remainder is the blob to be loaded contiguously
3792          * from the start address. */
3793
3794         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3796                                     fw_hdr);
3797         if (err)
3798                 return err;
3799
3800         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         /* Now startup only the RX cpu. */
3807         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808                                        be32_to_cpu(fw_hdr->base_addr));
3809         if (err) {
3810                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811                            "should be %08x\n", __func__,
3812                            tr32(RX_CPU_BASE + CPU_PC),
3813                                 be32_to_cpu(fw_hdr->base_addr));
3814                 return -ENODEV;
3815         }
3816
3817         tg3_rxcpu_resume(tp);
3818
3819         return 0;
3820 }
3821
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3823 {
3824         const int iters = 1000;
3825         int i;
3826         u32 val;
3827
3828         /* Wait for boot code to complete initialization and enter service
3829          * loop. It is then safe to download service patches.
3830          */
3831         for (i = 0; i < iters; i++) {
3832                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3833                         break;
3834
3835                 udelay(10);
3836         }
3837
3838         if (i == iters) {
3839                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3840                 return -EBUSY;
3841         }
3842
3843         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3844         if (val & 0xff) {
3845                 netdev_warn(tp->dev,
3846                             "Other patches exist. Not downloading EEE patch\n");
3847                 return -EEXIST;
3848         }
3849
3850         return 0;
3851 }
3852
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3855 {
3856         struct tg3_firmware_hdr *fw_hdr;
3857
3858         if (!tg3_flag(tp, NO_NVRAM))
3859                 return;
3860
3861         if (tg3_validate_rxcpu_state(tp))
3862                 return;
3863
3864         if (!tp->fw)
3865                 return;
3866
3867         /* This firmware blob has a different format than older firmware
3868          * releases, as described below. The main difference is that the
3869          * data is fragmented and must be written to non-contiguous locations.
3870          *
3871          * The blob begins with a firmware header identical to other
3872          * firmware, consisting of version, base addr and length. The length
3873          * here is unused and set to 0xffffffff.
3874          *
3875          * This is followed by a series of firmware fragments, each
3876          * individually identical to older firmware, i.e. a firmware
3877          * header followed by the data for that fragment. The version
3878          * field of each fragment header is unused.
3879          */
3880
3881         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3883                 return;
3884
3885         if (tg3_rxcpu_pause(tp))
3886                 return;
3887
3888         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3890
3891         tg3_rxcpu_resume(tp);
3892 }
3893
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3896 {
3897         const struct tg3_firmware_hdr *fw_hdr;
3898         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3899         int err;
3900
3901         if (!tg3_flag(tp, FW_TSO))
3902                 return 0;
3903
3904         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3905
3906         /* The firmware blob starts with version numbers, followed by the
3907          * start address and length. We set the complete length:
3908          * length = end_address_of_bss - start_address_of_text.
3909          * The remainder is the blob to be loaded contiguously
3910          * from the start address. */
3911
3912         cpu_scratch_size = tp->fw_len;
3913
3914         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915                 cpu_base = RX_CPU_BASE;
3916                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3917         } else {
3918                 cpu_base = TX_CPU_BASE;
3919                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3921         }
3922
3923         err = tg3_load_firmware_cpu(tp, cpu_base,
3924                                     cpu_scratch_base, cpu_scratch_size,
3925                                     fw_hdr);
3926         if (err)
3927                 return err;
3928
3929         /* Now startup the cpu. */
3930         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931                                        be32_to_cpu(fw_hdr->base_addr));
3932         if (err) {
3933                 netdev_err(tp->dev,
3934                            "%s fails to set CPU PC, is %08x should be %08x\n",
3935                            __func__, tr32(cpu_base + CPU_PC),
3936                            be32_to_cpu(fw_hdr->base_addr));
3937                 return -ENODEV;
3938         }
3939
3940         tg3_resume_cpu(tp, cpu_base);
3941         return 0;
3942 }
3943
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3946 {
3947         u32 addr_high, addr_low;
3948
3949         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3950         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3951                     (mac_addr[4] <<  8) | mac_addr[5]);
3952
3953         if (index < 4) {
3954                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3955                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3956         } else {
3957                 index -= 4;
3958                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3959                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3960         }
3961 }
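
/* Worked example (editor's illustration): for the MAC address
 * 00:10:18:aa:bb:cc,
 *
 *   addr_high = 0x00000010   (bytes 0-1)
 *   addr_low  = 0x18aabbcc   (bytes 2-5)
 *
 * Indexes 0-3 land in the MAC_ADDR_{0..3} register pairs; indexes
 * 4-15 use the MAC_EXTADDR pairs on chips that provide them.
 */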
3962
3963 /* tp->lock is held. */
3964 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3965 {
3966         u32 addr_high;
3967         int i;
3968
3969         for (i = 0; i < 4; i++) {
3970                 if (i == 1 && skip_mac_1)
3971                         continue;
3972                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3973         }
3974
3975         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3976             tg3_asic_rev(tp) == ASIC_REV_5704) {
3977                 for (i = 4; i < 16; i++)
3978                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979         }
3980
3981         addr_high = (tp->dev->dev_addr[0] +
3982                      tp->dev->dev_addr[1] +
3983                      tp->dev->dev_addr[2] +
3984                      tp->dev->dev_addr[3] +
3985                      tp->dev->dev_addr[4] +
3986                      tp->dev->dev_addr[5]) &
3987                 TX_BACKOFF_SEED_MASK;
3988         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3989 }
3990
3991 static void tg3_enable_register_access(struct tg3 *tp)
3992 {
3993         /*
3994          * Make sure register accesses (indirect or otherwise) will function
3995          * correctly.
3996          */
3997         pci_write_config_dword(tp->pdev,
3998                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3999 }
4000
4001 static int tg3_power_up(struct tg3 *tp)
4002 {
4003         int err;
4004
4005         tg3_enable_register_access(tp);
4006
4007         err = pci_set_power_state(tp->pdev, PCI_D0);
4008         if (!err) {
4009                 /* Switch out of Vaux if it is a NIC */
4010                 tg3_pwrsrc_switch_to_vmain(tp);
4011         } else {
4012                 netdev_err(tp->dev, "Transition to D0 failed\n");
4013         }
4014
4015         return err;
4016 }
4017
4018 static int tg3_setup_phy(struct tg3 *, bool);
4019
4020 static int tg3_power_down_prepare(struct tg3 *tp)
4021 {
4022         u32 misc_host_ctrl;
4023         bool device_should_wake, do_low_power;
4024
4025         tg3_enable_register_access(tp);
4026
4027         /* Restore the CLKREQ setting. */
4028         if (tg3_flag(tp, CLKREQ_BUG))
4029                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4030                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4031
4032         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4033         tw32(TG3PCI_MISC_HOST_CTRL,
4034              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4035
4036         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4037                              tg3_flag(tp, WOL_ENABLE);
4038
4039         if (tg3_flag(tp, USE_PHYLIB)) {
4040                 do_low_power = false;
4041                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4042                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4043                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4044                         struct phy_device *phydev;
4045                         u32 phyid;
4046
4047                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4048
4049                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4050
4051                         tp->link_config.speed = phydev->speed;
4052                         tp->link_config.duplex = phydev->duplex;
4053                         tp->link_config.autoneg = phydev->autoneg;
4054                         ethtool_convert_link_mode_to_legacy_u32(
4055                                 &tp->link_config.advertising,
4056                                 phydev->advertising);
4057
4058                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4059                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4060                                          advertising);
4061                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4062                                          advertising);
4063                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4064                                          advertising);
4065
4066                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4068                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4069                                                          advertising);
4070                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4071                                                          advertising);
4072                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4073                                                          advertising);
4074                                 } else {
4075                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076                                                          advertising);
4077                                 }
4078                         }
4079
4080                         linkmode_copy(phydev->advertising, advertising);
4081                         phy_start_aneg(phydev);
4082
4083                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4084                         if (phyid != PHY_ID_BCMAC131) {
4085                                 phyid &= PHY_BCM_OUI_MASK;
4086                                 if (phyid == PHY_BCM_OUI_1 ||
4087                                     phyid == PHY_BCM_OUI_2 ||
4088                                     phyid == PHY_BCM_OUI_3)
4089                                         do_low_power = true;
4090                         }
4091                 }
4092         } else {
4093                 do_low_power = true;
4094
4095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4096                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4097
4098                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4099                         tg3_setup_phy(tp, false);
4100         }
4101
4102         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4103                 u32 val;
4104
4105                 val = tr32(GRC_VCPU_EXT_CTRL);
4106                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4107         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4108                 int i;
4109                 u32 val;
4110
4111                 for (i = 0; i < 200; i++) {
4112                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4113                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4114                                 break;
4115                         msleep(1);
4116                 }
4117         }
4118         if (tg3_flag(tp, WOL_CAP))
4119                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4120                                                      WOL_DRV_STATE_SHUTDOWN |
4121                                                      WOL_DRV_WOL |
4122                                                      WOL_SET_MAGIC_PKT);
4123
4124         if (device_should_wake) {
4125                 u32 mac_mode;
4126
4127                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4128                         if (do_low_power &&
4129                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4130                                 tg3_phy_auxctl_write(tp,
4131                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4132                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4133                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4134                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4135                                 udelay(40);
4136                         }
4137
4138                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4139                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140                         else if (tp->phy_flags &
4141                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4142                                 if (tp->link_config.active_speed == SPEED_1000)
4143                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4144                                 else
4145                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4146                         } else
4147                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4148
4149                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4150                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4151                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4152                                              SPEED_100 : SPEED_10;
4153                                 if (tg3_5700_link_polarity(tp, speed))
4154                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4155                                 else
4156                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4157                         }
4158                 } else {
4159                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4160                 }
4161
4162                 if (!tg3_flag(tp, 5750_PLUS))
4163                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4164
4165                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4166                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4167                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4168                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4169
4170                 if (tg3_flag(tp, ENABLE_APE))
4171                         mac_mode |= MAC_MODE_APE_TX_EN |
4172                                     MAC_MODE_APE_RX_EN |
4173                                     MAC_MODE_TDE_ENABLE;
4174
4175                 tw32_f(MAC_MODE, mac_mode);
4176                 udelay(100);
4177
4178                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4179                 udelay(10);
4180         }
4181
4182         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4183             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4185                 u32 base_val;
4186
4187                 base_val = tp->pci_clock_ctrl;
4188                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4189                              CLOCK_CTRL_TXCLK_DISABLE);
4190
4191                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4192                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4193         } else if (tg3_flag(tp, 5780_CLASS) ||
4194                    tg3_flag(tp, CPMU_PRESENT) ||
4195                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4196                 /* do nothing */
4197         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4198                 u32 newbits1, newbits2;
4199
4200                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4201                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4202                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4203                                     CLOCK_CTRL_TXCLK_DISABLE |
4204                                     CLOCK_CTRL_ALTCLK);
4205                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206                 } else if (tg3_flag(tp, 5705_PLUS)) {
4207                         newbits1 = CLOCK_CTRL_625_CORE;
4208                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4209                 } else {
4210                         newbits1 = CLOCK_CTRL_ALTCLK;
4211                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212                 }
4213
4214                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4215                             40);
4216
4217                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4218                             40);
4219
4220                 if (!tg3_flag(tp, 5705_PLUS)) {
4221                         u32 newbits3;
4222
4223                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4224                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4225                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4226                                             CLOCK_CTRL_TXCLK_DISABLE |
4227                                             CLOCK_CTRL_44MHZ_CORE);
4228                         } else {
4229                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4230                         }
4231
4232                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4233                                     tp->pci_clock_ctrl | newbits3, 40);
4234                 }
4235         }
4236
4237         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4238                 tg3_power_down_phy(tp, do_low_power);
4239
4240         tg3_frob_aux_power(tp, true);
4241
4242         /* Workaround for unstable PLL clock */
4243         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4244             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4245              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4246                 u32 val = tr32(0x7d00);
4247
4248                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4249                 tw32(0x7d00, val);
4250                 if (!tg3_flag(tp, ENABLE_ASF)) {
4251                         int err;
4252
4253                         err = tg3_nvram_lock(tp);
4254                         tg3_halt_cpu(tp, RX_CPU_BASE);
4255                         if (!err)
4256                                 tg3_nvram_unlock(tp);
4257                 }
4258         }
4259
4260         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4261
4262         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4263
4264         return 0;
4265 }
4266
4267 static void tg3_power_down(struct tg3 *tp)
4268 {
4269         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4270         pci_set_power_state(tp->pdev, PCI_D3hot);
4271 }
4272
4273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4274 {
4275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4276         case MII_TG3_AUX_STAT_10HALF:
4277                 *speed = SPEED_10;
4278                 *duplex = DUPLEX_HALF;
4279                 break;
4280
4281         case MII_TG3_AUX_STAT_10FULL:
4282                 *speed = SPEED_10;
4283                 *duplex = DUPLEX_FULL;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_100HALF:
4287                 *speed = SPEED_100;
4288                 *duplex = DUPLEX_HALF;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_100FULL:
4292                 *speed = SPEED_100;
4293                 *duplex = DUPLEX_FULL;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_1000HALF:
4297                 *speed = SPEED_1000;
4298                 *duplex = DUPLEX_HALF;
4299                 break;
4300
4301         case MII_TG3_AUX_STAT_1000FULL:
4302                 *speed = SPEED_1000;
4303                 *duplex = DUPLEX_FULL;
4304                 break;
4305
4306         default:
4307                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4308                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4309                                  SPEED_10;
4310                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4311                                   DUPLEX_HALF;
4312                         break;
4313                 }
4314                 *speed = SPEED_UNKNOWN;
4315                 *duplex = DUPLEX_UNKNOWN;
4316                 break;
4317         }
4318 }
4319
4320 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4321 {
4322         int err = 0;
4323         u32 val, new_adv;
4324
4325         new_adv = ADVERTISE_CSMA;
4326         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4327         new_adv |= mii_advertise_flowctrl(flowctrl);
4328
4329         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4330         if (err)
4331                 goto done;
4332
4333         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4334                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4335
4336                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4337                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4338                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4339
4340                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4341                 if (err)
4342                         goto done;
4343         }
4344
4345         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4346                 goto done;
4347
4348         tw32(TG3_CPMU_EEE_MODE,
4349              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4350
4351         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4352         if (!err) {
4353                 u32 err2;
4354
4355                 val = 0;
4356                 /* Advertise 100-BaseTX EEE ability */
4357                 if (advertise & ADVERTISED_100baseT_Full)
4358                         val |= MDIO_AN_EEE_ADV_100TX;
4359                 /* Advertise 1000-BaseT EEE ability */
4360                 if (advertise & ADVERTISED_1000baseT_Full)
4361                         val |= MDIO_AN_EEE_ADV_1000T;
4362
4363                 if (!tp->eee.eee_enabled) {
4364                         val = 0;
4365                         tp->eee.advertised = 0;
4366                 } else {
4367                         tp->eee.advertised = advertise &
4368                                              (ADVERTISED_100baseT_Full |
4369                                               ADVERTISED_1000baseT_Full);
4370                 }
4371
4372                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4373                 if (err)
4374                         val = 0;
4375
4376                 switch (tg3_asic_rev(tp)) {
4377                 case ASIC_REV_5717:
4378                 case ASIC_REV_57765:
4379                 case ASIC_REV_57766:
4380                 case ASIC_REV_5719:
4381                         /* If we advertised any EEE modes above... */
4382                         if (val)
4383                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4384                                       MII_TG3_DSP_TAP26_RMRXSTO |
4385                                       MII_TG3_DSP_TAP26_OPCSINPT;
4386                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4387                         fallthrough;
4388                 case ASIC_REV_5720:
4389                 case ASIC_REV_5762:
4390                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4391                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4392                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4393                 }
4394
4395                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4396                 if (!err)
4397                         err = err2;
4398         }
4399
4400 done:
4401         return err;
4402 }
4403
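     /* Begin link negotiation on a copper PHY: either (re)start
      * autoneg with the requested (or low-power/WOL) advertisement,
      * or force the configured speed/duplex through MII_BMCR.
      */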
4404 static void tg3_phy_copper_begin(struct tg3 *tp)
4405 {
4406         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4407             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4408                 u32 adv, fc;
4409
4410                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4411                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4412                         adv = ADVERTISED_10baseT_Half |
4413                               ADVERTISED_10baseT_Full;
4414                         if (tg3_flag(tp, WOL_SPEED_100MB))
4415                                 adv |= ADVERTISED_100baseT_Half |
4416                                        ADVERTISED_100baseT_Full;
4417                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4418                                 if (!(tp->phy_flags &
4419                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4420                                         adv |= ADVERTISED_1000baseT_Half;
4421                                 adv |= ADVERTISED_1000baseT_Full;
4422                         }
4423
4424                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4425                 } else {
4426                         adv = tp->link_config.advertising;
4427                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4428                                 adv &= ~(ADVERTISED_1000baseT_Half |
4429                                          ADVERTISED_1000baseT_Full);
4430
4431                         fc = tp->link_config.flowctrl;
4432                 }
4433
4434                 tg3_phy_autoneg_cfg(tp, adv, fc);
4435
4436                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4437                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4438                         /* Normally during power down we would autonegotiate
4439                          * the lowest possible speed for WOL, but to avoid a
4440                          * link flap we leave the advertisement untouched.
4441                          */
4442                         return;
4443                 }
4444
4445                 tg3_writephy(tp, MII_BMCR,
4446                              BMCR_ANENABLE | BMCR_ANRESTART);
4447         } else {
4448                 int i;
4449                 u32 bmcr, orig_bmcr;
4450
4451                 tp->link_config.active_speed = tp->link_config.speed;
4452                 tp->link_config.active_duplex = tp->link_config.duplex;
4453
4454                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4455                         /* With autoneg disabled, the 5715 (ASIC rev 5714)
4456                          * only links up when the advertisement register has
4457                          * the configured speed enabled.
4458                          */
4459                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4460                 }
4461
4462                 bmcr = 0;
4463                 switch (tp->link_config.speed) {
4464                 default:
4465                 case SPEED_10:
4466                         break;
4467
4468                 case SPEED_100:
4469                         bmcr |= BMCR_SPEED100;
4470                         break;
4471
4472                 case SPEED_1000:
4473                         bmcr |= BMCR_SPEED1000;
4474                         break;
4475                 }
4476
4477                 if (tp->link_config.duplex == DUPLEX_FULL)
4478                         bmcr |= BMCR_FULLDPLX;
4479
4480                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4481                     (bmcr != orig_bmcr)) {
4482                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4483                         for (i = 0; i < 1500; i++) {
4484                                 u32 tmp;
4485
4486                                 udelay(10);
4487                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4488                                     tg3_readphy(tp, MII_BMSR, &tmp))
4489                                         continue;
4490                                 if (!(tmp & BMSR_LSTATUS)) {
4491                                         udelay(40);
4492                                         break;
4493                                 }
4494                         }
4495                         tg3_writephy(tp, MII_BMCR, bmcr);
4496                         udelay(40);
4497                 }
4498         }
4499 }
4500
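     /* Rebuild tp->link_config from the PHY's current register state
      * (BMCR, and the advertisement registers when autoneg is on)
      * instead of reprogramming the PHY.
      */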
4501 static int tg3_phy_pull_config(struct tg3 *tp)
4502 {
4503         int err;
4504         u32 val;
4505
4506         err = tg3_readphy(tp, MII_BMCR, &val);
4507         if (err)
4508                 goto done;
4509
4510         if (!(val & BMCR_ANENABLE)) {
4511                 tp->link_config.autoneg = AUTONEG_DISABLE;
4512                 tp->link_config.advertising = 0;
4513                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4514
4515                 err = -EIO;
4516
4517                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4518                 case 0:
4519                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520                                 goto done;
4521
4522                         tp->link_config.speed = SPEED_10;
4523                         break;
4524                 case BMCR_SPEED100:
4525                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526                                 goto done;
4527
4528                         tp->link_config.speed = SPEED_100;
4529                         break;
4530                 case BMCR_SPEED1000:
4531                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4532                                 tp->link_config.speed = SPEED_1000;
4533                                 break;
4534                         }
4535                         fallthrough;
4536                 default:
4537                         goto done;
4538                 }
4539
4540                 if (val & BMCR_FULLDPLX)
4541                         tp->link_config.duplex = DUPLEX_FULL;
4542                 else
4543                         tp->link_config.duplex = DUPLEX_HALF;
4544
4545                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4546
4547                 err = 0;
4548                 goto done;
4549         }
4550
4551         tp->link_config.autoneg = AUTONEG_ENABLE;
4552         tp->link_config.advertising = ADVERTISED_Autoneg;
4553         tg3_flag_set(tp, PAUSE_AUTONEG);
4554
4555         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4556                 u32 adv;
4557
4558                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4559                 if (err)
4560                         goto done;
4561
4562                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4563                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4564
4565                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4566         } else {
4567                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4568         }
4569
4570         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4571                 u32 adv;
4572
4573                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4574                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4575                         if (err)
4576                                 goto done;
4577
4578                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4579                 } else {
4580                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4581                         if (err)
4582                                 goto done;
4583
4584                         adv = tg3_decode_flowctrl_1000X(val);
4585                         tp->link_config.flowctrl = adv;
4586
4587                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4588                         adv = mii_adv_to_ethtool_adv_x(val);
4589                 }
4590
4591                 tp->link_config.advertising |= adv;
4592         }
4593
4594 done:
4595         return err;
4596 }
4597
4598 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4599 {
4600         int err;
4601
4602         /* Turn off tap power management. */
4603         /* Set the extended packet length bit. */
4604         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4605
4606         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4607         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4608         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4609         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4610         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4611
4612         udelay(40);
4613
4614         return err;
4615 }
4616
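     /* Return true when the EEE configuration in the PHY matches what
      * was last requested in tp->eee, or when the PHY is not
      * EEE-capable at all.
      */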
4617 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4618 {
4619         struct ethtool_eee eee;
4620
4621         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4622                 return true;
4623
4624         tg3_eee_pull_config(tp, &eee);
4625
4626         if (tp->eee.eee_enabled) {
4627                 if (tp->eee.advertised != eee.advertised ||
4628                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4629                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4630                         return false;
4631         } else {
4632                 /* EEE is disabled but we're advertising */
4633                 if (eee.advertised)
4634                         return false;
4635         }
4636
4637         return true;
4638 }
4639
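     /* Verify that the PHY advertisement registers still hold the
      * values we programmed; the local advertisement is returned
      * through @lcladv.
      */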
4640 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4641 {
4642         u32 advmsk, tgtadv, advertising;
4643
4644         advertising = tp->link_config.advertising;
4645         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4646
4647         advmsk = ADVERTISE_ALL;
4648         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4649                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4650                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4651         }
4652
4653         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4654                 return false;
4655
4656         if ((*lcladv & advmsk) != tgtadv)
4657                 return false;
4658
4659         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4660                 u32 tg3_ctrl;
4661
4662                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4663
4664                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4665                         return false;
4666
4667                 if (tgtadv &&
4668                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4669                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4670                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4671                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4672                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4673                 } else {
4674                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4675                 }
4676
4677                 if (tg3_ctrl != tgtadv)
4678                         return false;
4679         }
4680
4681         return true;
4682 }
4683
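     /* Fetch the link partner's abilities (MII_STAT1000 and MII_LPA)
      * and record them in ethtool form in tp->link_config.rmt_adv.
      */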
4684 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4685 {
4686         u32 lpeth = 0;
4687
4688         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4689                 u32 val;
4690
4691                 if (tg3_readphy(tp, MII_STAT1000, &val))
4692                         return false;
4693
4694                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4695         }
4696
4697         if (tg3_readphy(tp, MII_LPA, rmtadv))
4698                 return false;
4699
4700         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4701         tp->link_config.rmt_adv = lpeth;
4702
4703         return true;
4704 }
4705
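     /* Propagate a link state change to the networking core (carrier
      * on/off) and log it; returns true if the state actually changed.
      */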
4706 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4707 {
4708         if (curr_link_up != tp->link_up) {
4709                 if (curr_link_up) {
4710                         netif_carrier_on(tp->dev);
4711                 } else {
4712                         netif_carrier_off(tp->dev);
4713                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4714                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4715                 }
4716
4717                 tg3_link_report(tp);
4718                 return true;
4719         }
4720
4721         return false;
4722 }
4723
4724 static void tg3_clear_mac_status(struct tg3 *tp)
4725 {
4726         tw32(MAC_EVENT, 0);
4727
4728         tw32_f(MAC_STATUS,
4729                MAC_STATUS_SYNC_CHANGED |
4730                MAC_STATUS_CFG_CHANGED |
4731                MAC_STATUS_MI_COMPLETION |
4732                MAC_STATUS_LNKSTATE_CHANGED);
4733         udelay(40);
4734 }
4735
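     /* Program the CPMU EEE link-idle, control, mode and debounce
      * timer registers according to tp->eee.
      */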
4736 static void tg3_setup_eee(struct tg3 *tp)
4737 {
4738         u32 val;
4739
4740         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4741               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4742         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4743                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4744
4745         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4746
4747         tw32_f(TG3_CPMU_EEE_CTRL,
4748                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4749
4750         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4751               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4752               TG3_CPMU_EEEMD_LPI_IN_RX |
4753               TG3_CPMU_EEEMD_EEE_ENABLE;
4754
4755         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4756                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4757
4758         if (tg3_flag(tp, ENABLE_APE))
4759                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4760
4761         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4762
4763         tw32_f(TG3_CPMU_EEE_DBTMR1,
4764                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4765                (tp->eee.tx_lpi_timer & 0xffff));
4766
4767         tw32_f(TG3_CPMU_EEE_DBTMR2,
4768                TG3_CPMU_DBTMR2_APE_TX_2047US |
4769                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4770 }
4771
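     /* Main link bring-up path for copper PHYs: poll BMSR for link,
      * decode speed/duplex from the aux status register, validate the
      * negotiated configuration, then set the MAC mode, LEDs and flow
      * control to match.
      */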
4772 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4773 {
4774         bool current_link_up;
4775         u32 bmsr, val;
4776         u32 lcl_adv, rmt_adv;
4777         u32 current_speed;
4778         u8 current_duplex;
4779         int i, err;
4780
4781         tg3_clear_mac_status(tp);
4782
4783         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4784                 tw32_f(MAC_MI_MODE,
4785                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4786                 udelay(80);
4787         }
4788
4789         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4790
4791         /* Some third-party PHYs need to be reset when the link
4792          * goes down.
4793          */
4794         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4795              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4796              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4797             tp->link_up) {
4798                 tg3_readphy(tp, MII_BMSR, &bmsr);
4799                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4800                     !(bmsr & BMSR_LSTATUS))
4801                         force_reset = true;
4802         }
4803         if (force_reset)
4804                 tg3_phy_reset(tp);
4805
4806         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4807                 tg3_readphy(tp, MII_BMSR, &bmsr);
4808                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4809                     !tg3_flag(tp, INIT_COMPLETE))
4810                         bmsr = 0;
4811
4812                 if (!(bmsr & BMSR_LSTATUS)) {
4813                         err = tg3_init_5401phy_dsp(tp);
4814                         if (err)
4815                                 return err;
4816
4817                         tg3_readphy(tp, MII_BMSR, &bmsr);
4818                         for (i = 0; i < 1000; i++) {
4819                                 udelay(10);
4820                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4821                                     (bmsr & BMSR_LSTATUS)) {
4822                                         udelay(40);
4823                                         break;
4824                                 }
4825                         }
4826
4827                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4828                             TG3_PHY_REV_BCM5401_B0 &&
4829                             !(bmsr & BMSR_LSTATUS) &&
4830                             tp->link_config.active_speed == SPEED_1000) {
4831                                 err = tg3_phy_reset(tp);
4832                                 if (!err)
4833                                         err = tg3_init_5401phy_dsp(tp);
4834                                 if (err)
4835                                         return err;
4836                         }
4837                 }
4838         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4839                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4840                 /* 5701 {A0,B0} CRC bug workaround */
4841                 tg3_writephy(tp, 0x15, 0x0a75);
4842                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4843                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845         }
4846
4847         /* Clear pending interrupts... */
4848         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850
4851         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4852                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4853         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4854                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4855
4856         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4857             tg3_asic_rev(tp) == ASIC_REV_5701) {
4858                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4859                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4860                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4861                 else
4862                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4863         }
4864
4865         current_link_up = false;
4866         current_speed = SPEED_UNKNOWN;
4867         current_duplex = DUPLEX_UNKNOWN;
4868         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4869         tp->link_config.rmt_adv = 0;
4870
4871         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4872                 err = tg3_phy_auxctl_read(tp,
4873                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874                                           &val);
4875                 if (!err && !(val & (1 << 10))) {
4876                         tg3_phy_auxctl_write(tp,
4877                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878                                              val | (1 << 10));
4879                         goto relink;
4880                 }
4881         }
4882
4883         bmsr = 0;
4884         for (i = 0; i < 100; i++) {
4885                 tg3_readphy(tp, MII_BMSR, &bmsr);
4886                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4887                     (bmsr & BMSR_LSTATUS))
4888                         break;
4889                 udelay(40);
4890         }
4891
4892         if (bmsr & BMSR_LSTATUS) {
4893                 u32 aux_stat, bmcr;
4894
4895                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4896                 for (i = 0; i < 2000; i++) {
4897                         udelay(10);
4898                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4899                             aux_stat)
4900                                 break;
4901                 }
4902
4903                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4904                                              &current_speed,
4905                                              &current_duplex);
4906
4907                 bmcr = 0;
4908                 for (i = 0; i < 200; i++) {
4909                         tg3_readphy(tp, MII_BMCR, &bmcr);
4910                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4911                                 continue;
4912                         if (bmcr && bmcr != 0x7fff)
4913                                 break;
4914                         udelay(10);
4915                 }
4916
4917                 lcl_adv = 0;
4918                 rmt_adv = 0;
4919
4920                 tp->link_config.active_speed = current_speed;
4921                 tp->link_config.active_duplex = current_duplex;
4922
4923                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4924                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4925
4926                         if ((bmcr & BMCR_ANENABLE) &&
4927                             eee_config_ok &&
4928                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4929                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4930                                 current_link_up = true;
4931
4932                         /* Changes to the EEE settings take effect only after
4933                          * a PHY reset.  If we have skipped a reset due to Link
4934                          * Flap Avoidance being enabled, do it now.
4935                          */
4936                         if (!eee_config_ok &&
4937                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4938                             !force_reset) {
4939                                 tg3_setup_eee(tp);
4940                                 tg3_phy_reset(tp);
4941                         }
4942                 } else {
4943                         if (!(bmcr & BMCR_ANENABLE) &&
4944                             tp->link_config.speed == current_speed &&
4945                             tp->link_config.duplex == current_duplex) {
4946                                 current_link_up = true;
4947                         }
4948                 }
4949
4950                 if (current_link_up &&
4951                     tp->link_config.active_duplex == DUPLEX_FULL) {
4952                         u32 reg, bit;
4953
4954                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4955                                 reg = MII_TG3_FET_GEN_STAT;
4956                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4957                         } else {
4958                                 reg = MII_TG3_EXT_STAT;
4959                                 bit = MII_TG3_EXT_STAT_MDIX;
4960                         }
4961
4962                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4963                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4964
4965                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4966                 }
4967         }
4968
4969 relink:
4970         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4971                 tg3_phy_copper_begin(tp);
4972
4973                 if (tg3_flag(tp, ROBOSWITCH)) {
4974                         current_link_up = true;
4975                         /* FIXME: use 100 Mbit/s when a BCM5325 switch is used */
4976                         current_speed = SPEED_1000;
4977                         current_duplex = DUPLEX_FULL;
4978                         tp->link_config.active_speed = current_speed;
4979                         tp->link_config.active_duplex = current_duplex;
4980                 }
4981
4982                 tg3_readphy(tp, MII_BMSR, &bmsr);
4983                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4984                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4985                         current_link_up = true;
4986         }
4987
4988         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4989         if (current_link_up) {
4990                 if (tp->link_config.active_speed == SPEED_100 ||
4991                     tp->link_config.active_speed == SPEED_10)
4992                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993                 else
4994                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4996                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4997         else
4998                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4999
5000         /* For the 5750 core in the BCM4785 chip to work properly
5001          * in RGMII mode, the LED Control Register must be set up.
5002          */
5003         if (tg3_flag(tp, RGMII_MODE)) {
5004                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5005                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5006
5007                 if (tp->link_config.active_speed == SPEED_10)
5008                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5009                 else if (tp->link_config.active_speed == SPEED_100)
5010                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011                                      LED_CTRL_100MBPS_ON);
5012                 else if (tp->link_config.active_speed == SPEED_1000)
5013                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014                                      LED_CTRL_1000MBPS_ON);
5015
5016                 tw32(MAC_LED_CTRL, led_ctrl);
5017                 udelay(40);
5018         }
5019
5020         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5021         if (tp->link_config.active_duplex == DUPLEX_HALF)
5022                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5023
5024         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5025                 if (current_link_up &&
5026                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5027                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5028                 else
5029                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5030         }
5031
5032         /* ??? Without this setting Netgear GA302T PHY does not
5033          * ??? send/receive packets...
5034          */
5035         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5036             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5037                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5038                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5039                 udelay(80);
5040         }
5041
5042         tw32_f(MAC_MODE, tp->mac_mode);
5043         udelay(40);
5044
5045         tg3_phy_eee_adjust(tp, current_link_up);
5046
5047         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5048                 /* Polled via timer. */
5049                 tw32_f(MAC_EVENT, 0);
5050         } else {
5051                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5052         }
5053         udelay(40);
5054
5055         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5056             current_link_up &&
5057             tp->link_config.active_speed == SPEED_1000 &&
5058             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5059                 udelay(120);
5060                 tw32_f(MAC_STATUS,
5061                      (MAC_STATUS_SYNC_CHANGED |
5062                       MAC_STATUS_CFG_CHANGED));
5063                 udelay(40);
5064                 tg3_write_mem(tp,
5065                               NIC_SRAM_FIRMWARE_MBOX,
5066                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5067         }
5068
5069         /* Prevent send BD corruption. */
5070         if (tg3_flag(tp, CLKREQ_BUG)) {
5071                 if (tp->link_config.active_speed == SPEED_100 ||
5072                     tp->link_config.active_speed == SPEED_10)
5073                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5074                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5075                 else
5076                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5077                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5078         }
5079
5080         tg3_test_and_report_link_chg(tp, current_link_up);
5081
5082         return 0;
5083 }
5084
5085 struct tg3_fiber_aneginfo {
5086         int state;
5087 #define ANEG_STATE_UNKNOWN              0
5088 #define ANEG_STATE_AN_ENABLE            1
5089 #define ANEG_STATE_RESTART_INIT         2
5090 #define ANEG_STATE_RESTART              3
5091 #define ANEG_STATE_DISABLE_LINK_OK      4
5092 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5093 #define ANEG_STATE_ABILITY_DETECT       6
5094 #define ANEG_STATE_ACK_DETECT_INIT      7
5095 #define ANEG_STATE_ACK_DETECT           8
5096 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5097 #define ANEG_STATE_COMPLETE_ACK         10
5098 #define ANEG_STATE_IDLE_DETECT_INIT     11
5099 #define ANEG_STATE_IDLE_DETECT          12
5100 #define ANEG_STATE_LINK_OK              13
5101 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5102 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5103
5104         u32 flags;
5105 #define MR_AN_ENABLE            0x00000001
5106 #define MR_RESTART_AN           0x00000002
5107 #define MR_AN_COMPLETE          0x00000004
5108 #define MR_PAGE_RX              0x00000008
5109 #define MR_NP_LOADED            0x00000010
5110 #define MR_TOGGLE_TX            0x00000020
5111 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5112 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5113 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5114 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5115 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5116 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5117 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5118 #define MR_TOGGLE_RX            0x00002000
5119 #define MR_NP_RX                0x00004000
5120
5121 #define MR_LINK_OK              0x80000000
5122
5123         unsigned long link_time, cur_time;
5124
5125         u32 ability_match_cfg;
5126         int ability_match_count;
5127
5128         char ability_match, idle_match, ack_match;
5129
5130         u32 txconfig, rxconfig;
5131 #define ANEG_CFG_NP             0x00000080
5132 #define ANEG_CFG_ACK            0x00000040
5133 #define ANEG_CFG_RF2            0x00000020
5134 #define ANEG_CFG_RF1            0x00000010
5135 #define ANEG_CFG_PS2            0x00000001
5136 #define ANEG_CFG_PS1            0x00008000
5137 #define ANEG_CFG_HD             0x00004000
5138 #define ANEG_CFG_FD             0x00002000
5139 #define ANEG_CFG_INVAL          0x00001f06
5140
5141 };
5142 #define ANEG_OK         0
5143 #define ANEG_DONE       1
5144 #define ANEG_TIMER_ENAB 2
5145 #define ANEG_FAILED     -1
5146
5147 #define ANEG_STATE_SETTLE_TIME  10000
5148
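     /* Advance the software 1000BASE-X autoneg state machine by one
      * tick; this is essentially the IEEE 802.3 clause 37 arbitration
      * process run from the driver, fed by the MAC's received-config
      * registers.
      */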
5149 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5150                                    struct tg3_fiber_aneginfo *ap)
5151 {
5152         u16 flowctrl;
5153         unsigned long delta;
5154         u32 rx_cfg_reg;
5155         int ret;
5156
5157         if (ap->state == ANEG_STATE_UNKNOWN) {
5158                 ap->rxconfig = 0;
5159                 ap->link_time = 0;
5160                 ap->cur_time = 0;
5161                 ap->ability_match_cfg = 0;
5162                 ap->ability_match_count = 0;
5163                 ap->ability_match = 0;
5164                 ap->idle_match = 0;
5165                 ap->ack_match = 0;
5166         }
5167         ap->cur_time++;
5168
5169         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5170                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5171
5172                 if (rx_cfg_reg != ap->ability_match_cfg) {
5173                         ap->ability_match_cfg = rx_cfg_reg;
5174                         ap->ability_match = 0;
5175                         ap->ability_match_count = 0;
5176                 } else {
5177                         if (++ap->ability_match_count > 1) {
5178                                 ap->ability_match = 1;
5179                                 ap->ability_match_cfg = rx_cfg_reg;
5180                         }
5181                 }
5182                 if (rx_cfg_reg & ANEG_CFG_ACK)
5183                         ap->ack_match = 1;
5184                 else
5185                         ap->ack_match = 0;
5186
5187                 ap->idle_match = 0;
5188         } else {
5189                 ap->idle_match = 1;
5190                 ap->ability_match_cfg = 0;
5191                 ap->ability_match_count = 0;
5192                 ap->ability_match = 0;
5193                 ap->ack_match = 0;
5194
5195                 rx_cfg_reg = 0;
5196         }
5197
5198         ap->rxconfig = rx_cfg_reg;
5199         ret = ANEG_OK;
5200
5201         switch (ap->state) {
5202         case ANEG_STATE_UNKNOWN:
5203                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5204                         ap->state = ANEG_STATE_AN_ENABLE;
5205
5206                 fallthrough;
5207         case ANEG_STATE_AN_ENABLE:
5208                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5209                 if (ap->flags & MR_AN_ENABLE) {
5210                         ap->link_time = 0;
5211                         ap->cur_time = 0;
5212                         ap->ability_match_cfg = 0;
5213                         ap->ability_match_count = 0;
5214                         ap->ability_match = 0;
5215                         ap->idle_match = 0;
5216                         ap->ack_match = 0;
5217
5218                         ap->state = ANEG_STATE_RESTART_INIT;
5219                 } else {
5220                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5221                 }
5222                 break;
5223
5224         case ANEG_STATE_RESTART_INIT:
5225                 ap->link_time = ap->cur_time;
5226                 ap->flags &= ~(MR_NP_LOADED);
5227                 ap->txconfig = 0;
5228                 tw32(MAC_TX_AUTO_NEG, 0);
5229                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230                 tw32_f(MAC_MODE, tp->mac_mode);
5231                 udelay(40);
5232
5233                 ret = ANEG_TIMER_ENAB;
5234                 ap->state = ANEG_STATE_RESTART;
5235
5236                 fallthrough;
5237         case ANEG_STATE_RESTART:
5238                 delta = ap->cur_time - ap->link_time;
5239                 if (delta > ANEG_STATE_SETTLE_TIME)
5240                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5241                 else
5242                         ret = ANEG_TIMER_ENAB;
5243                 break;
5244
5245         case ANEG_STATE_DISABLE_LINK_OK:
5246                 ret = ANEG_DONE;
5247                 break;
5248
5249         case ANEG_STATE_ABILITY_DETECT_INIT:
5250                 ap->flags &= ~(MR_TOGGLE_TX);
5251                 ap->txconfig = ANEG_CFG_FD;
5252                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5253                 if (flowctrl & ADVERTISE_1000XPAUSE)
5254                         ap->txconfig |= ANEG_CFG_PS1;
5255                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5256                         ap->txconfig |= ANEG_CFG_PS2;
5257                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5258                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5259                 tw32_f(MAC_MODE, tp->mac_mode);
5260                 udelay(40);
5261
5262                 ap->state = ANEG_STATE_ABILITY_DETECT;
5263                 break;
5264
5265         case ANEG_STATE_ABILITY_DETECT:
5266                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5267                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5268                 break;
5269
5270         case ANEG_STATE_ACK_DETECT_INIT:
5271                 ap->txconfig |= ANEG_CFG_ACK;
5272                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5273                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5274                 tw32_f(MAC_MODE, tp->mac_mode);
5275                 udelay(40);
5276
5277                 ap->state = ANEG_STATE_ACK_DETECT;
5278
5279                 fallthrough;
5280         case ANEG_STATE_ACK_DETECT:
5281                 if (ap->ack_match != 0) {
5282                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5283                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5284                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5285                         } else {
5286                                 ap->state = ANEG_STATE_AN_ENABLE;
5287                         }
5288                 } else if (ap->ability_match != 0 &&
5289                            ap->rxconfig == 0) {
5290                         ap->state = ANEG_STATE_AN_ENABLE;
5291                 }
5292                 break;
5293
5294         case ANEG_STATE_COMPLETE_ACK_INIT:
5295                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5296                         ret = ANEG_FAILED;
5297                         break;
5298                 }
5299                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5300                                MR_LP_ADV_HALF_DUPLEX |
5301                                MR_LP_ADV_SYM_PAUSE |
5302                                MR_LP_ADV_ASYM_PAUSE |
5303                                MR_LP_ADV_REMOTE_FAULT1 |
5304                                MR_LP_ADV_REMOTE_FAULT2 |
5305                                MR_LP_ADV_NEXT_PAGE |
5306                                MR_TOGGLE_RX |
5307                                MR_NP_RX);
5308                 if (ap->rxconfig & ANEG_CFG_FD)
5309                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5310                 if (ap->rxconfig & ANEG_CFG_HD)
5311                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5312                 if (ap->rxconfig & ANEG_CFG_PS1)
5313                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5314                 if (ap->rxconfig & ANEG_CFG_PS2)
5315                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5316                 if (ap->rxconfig & ANEG_CFG_RF1)
5317                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5318                 if (ap->rxconfig & ANEG_CFG_RF2)
5319                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5320                 if (ap->rxconfig & ANEG_CFG_NP)
5321                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5322
5323                 ap->link_time = ap->cur_time;
5324
5325                 ap->flags ^= (MR_TOGGLE_TX);
5326                 if (ap->rxconfig & 0x0008)
5327                         ap->flags |= MR_TOGGLE_RX;
5328                 if (ap->rxconfig & ANEG_CFG_NP)
5329                         ap->flags |= MR_NP_RX;
5330                 ap->flags |= MR_PAGE_RX;
5331
5332                 ap->state = ANEG_STATE_COMPLETE_ACK;
5333                 ret = ANEG_TIMER_ENAB;
5334                 break;
5335
5336         case ANEG_STATE_COMPLETE_ACK:
5337                 if (ap->ability_match != 0 &&
5338                     ap->rxconfig == 0) {
5339                         ap->state = ANEG_STATE_AN_ENABLE;
5340                         break;
5341                 }
5342                 delta = ap->cur_time - ap->link_time;
5343                 if (delta > ANEG_STATE_SETTLE_TIME) {
5344                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5345                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346                         } else {
5347                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5348                                     !(ap->flags & MR_NP_RX)) {
5349                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5350                                 } else {
5351                                         ret = ANEG_FAILED;
5352                                 }
5353                         }
5354                 }
5355                 break;
5356
5357         case ANEG_STATE_IDLE_DETECT_INIT:
5358                 ap->link_time = ap->cur_time;
5359                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5360                 tw32_f(MAC_MODE, tp->mac_mode);
5361                 udelay(40);
5362
5363                 ap->state = ANEG_STATE_IDLE_DETECT;
5364                 ret = ANEG_TIMER_ENAB;
5365                 break;
5366
5367         case ANEG_STATE_IDLE_DETECT:
5368                 if (ap->ability_match != 0 &&
5369                     ap->rxconfig == 0) {
5370                         ap->state = ANEG_STATE_AN_ENABLE;
5371                         break;
5372                 }
5373                 delta = ap->cur_time - ap->link_time;
5374                 if (delta > ANEG_STATE_SETTLE_TIME) {
5375                         /* XXX another gem from the Broadcom driver :( */
5376                         ap->state = ANEG_STATE_LINK_OK;
5377                 }
5378                 break;
5379
5380         case ANEG_STATE_LINK_OK:
5381                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5382                 ret = ANEG_DONE;
5383                 break;
5384
5385         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5386                 /* ??? unimplemented */
5387                 break;
5388
5389         case ANEG_STATE_NEXT_PAGE_WAIT:
5390                 /* ??? unimplemented */
5391                 break;
5392
5393         default:
5394                 ret = ANEG_FAILED;
5395                 break;
5396         }
5397
5398         return ret;
5399 }
5400
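     /* Drive the software autoneg state machine to completion,
      * bounded at roughly 195 ms (195000 ticks with a 1 us delay);
      * returns nonzero when autoneg finished successfully.
      */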
5401 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5402 {
5403         int res = 0;
5404         struct tg3_fiber_aneginfo aninfo;
5405         int status = ANEG_FAILED;
5406         unsigned int tick;
5407         u32 tmp;
5408
5409         tw32_f(MAC_TX_AUTO_NEG, 0);
5410
5411         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5412         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5413         udelay(40);
5414
5415         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5416         udelay(40);
5417
5418         memset(&aninfo, 0, sizeof(aninfo));
5419         aninfo.flags |= MR_AN_ENABLE;
5420         aninfo.state = ANEG_STATE_UNKNOWN;
5421         aninfo.cur_time = 0;
5422         tick = 0;
5423         while (++tick < 195000) {
5424                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5425                 if (status == ANEG_DONE || status == ANEG_FAILED)
5426                         break;
5427
5428                 udelay(1);
5429         }
5430
5431         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5432         tw32_f(MAC_MODE, tp->mac_mode);
5433         udelay(40);
5434
5435         *txflags = aninfo.txconfig;
5436         *rxflags = aninfo.flags;
5437
5438         if (status == ANEG_DONE &&
5439             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5440                              MR_LP_ADV_FULL_DUPLEX)))
5441                 res = 1;
5442
5443         return res;
5444 }
5445
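     /* One-time initialization of the BCM8002 SerDes PHY through its
      * vendor-specific registers (PLL lock range, comdet, power-on
      * reset sequencing).
      */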
5446 static void tg3_init_bcm8002(struct tg3 *tp)
5447 {
5448         u32 mac_status = tr32(MAC_STATUS);
5449         int i;
5450
5451         /* Reset when initializing for the first time, or when we have a link. */
5452         if (tg3_flag(tp, INIT_COMPLETE) &&
5453             !(mac_status & MAC_STATUS_PCS_SYNCED))
5454                 return;
5455
5456         /* Set PLL lock range. */
5457         tg3_writephy(tp, 0x16, 0x8007);
5458
5459         /* SW reset */
5460         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5461
5462         /* Wait for reset to complete. */
5463         /* XXX schedule_timeout() ... */
5464         for (i = 0; i < 500; i++)
5465                 udelay(10);
5466
5467         /* Config mode; select PMA/Ch 1 regs. */
5468         tg3_writephy(tp, 0x10, 0x8411);
5469
5470         /* Enable auto-lock and comdet, select txclk for tx. */
5471         tg3_writephy(tp, 0x11, 0x0a10);
5472
5473         tg3_writephy(tp, 0x18, 0x00a0);
5474         tg3_writephy(tp, 0x16, 0x41ff);
5475
5476         /* Assert and deassert POR. */
5477         tg3_writephy(tp, 0x13, 0x0400);
5478         udelay(40);
5479         tg3_writephy(tp, 0x13, 0x0000);
5480
5481         tg3_writephy(tp, 0x11, 0x0a50);
5482         udelay(40);
5483         tg3_writephy(tp, 0x11, 0x0a10);
5484
5485         /* Wait for signal to stabilize */
5486         /* XXX schedule_timeout() ... */
5487         for (i = 0; i < 15000; i++)
5488                 udelay(10);
5489
5490         /* Deselect the channel register so we can read the PHYID
5491          * later.
5492          */
5493         tg3_writephy(tp, 0x10, 0x8011);
5494 }
5495
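     /* Fiber link setup using the MAC's hardware SG DIG autoneg
      * engine: handles forced mode, (re)starting autoneg, and falling
      * back to parallel detection when no config words are received.
      */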
5496 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5497 {
5498         u16 flowctrl;
5499         bool current_link_up;
5500         u32 sg_dig_ctrl, sg_dig_status;
5501         u32 serdes_cfg, expected_sg_dig_ctrl;
5502         int workaround, port_a;
5503
5504         serdes_cfg = 0;
5505         expected_sg_dig_ctrl = 0;
5506         workaround = 0;
5507         port_a = 1;
5508         current_link_up = false;
5509
5510         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5512                 workaround = 1;
5513                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514                         port_a = 0;
5515
5516                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517                 /* preserve bits 20-23 for voltage regulator */
5518                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519         }
5520
5521         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522
5523         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5525                         if (workaround) {
5526                                 u32 val = serdes_cfg;
5527
5528                                 if (port_a)
5529                                         val |= 0xc010000;
5530                                 else
5531                                         val |= 0x4010000;
5532                                 tw32_f(MAC_SERDES_CFG, val);
5533                         }
5534
5535                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536                 }
5537                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538                         tg3_setup_flow_control(tp, 0, 0);
5539                         current_link_up = true;
5540                 }
5541                 goto out;
5542         }
5543
5544         /* Want auto-negotiation.  */
5545         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546
5547         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548         if (flowctrl & ADVERTISE_1000XPAUSE)
5549                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552
5553         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555                     tp->serdes_counter &&
5556                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557                                     MAC_STATUS_RCVD_CFG)) ==
5558                      MAC_STATUS_PCS_SYNCED)) {
5559                         tp->serdes_counter--;
5560                         current_link_up = true;
5561                         goto out;
5562                 }
5563 restart_autoneg:
5564                 if (workaround)
5565                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5567                 udelay(5);
5568                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569
5570                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573                                  MAC_STATUS_SIGNAL_DET)) {
5574                 sg_dig_status = tr32(SG_DIG_STATUS);
5575                 mac_status = tr32(MAC_STATUS);
5576
5577                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579                         u32 local_adv = 0, remote_adv = 0;
5580
5581                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582                                 local_adv |= ADVERTISE_1000XPAUSE;
5583                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5585
5586                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587                                 remote_adv |= LPA_1000XPAUSE;
5588                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5590
5591                         tp->link_config.rmt_adv =
5592                                            mii_adv_to_ethtool_adv_x(remote_adv);
5593
5594                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5595                         current_link_up = true;
5596                         tp->serdes_counter = 0;
5597                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599                         if (tp->serdes_counter)
5600                                 tp->serdes_counter--;
5601                         else {
5602                                 if (workaround) {
5603                                         u32 val = serdes_cfg;
5604
5605                                         if (port_a)
5606                                                 val |= 0xc010000;
5607                                         else
5608                                                 val |= 0x4010000;
5609
5610                                         tw32_f(MAC_SERDES_CFG, val);
5611                                 }
5612
5613                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614                                 udelay(40);
5615
5616                                 /* Link parallel detection - the link is up
5617                                  * only if we have PCS_SYNC and are not
5618                                  * receiving config code words. */
5619                                 mac_status = tr32(MAC_STATUS);
5620                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622                                         tg3_setup_flow_control(tp, 0, 0);
5623                                         current_link_up = true;
5624                                         tp->phy_flags |=
5625                                                 TG3_PHYFLG_PARALLEL_DETECT;
5626                                         tp->serdes_counter =
5627                                                 SERDES_PARALLEL_DET_TIMEOUT;
5628                                 } else
5629                                         goto restart_autoneg;
5630                         }
5631                 }
5632         } else {
5633                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5635         }
5636
5637 out:
5638         return current_link_up;
5639 }
5640
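     /* Fiber link setup without the hardware autoneg engine: run the
      * software state machine, or simply force a 1000FD link when
      * autoneg is disabled.
      */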
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 {
5643         bool current_link_up = false;
5644
5645         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646                 goto out;
5647
5648         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649                 u32 txflags, rxflags;
5650                 int i;
5651
5652                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653                         u32 local_adv = 0, remote_adv = 0;
5654
5655                         if (txflags & ANEG_CFG_PS1)
5656                                 local_adv |= ADVERTISE_1000XPAUSE;
5657                         if (txflags & ANEG_CFG_PS2)
5658                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5659
5660                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661                                 remote_adv |= LPA_1000XPAUSE;
5662                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5664
5665                         tp->link_config.rmt_adv =
5666                                            mii_adv_to_ethtool_adv_x(remote_adv);
5667
5668                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5669
5670                         current_link_up = true;
5671                 }
5672                 for (i = 0; i < 30; i++) {
5673                         udelay(20);
5674                         tw32_f(MAC_STATUS,
5675                                (MAC_STATUS_SYNC_CHANGED |
5676                                 MAC_STATUS_CFG_CHANGED));
5677                         udelay(40);
5678                         if ((tr32(MAC_STATUS) &
5679                              (MAC_STATUS_SYNC_CHANGED |
5680                               MAC_STATUS_CFG_CHANGED)) == 0)
5681                                 break;
5682                 }
5683
5684                 mac_status = tr32(MAC_STATUS);
5685                 if (!current_link_up &&
5686                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687                     !(mac_status & MAC_STATUS_RCVD_CFG))
5688                         current_link_up = true;
5689         } else {
5690                 tg3_setup_flow_control(tp, 0, 0);
5691
5692                 /* Forcing 1000FD link up. */
5693                 current_link_up = true;
5694
5695                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696                 udelay(40);
5697
5698                 tw32_f(MAC_MODE, tp->mac_mode);
5699                 udelay(40);
5700         }
5701
5702 out:
5703         return current_link_up;
5704 }
5705
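     /* Top-level link setup for TBI (fiber) devices: pick the hardware
      * or by-hand autoneg path, let the MAC status settle, then update
      * the link state and LEDs and report any change.
      */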
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 {
5708         u32 orig_pause_cfg;
5709         u32 orig_active_speed;
5710         u8 orig_active_duplex;
5711         u32 mac_status;
5712         bool current_link_up;
5713         int i;
5714
5715         orig_pause_cfg = tp->link_config.active_flowctrl;
5716         orig_active_speed = tp->link_config.active_speed;
5717         orig_active_duplex = tp->link_config.active_duplex;
5718
5719         if (!tg3_flag(tp, HW_AUTONEG) &&
5720             tp->link_up &&
5721             tg3_flag(tp, INIT_COMPLETE)) {
5722                 mac_status = tr32(MAC_STATUS);
5723                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724                                MAC_STATUS_SIGNAL_DET |
5725                                MAC_STATUS_CFG_CHANGED |
5726                                MAC_STATUS_RCVD_CFG);
5727                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728                                    MAC_STATUS_SIGNAL_DET)) {
5729                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730                                             MAC_STATUS_CFG_CHANGED));
5731                         return 0;
5732                 }
5733         }
5734
5735         tw32_f(MAC_TX_AUTO_NEG, 0);
5736
5737         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739         tw32_f(MAC_MODE, tp->mac_mode);
5740         udelay(40);
5741
5742         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743                 tg3_init_bcm8002(tp);
5744
5745         /* Enable link change events even when polling the serdes. */
5746         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747         udelay(40);
5748
5749         current_link_up = false;
5750         tp->link_config.rmt_adv = 0;
5751         mac_status = tr32(MAC_STATUS);
5752
5753         if (tg3_flag(tp, HW_AUTONEG))
5754                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755         else
5756                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758         tp->napi[0].hw_status->status =
5759                 (SD_STATUS_UPDATED |
5760                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762         for (i = 0; i < 100; i++) {
5763                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764                                     MAC_STATUS_CFG_CHANGED));
5765                 udelay(5);
5766                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767                                          MAC_STATUS_CFG_CHANGED |
5768                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769                         break;
5770         }
5771
5772         mac_status = tr32(MAC_STATUS);
5773         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774                 current_link_up = false;
5775                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776                     tp->serdes_counter == 0) {
5777                         tw32_f(MAC_MODE, (tp->mac_mode |
5778                                           MAC_MODE_SEND_CONFIGS));
5779                         udelay(1);
5780                         tw32_f(MAC_MODE, tp->mac_mode);
5781                 }
5782         }
5783
5784         if (current_link_up) {
5785                 tp->link_config.active_speed = SPEED_1000;
5786                 tp->link_config.active_duplex = DUPLEX_FULL;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_1000MBPS_ON));
5790         } else {
5791                 tp->link_config.active_speed = SPEED_UNKNOWN;
5792                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794                                     LED_CTRL_LNKLED_OVERRIDE |
5795                                     LED_CTRL_TRAFFIC_OVERRIDE));
5796         }
5797
5798         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800                 if (orig_pause_cfg != now_pause_cfg ||
5801                     orig_active_speed != tp->link_config.active_speed ||
5802                     orig_active_duplex != tp->link_config.active_duplex)
5803                         tg3_link_report(tp);
5804         }
5805
5806         return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811         int err = 0;
5812         u32 bmsr, bmcr;
5813         u32 current_speed = SPEED_UNKNOWN;
5814         u8 current_duplex = DUPLEX_UNKNOWN;
5815         bool current_link_up = false;
5816         u32 local_adv, remote_adv, sgsr;
5817
5818         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821              (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823                 if (force_reset)
5824                         tg3_phy_reset(tp);
5825
5826                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830                 } else {
5831                         current_link_up = true;
5832                         if (sgsr & SERDES_TG3_SPEED_1000) {
5833                                 current_speed = SPEED_1000;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5836                                 current_speed = SPEED_100;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         } else {
5839                                 current_speed = SPEED_10;
5840                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841                         }
5842
5843                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844                                 current_duplex = DUPLEX_FULL;
5845                         else
5846                                 current_duplex = DUPLEX_HALF;
5847                 }
5848
5849                 tw32_f(MAC_MODE, tp->mac_mode);
5850                 udelay(40);
5851
5852                 tg3_clear_mac_status(tp);
5853
5854                 goto fiber_setup_done;
5855         }
5856
5857         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858         tw32_f(MAC_MODE, tp->mac_mode);
5859         udelay(40);
5860
5861         tg3_clear_mac_status(tp);
5862
5863         if (force_reset)
5864                 tg3_phy_reset(tp);
5865
5866         tp->link_config.rmt_adv = 0;
5867
5868         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872                         bmsr |= BMSR_LSTATUS;
5873                 else
5874                         bmsr &= ~BMSR_LSTATUS;
5875         }
5876
5877         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881                 /* do nothing, just check for link up at the end */
5882         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883                 u32 adv, newadv;
5884
5885                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887                                  ADVERTISE_1000XPAUSE |
5888                                  ADVERTISE_1000XPSE_ASYM |
5889                                  ADVERTISE_SLCT);
5890
5891                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5896                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897                         tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903                         return err;
5904                 }
5905         } else {
5906                 u32 new_bmcr;
5907
5908                 bmcr &= ~BMCR_SPEED1000;
5909                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911                 if (tp->link_config.duplex == DUPLEX_FULL)
5912                         new_bmcr |= BMCR_FULLDPLX;
5913
5914                 if (new_bmcr != bmcr) {
5915                         /* BMCR_SPEED1000 is a reserved bit that needs
5916                          * to be set on write.
5917                          */
5918                         new_bmcr |= BMCR_SPEED1000;
5919
5920                         /* Force a linkdown */
5921                         if (tp->link_up) {
5922                                 u32 adv;
5923
5924                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925                                 adv &= ~(ADVERTISE_1000XFULL |
5926                                          ADVERTISE_1000XHALF |
5927                                          ADVERTISE_SLCT);
5928                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5929                                 tg3_writephy(tp, MII_BMCR, bmcr |
5930                                                            BMCR_ANRESTART |
5931                                                            BMCR_ANENABLE);
5932                                 udelay(10);
5933                                 tg3_carrier_off(tp);
5934                         }
5935                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5936                         bmcr = new_bmcr;
5937                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941                                         bmsr |= BMSR_LSTATUS;
5942                                 else
5943                                         bmsr &= ~BMSR_LSTATUS;
5944                         }
5945                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946                 }
5947         }
5948
5949         if (bmsr & BMSR_LSTATUS) {
5950                 current_speed = SPEED_1000;
5951                 current_link_up = true;
5952                 if (bmcr & BMCR_FULLDPLX)
5953                         current_duplex = DUPLEX_FULL;
5954                 else
5955                         current_duplex = DUPLEX_HALF;
5956
5957                 local_adv = 0;
5958                 remote_adv = 0;
5959
5960                 if (bmcr & BMCR_ANENABLE) {
5961                         u32 common;
5962
5963                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965                         common = local_adv & remote_adv;
5966                         if (common & (ADVERTISE_1000XHALF |
5967                                       ADVERTISE_1000XFULL)) {
5968                                 if (common & ADVERTISE_1000XFULL)
5969                                         current_duplex = DUPLEX_FULL;
5970                                 else
5971                                         current_duplex = DUPLEX_HALF;
5972
5973                                 tp->link_config.rmt_adv =
5974                                            mii_adv_to_ethtool_adv_x(remote_adv);
5975                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5976                                 /* Link is up via parallel detect */
5977                         } else {
5978                                 current_link_up = false;
5979                         }
5980                 }
5981         }
5982
5983 fiber_setup_done:
5984         if (current_link_up && current_duplex == DUPLEX_FULL)
5985                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988         if (tp->link_config.active_duplex == DUPLEX_HALF)
5989                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991         tw32_f(MAC_MODE, tp->mac_mode);
5992         udelay(40);
5993
5994         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996         tp->link_config.active_speed = current_speed;
5997         tp->link_config.active_duplex = current_duplex;
5998
5999         tg3_test_and_report_link_chg(tp, current_link_up);
6000         return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005         if (tp->serdes_counter) {
6006                 /* Give autoneg time to complete. */
6007                 tp->serdes_counter--;
6008                 return;
6009         }
6010
6011         if (!tp->link_up &&
6012             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013                 u32 bmcr;
6014
6015                 tg3_readphy(tp, MII_BMCR, &bmcr);
6016                 if (bmcr & BMCR_ANENABLE) {
6017                         u32 phy1, phy2;
6018
6019                         /* Select shadow register 0x1f */
6020                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023                         /* Select expansion interrupt status register */
6024                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025                                          MII_TG3_DSP_EXP1_INT_STAT);
6026                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030                                 /* We have signal detect but are not receiving
6031                                  * config code words, so the link is up via
6032                                  * parallel detection.
6033                                  */
6034
6035                                 bmcr &= ~BMCR_ANENABLE;
6036                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037                                 tg3_writephy(tp, MII_BMCR, bmcr);
6038                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039                         }
6040                 }
6041         } else if (tp->link_up &&
6042                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044                 u32 phy2;
6045
6046                 /* Select expansion interrupt status register */
6047                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048                                  MII_TG3_DSP_EXP1_INT_STAT);
6049                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050                 if (phy2 & 0x20) {
6051                         u32 bmcr;
6052
6053                         /* Config code words received, turn on autoneg. */
6054                         tg3_readphy(tp, MII_BMCR, &bmcr);
6055                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059                 }
6060         }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065         u32 val;
6066         int err;
6067
6068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069                 err = tg3_setup_fiber_phy(tp, force_reset);
6070         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072         else
6073                 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076                 u32 scale;
6077
6078                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080                         scale = 65;
6081                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082                         scale = 6;
6083                 else
6084                         scale = 12;
6085
6086                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088                 tw32(GRC_MISC_CFG, val);
6089         }
6090
6091         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092               (6 << TX_LENGTHS_IPG_SHIFT);
6093         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094             tg3_asic_rev(tp) == ASIC_REV_5762)
6095                 val |= tr32(MAC_TX_LENGTHS) &
6096                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099         if (tp->link_config.active_speed == SPEED_1000 &&
6100             tp->link_config.active_duplex == DUPLEX_HALF)
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103         else
6104                 tw32(MAC_TX_LENGTHS, val |
6105                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107         if (!tg3_flag(tp, 5705_PLUS)) {
6108                 if (tp->link_up) {
6109                         tw32(HOSTCC_STAT_COAL_TICKS,
6110                              tp->coal.stats_block_coalesce_usecs);
6111                 } else {
6112                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113                 }
6114         }
6115
6116         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117                 val = tr32(PCIE_PWR_MGMT_THRESH);
6118                 if (!tp->link_up)
6119                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120                               tp->pwrmgmt_thresh;
6121                 else
6122                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123                 tw32(PCIE_PWR_MGMT_THRESH, val);
6124         }
6125
6126         return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132         u64 stamp;
6133
6134         ptp_read_system_prets(sts);
6135         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136         ptp_read_system_postts(sts);
6137         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139         return stamp;
6140 }
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
6147         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157         struct tg3 *tp = netdev_priv(dev);
6158
6159         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6161                                 SOF_TIMESTAMPING_SOFTWARE;
6162
6163         if (tg3_flag(tp, PTP_CAPABLE)) {
6164                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165                                         SOF_TIMESTAMPING_RX_HARDWARE |
6166                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6167         }
6168
6169         if (tp->ptp_clock)
6170                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171         else
6172                 info->phc_index = -1;
6173
6174         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180         return 0;
6181 }
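
/* Illustrative user-space counterpart (not part of this driver): enabling
 * the hardware timestamping modes advertised by tg3_get_ts_info() above.
 * A minimal sketch using the standard SIOCSHWTSTAMP ioctl from
 * <linux/net_tstamp.h>; the interface name, socket setup and error
 * handling are placeholders.
 *
 *      #include <string.h>
 *      #include <sys/ioctl.h>
 *      #include <net/if.h>
 *      #include <linux/net_tstamp.h>
 *      #include <linux/sockios.h>
 *
 *      int enable_hw_tstamp(int sock, const char *ifname)
 *      {
 *              struct hwtstamp_config cfg = { 0 };
 *              struct ifreq ifr = { 0 };
 *
 *              cfg.tx_type = HWTSTAMP_TX_ON;
 *              cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
 *              strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
 *              ifr.ifr_data = (void *)&cfg;
 *              return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *      }
 *
 * "sock" can be any open socket (e.g. an AF_INET datagram socket); the
 * kernel writes the filter mode it actually applied back into cfg.
 */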
6182
6183 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 {
6185         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186         bool neg_adj = false;
6187         u32 correction = 0;
6188
6189         if (ppb < 0) {
6190                 neg_adj = true;
6191                 ppb = -ppb;
6192         }
6193
6194         /* Frequency adjustment is performed using hardware with a 24 bit
6195          * accumulator and a programmable correction value. On each clk, the
6196          * correction value gets added to the accumulator and when it
6197          * overflows, the time counter is incremented/decremented.
6198          *
6199          * So conversion from ppb to correction value is
6200          *              ppb * (1 << 24) / 1000000000
6201          */
6202         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6203                      TG3_EAV_REF_CLK_CORRECT_MASK;
6204
6205         tg3_full_lock(tp, 0);
6206
6207         if (correction)
6208                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6209                      TG3_EAV_REF_CLK_CORRECT_EN |
6210                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6211         else
6212                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213
6214         tg3_full_unlock(tp);
6215
6216         return 0;
6217 }
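
/* Worked example of the conversion above (illustrative numbers): a
 * requested adjustment of +100000 ppb (100 ppm, well under the 250000000
 * ppb max_adj advertised below) gives
 *
 *      correction = 100000 * (1 << 24) / 1000000000
 *                 = 1677721600000 / 1000000000
 *                 = 1677        (div_u64 truncates the fractional part)
 *
 * so 1677 is added to the 24 bit accumulator on every clock, and each
 * accumulator overflow nudges the time counter by one.
 */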
6218
6219 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 {
6221         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223         tg3_full_lock(tp, 0);
6224         tp->ptp_adjust += delta;
6225         tg3_full_unlock(tp);
6226
6227         return 0;
6228 }
6229
6230 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6231                             struct ptp_system_timestamp *sts)
6232 {
6233         u64 ns;
6234         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235
6236         tg3_full_lock(tp, 0);
6237         ns = tg3_refclk_read(tp, sts);
6238         ns += tp->ptp_adjust;
6239         tg3_full_unlock(tp);
6240
6241         *ts = ns_to_timespec64(ns);
6242
6243         return 0;
6244 }
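
/* Illustrative user-space counterpart (not part of this driver): once the
 * PHC is registered, a clock_gettime() on the dynamic POSIX clock exported
 * through /dev/ptpN ends up in tg3_ptp_gettimex() above.  A minimal
 * sketch; the device path is a placeholder, and FD_TO_CLOCKID is the
 * standard dynamic clock id encoding.
 *
 *      #include <fcntl.h>
 *      #include <stdio.h>
 *      #include <time.h>
 *
 *      #define CLOCKFD 3
 *      #define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)
 *
 *      int read_phc(const char *dev)   // e.g. "/dev/ptp0"
 *      {
 *              struct timespec ts;
 *              int fd = open(dev, O_RDONLY);
 *
 *              if (fd < 0 || clock_gettime(FD_TO_CLOCKID(fd), &ts))
 *                      return -1;
 *              printf("phc time: %lld.%09ld\n",
 *                     (long long)ts.tv_sec, ts.tv_nsec);
 *              return 0;
 *      }
 */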
6245
6246 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6247                            const struct timespec64 *ts)
6248 {
6249         u64 ns;
6250         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6251
6252         ns = timespec64_to_ns(ts);
6253
6254         tg3_full_lock(tp, 0);
6255         tg3_refclk_write(tp, ns);
6256         tp->ptp_adjust = 0;
6257         tg3_full_unlock(tp);
6258
6259         return 0;
6260 }
6261
6262 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6263                           struct ptp_clock_request *rq, int on)
6264 {
6265         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6266         u32 clock_ctl;
6267         int rval = 0;
6268
6269         switch (rq->type) {
6270         case PTP_CLK_REQ_PEROUT:
6271                 /* Reject requests with unsupported flags */
6272                 if (rq->perout.flags)
6273                         return -EOPNOTSUPP;
6274
6275                 if (rq->perout.index != 0)
6276                         return -EINVAL;
6277
6278                 tg3_full_lock(tp, 0);
6279                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6280                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6281
6282                 if (on) {
6283                         u64 nsec;
6284
6285                         nsec = rq->perout.start.sec * 1000000000ULL +
6286                                rq->perout.start.nsec;
6287
6288                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6289                                 netdev_warn(tp->dev,
6290                                             "Device supports only a one-shot timesync output, period must be 0\n");
6291                                 rval = -EINVAL;
6292                                 goto err_out;
6293                         }
6294
6295                         if (nsec & (1ULL << 63)) {
6296                                 netdev_warn(tp->dev,
6297                                             "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6298                                 rval = -EINVAL;
6299                                 goto err_out;
6300                         }
6301
6302                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6303                         tw32(TG3_EAV_WATCHDOG0_MSB,
6304                              TG3_EAV_WATCHDOG0_EN |
6305                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6306
6307                         tw32(TG3_EAV_REF_CLCK_CTL,
6308                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6309                 } else {
6310                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6311                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6312                 }
6313
6314 err_out:
6315                 tg3_full_unlock(tp);
6316                 return rval;
6317
6318         default:
6319                 break;
6320         }
6321
6322         return -EOPNOTSUPP;
6323 }
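
/* Illustrative user-space counterpart (not part of this driver): arming
 * the one-shot timesync output serviced by tg3_ptp_enable() above, via the
 * standard PTP_PEROUT_REQUEST chardev ioctl.  Note the constraints the
 * handler enforces: index 0 only, no flags, and a zero period (one-shot).
 * A minimal sketch; ptp_fd is an open /dev/ptpN descriptor.
 *
 *      #include <sys/ioctl.h>
 *      #include <linux/ptp_clock.h>
 *
 *      int arm_one_shot(int ptp_fd, long long start_sec)
 *      {
 *              struct ptp_perout_request req = { 0 };
 *
 *              req.index = 0;
 *              req.start.sec = start_sec;
 *              req.start.nsec = 0;
 *              req.period.sec = 0;     // must stay 0: one-shot only
 *              req.period.nsec = 0;
 *              return ioctl(ptp_fd, PTP_PEROUT_REQUEST, &req);
 *      }
 */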
6324
6325 static const struct ptp_clock_info tg3_ptp_caps = {
6326         .owner          = THIS_MODULE,
6327         .name           = "tg3 clock",
6328         .max_adj        = 250000000,
6329         .n_alarm        = 0,
6330         .n_ext_ts       = 0,
6331         .n_per_out      = 1,
6332         .n_pins         = 0,
6333         .pps            = 0,
6334         .adjfreq        = tg3_ptp_adjfreq,
6335         .adjtime        = tg3_ptp_adjtime,
6336         .gettimex64     = tg3_ptp_gettimex,
6337         .settime64      = tg3_ptp_settime,
6338         .enable         = tg3_ptp_enable,
6339 };
6340
6341 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6342                                      struct skb_shared_hwtstamps *timestamp)
6343 {
6344         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6345         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6346                                            tp->ptp_adjust);
6347 }
6348
6349 /* tp->lock must be held */
6350 static void tg3_ptp_init(struct tg3 *tp)
6351 {
6352         if (!tg3_flag(tp, PTP_CAPABLE))
6353                 return;
6354
6355         /* Initialize the hardware clock to the system time. */
6356         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6357         tp->ptp_adjust = 0;
6358         tp->ptp_info = tg3_ptp_caps;
6359 }
6360
6361 /* tp->lock must be held */
6362 static void tg3_ptp_resume(struct tg3 *tp)
6363 {
6364         if (!tg3_flag(tp, PTP_CAPABLE))
6365                 return;
6366
6367         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6368         tp->ptp_adjust = 0;
6369 }
6370
6371 static void tg3_ptp_fini(struct tg3 *tp)
6372 {
6373         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6374                 return;
6375
6376         ptp_clock_unregister(tp->ptp_clock);
6377         tp->ptp_clock = NULL;
6378         tp->ptp_adjust = 0;
6379 }
6380
6381 static inline int tg3_irq_sync(struct tg3 *tp)
6382 {
6383         return tp->irq_sync;
6384 }
6385
6386 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6387 {
6388         int i;
6389
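        /* Advance dst by the caller's register offset so that each value
         * lands at the array index matching its register offset, i.e. at
         * dst[reg_off / 4]; callers can thus dump several register ranges
         * into one buffer keyed by offset.
         */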
6390         dst = (u32 *)((u8 *)dst + off);
6391         for (i = 0; i < len; i += sizeof(u32))
6392                 *dst++ = tr32(off + i);
6393 }
6394
6395 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6396 {
6397         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6398         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6399         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6400         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6401         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6402         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6403         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6404         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6405         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6406         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6407         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6408         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6409         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6410         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6412         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6413         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6414         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6415         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6416
6417         if (tg3_flag(tp, SUPPORT_MSIX))
6418                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6419
6420         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6421         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6422         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6423         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6424         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6425         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6426         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6427         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6428
6429         if (!tg3_flag(tp, 5705_PLUS)) {
6430                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6431                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6432                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6433         }
6434
6435         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6436         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6437         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6438         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6439         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6440
6441         if (tg3_flag(tp, NVRAM))
6442                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6443 }
6444
6445 static void tg3_dump_state(struct tg3 *tp)
6446 {
6447         int i;
6448         u32 *regs;
6449
6450         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6451         if (!regs)
6452                 return;
6453
6454         if (tg3_flag(tp, PCI_EXPRESS)) {
6455                 /* Read up to but not including private PCI registers */
6456                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6457                         regs[i / sizeof(u32)] = tr32(i);
6458         } else
6459                 tg3_dump_legacy_regs(tp, regs);
6460
6461         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6462                 if (!regs[i + 0] && !regs[i + 1] &&
6463                     !regs[i + 2] && !regs[i + 3])
6464                         continue;
6465
6466                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6467                            i * 4,
6468                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6469         }
6470
6471         kfree(regs);
6472
6473         for (i = 0; i < tp->irq_cnt; i++) {
6474                 struct tg3_napi *tnapi = &tp->napi[i];
6475
6476                 /* SW status block */
6477                 netdev_err(tp->dev,
6478                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6479                            i,
6480                            tnapi->hw_status->status,
6481                            tnapi->hw_status->status_tag,
6482                            tnapi->hw_status->rx_jumbo_consumer,
6483                            tnapi->hw_status->rx_consumer,
6484                            tnapi->hw_status->rx_mini_consumer,
6485                            tnapi->hw_status->idx[0].rx_producer,
6486                            tnapi->hw_status->idx[0].tx_consumer);
6487
6488                 netdev_err(tp->dev,
6489                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6490                            i,
6491                            tnapi->last_tag, tnapi->last_irq_tag,
6492                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6493                            tnapi->rx_rcb_ptr,
6494                            tnapi->prodring.rx_std_prod_idx,
6495                            tnapi->prodring.rx_std_cons_idx,
6496                            tnapi->prodring.rx_jmb_prod_idx,
6497                            tnapi->prodring.rx_jmb_cons_idx);
6498         }
6499 }
6500
6501 /* This is called whenever we suspect that the system chipset is re-
6502  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6503  * is bogus tx completions. We try to recover by setting the
6504  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6505  * in the workqueue.
6506  */
6507 static void tg3_tx_recover(struct tg3 *tp)
6508 {
6509         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6510                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6511
6512         netdev_warn(tp->dev,
6513                     "The system may be re-ordering memory-mapped I/O "
6514                     "cycles to the network device, attempting to recover. "
6515                     "Please report the problem to the driver maintainer "
6516                     "and include system chipset information.\n");
6517
6518         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6519 }
6520
6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6522 {
6523         /* Tell compiler to fetch tx indices from memory. */
6524         barrier();
6525         return tnapi->tx_pending -
6526                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6527 }
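
/* Worked example of the ring arithmetic above (illustrative numbers,
 * assuming tx_pending = 511 and TG3_TX_RING_SIZE = 512): with
 * tx_prod = 10 and tx_cons = 500 the producer has wrapped, and
 *
 *      (10 - 500) & (512 - 1) = 22
 *
 * descriptors are still in flight, leaving 511 - 22 = 489 slots free.
 * The masked unsigned subtraction handles wrap-around without branches.
 */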
6528
6529 /* Tigon3 never reports partial packet sends.  So we do not
6530  * need special logic to handle SKBs that have not had all
6531  * of their frags sent yet, like SunGEM does.
6532  */
6533 static void tg3_tx(struct tg3_napi *tnapi)
6534 {
6535         struct tg3 *tp = tnapi->tp;
6536         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6537         u32 sw_idx = tnapi->tx_cons;
6538         struct netdev_queue *txq;
6539         int index = tnapi - tp->napi;
6540         unsigned int pkts_compl = 0, bytes_compl = 0;
6541
6542         if (tg3_flag(tp, ENABLE_TSS))
6543                 index--;
6544
6545         txq = netdev_get_tx_queue(tp->dev, index);
6546
6547         while (sw_idx != hw_idx) {
6548                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6549                 struct sk_buff *skb = ri->skb;
6550                 int i, tx_bug = 0;
6551
6552                 if (unlikely(skb == NULL)) {
6553                         tg3_tx_recover(tp);
6554                         return;
6555                 }
6556
6557                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6558                         struct skb_shared_hwtstamps timestamp;
6559                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6560                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6561
6562                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6563
6564                         skb_tstamp_tx(skb, &timestamp);
6565                 }
6566
6567                 pci_unmap_single(tp->pdev,
6568                                  dma_unmap_addr(ri, mapping),
6569                                  skb_headlen(skb),
6570                                  PCI_DMA_TODEVICE);
6571
6572                 ri->skb = NULL;
6573
6574                 while (ri->fragmented) {
6575                         ri->fragmented = false;
6576                         sw_idx = NEXT_TX(sw_idx);
6577                         ri = &tnapi->tx_buffers[sw_idx];
6578                 }
6579
6580                 sw_idx = NEXT_TX(sw_idx);
6581
6582                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6583                         ri = &tnapi->tx_buffers[sw_idx];
6584                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6585                                 tx_bug = 1;
6586
6587                         pci_unmap_page(tp->pdev,
6588                                        dma_unmap_addr(ri, mapping),
6589                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6590                                        PCI_DMA_TODEVICE);
6591
6592                         while (ri->fragmented) {
6593                                 ri->fragmented = false;
6594                                 sw_idx = NEXT_TX(sw_idx);
6595                                 ri = &tnapi->tx_buffers[sw_idx];
6596                         }
6597
6598                         sw_idx = NEXT_TX(sw_idx);
6599                 }
6600
6601                 pkts_compl++;
6602                 bytes_compl += skb->len;
6603
6604                 dev_consume_skb_any(skb);
6605
6606                 if (unlikely(tx_bug)) {
6607                         tg3_tx_recover(tp);
6608                         return;
6609                 }
6610         }
6611
6612         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6613
6614         tnapi->tx_cons = sw_idx;
6615
6616         /* Need to make the tx_cons update visible to tg3_start_xmit()
6617          * before checking for netif_queue_stopped().  Without the
6618          * memory barrier, there is a small possibility that tg3_start_xmit()
6619          * will miss it and cause the queue to be stopped forever.
6620          */
6621         smp_mb();
6622
6623         if (unlikely(netif_tx_queue_stopped(txq) &&
6624                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6625                 __netif_tx_lock(txq, smp_processor_id());
6626                 if (netif_tx_queue_stopped(txq) &&
6627                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6628                         netif_tx_wake_queue(txq);
6629                 __netif_tx_unlock(txq);
6630         }
6631 }
6632
6633 static void tg3_frag_free(bool is_frag, void *data)
6634 {
6635         if (is_frag)
6636                 skb_free_frag(data);
6637         else
6638                 kfree(data);
6639 }
6640
6641 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6642 {
6643         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6644                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6645
6646         if (!ri->data)
6647                 return;
6648
6649         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6650                          map_sz, PCI_DMA_FROMDEVICE);
6651         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6652         ri->data = NULL;
6653 }
6654
6655
6656 /* Returns size of the rx data buffer allocated, or < 0 on error.
6657  *
6658  * We only need to fill in the address because the other members
6659  * of the RX descriptor are invariant, see tg3_init_rings.
6660  *
6661  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6662  * posting buffers we only dirty the first cache line of the RX
6663  * descriptor (containing the address).  Whereas for the RX status
6664  * buffers the cpu only reads the last cacheline of the RX descriptor
6665  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6666  */
6667 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6668                              u32 opaque_key, u32 dest_idx_unmasked,
6669                              unsigned int *frag_size)
6670 {
6671         struct tg3_rx_buffer_desc *desc;
6672         struct ring_info *map;
6673         u8 *data;
6674         dma_addr_t mapping;
6675         int skb_size, data_size, dest_idx;
6676
6677         switch (opaque_key) {
6678         case RXD_OPAQUE_RING_STD:
6679                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6680                 desc = &tpr->rx_std[dest_idx];
6681                 map = &tpr->rx_std_buffers[dest_idx];
6682                 data_size = tp->rx_pkt_map_sz;
6683                 break;
6684
6685         case RXD_OPAQUE_RING_JUMBO:
6686                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6687                 desc = &tpr->rx_jmb[dest_idx].std;
6688                 map = &tpr->rx_jmb_buffers[dest_idx];
6689                 data_size = TG3_RX_JMB_MAP_SZ;
6690                 break;
6691
6692         default:
6693                 return -EINVAL;
6694         }
6695
6696         /* Do not overwrite any of the map or rp information
6697          * until we are sure we can commit to a new buffer.
6698          *
6699          * Callers depend upon this behavior and assume that
6700          * we leave everything unchanged if we fail.
6701          */
6702         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6703                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6704         if (skb_size <= PAGE_SIZE) {
6705                 data = napi_alloc_frag(skb_size);
6706                 *frag_size = skb_size;
6707         } else {
6708                 data = kmalloc(skb_size, GFP_ATOMIC);
6709                 *frag_size = 0;
6710         }
6711         if (!data)
6712                 return -ENOMEM;
6713
6714         mapping = pci_map_single(tp->pdev,
6715                                  data + TG3_RX_OFFSET(tp),
6716                                  data_size,
6717                                  PCI_DMA_FROMDEVICE);
6718         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6719                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6720                 return -EIO;
6721         }
6722
6723         map->data = data;
6724         dma_unmap_addr_set(map, mapping, mapping);
6725
6726         desc->addr_hi = ((u64)mapping >> 32);
6727         desc->addr_lo = ((u64)mapping & 0xffffffff);
6728
6729         return data_size;
6730 }
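
/* Worked sizing example for the allocation above (illustrative numbers,
 * assuming a 64 byte cache line, a 320 byte skb_shared_info and
 * TG3_RX_OFFSET(tp) == 66): for a standard ring buffer with
 * data_size = 1536,
 *
 *      skb_size = SKB_DATA_ALIGN(1536 + 66) + SKB_DATA_ALIGN(320)
 *               = 1664 + 320 = 1984
 *
 * which fits within a 4K page, so the cheap per-cpu page fragment
 * allocator (napi_alloc_frag) is used instead of kmalloc().
 */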
6731
6732 /* We only need to move over in the address because the other
6733  * members of the RX descriptor are invariant.  See notes above
6734  * tg3_alloc_rx_data for full details.
6735  */
6736 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6737                            struct tg3_rx_prodring_set *dpr,
6738                            u32 opaque_key, int src_idx,
6739                            u32 dest_idx_unmasked)
6740 {
6741         struct tg3 *tp = tnapi->tp;
6742         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6743         struct ring_info *src_map, *dest_map;
6744         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6745         int dest_idx;
6746
6747         switch (opaque_key) {
6748         case RXD_OPAQUE_RING_STD:
6749                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6750                 dest_desc = &dpr->rx_std[dest_idx];
6751                 dest_map = &dpr->rx_std_buffers[dest_idx];
6752                 src_desc = &spr->rx_std[src_idx];
6753                 src_map = &spr->rx_std_buffers[src_idx];
6754                 break;
6755
6756         case RXD_OPAQUE_RING_JUMBO:
6757                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6758                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6759                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6760                 src_desc = &spr->rx_jmb[src_idx].std;
6761                 src_map = &spr->rx_jmb_buffers[src_idx];
6762                 break;
6763
6764         default:
6765                 return;
6766         }
6767
6768         dest_map->data = src_map->data;
6769         dma_unmap_addr_set(dest_map, mapping,
6770                            dma_unmap_addr(src_map, mapping));
6771         dest_desc->addr_hi = src_desc->addr_hi;
6772         dest_desc->addr_lo = src_desc->addr_lo;
6773
6774         /* Ensure that the update to the skb happens after the physical
6775          * addresses have been transferred to the new BD location.
6776          */
6777         smp_wmb();
6778
6779         src_map->data = NULL;
6780 }
6781
6782 /* The RX ring scheme is composed of multiple rings which post fresh
6783  * buffers to the chip, and one special ring the chip uses to report
6784  * status back to the host.
6785  *
6786  * The special ring reports the status of received packets to the
6787  * host.  The chip does not write into the original descriptor the
6788  * RX buffer was obtained from.  The chip simply takes the original
6789  * descriptor as provided by the host, updates the status and length
6790  * field, then writes this into the next status ring entry.
6791  *
6792  * Each ring the host uses to post buffers to the chip is described
6793  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6794  * it is first placed into the on-chip ram.  When the packet's length
6795  * is known, it walks down the TG3_BDINFO entries to select the ring.
6796  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6797  * which is within the range of the new packet's length is chosen.
6798  *
6799  * The "separate ring for rx status" scheme may sound odd, but it makes
6800  * sense from a cache coherency perspective.  If only the host writes
6801  * to the buffer post rings, and only the chip writes to the rx status
6802  * rings, then cache lines never move beyond shared-modified state.
6803  * If both the host and chip were to write into the same ring, cache line
6804  * eviction could occur since both entities want it in an exclusive state.
6805  */
6806 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6807 {
6808         struct tg3 *tp = tnapi->tp;
6809         u32 work_mask, rx_std_posted = 0;
6810         u32 std_prod_idx, jmb_prod_idx;
6811         u32 sw_idx = tnapi->rx_rcb_ptr;
6812         u16 hw_idx;
6813         int received;
6814         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6815
6816         hw_idx = *(tnapi->rx_rcb_prod_idx);
6817         /*
6818          * We need to order the read of hw_idx and the read of
6819          * the opaque cookie.
6820          */
6821         rmb();
6822         work_mask = 0;
6823         received = 0;
6824         std_prod_idx = tpr->rx_std_prod_idx;
6825         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6826         while (sw_idx != hw_idx && budget > 0) {
6827                 struct ring_info *ri;
6828                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6829                 unsigned int len;
6830                 struct sk_buff *skb;
6831                 dma_addr_t dma_addr;
6832                 u32 opaque_key, desc_idx, *post_ptr;
6833                 u8 *data;
6834                 u64 tstamp = 0;
6835
6836                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6837                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6838                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6839                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6840                         dma_addr = dma_unmap_addr(ri, mapping);
6841                         data = ri->data;
6842                         post_ptr = &std_prod_idx;
6843                         rx_std_posted++;
6844                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6845                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6846                         dma_addr = dma_unmap_addr(ri, mapping);
6847                         data = ri->data;
6848                         post_ptr = &jmb_prod_idx;
6849                 } else
6850                         goto next_pkt_nopost;
6851
6852                 work_mask |= opaque_key;
6853
6854                 if (desc->err_vlan & RXD_ERR_MASK) {
6855                 drop_it:
6856                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6857                                        desc_idx, *post_ptr);
6858                 drop_it_no_recycle:
6859                         /* Other statistics kept track of by card. */
6860                         tp->rx_dropped++;
6861                         goto next_pkt;
6862                 }
6863
6864                 prefetch(data + TG3_RX_OFFSET(tp));
6865                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6866                       ETH_FCS_LEN;
6867
6868                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6869                      RXD_FLAG_PTPSTAT_PTPV1 ||
6870                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6871                      RXD_FLAG_PTPSTAT_PTPV2) {
6872                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6873                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6874                 }
6875
6876                 if (len > TG3_RX_COPY_THRESH(tp)) {
6877                         int skb_size;
6878                         unsigned int frag_size;
6879
6880                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6881                                                     *post_ptr, &frag_size);
6882                         if (skb_size < 0)
6883                                 goto drop_it;
6884
6885                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6886                                          PCI_DMA_FROMDEVICE);
6887
6888                         /* Ensure that the update to the data happens
6889                          * after the usage of the old DMA mapping.
6890                          */
6891                         smp_wmb();
6892
6893                         ri->data = NULL;
6894
6895                         skb = build_skb(data, frag_size);
6896                         if (!skb) {
6897                                 tg3_frag_free(frag_size != 0, data);
6898                                 goto drop_it_no_recycle;
6899                         }
6900                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6901                 } else {
6902                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6903                                        desc_idx, *post_ptr);
6904
6905                         skb = netdev_alloc_skb(tp->dev,
6906                                                len + TG3_RAW_IP_ALIGN);
6907                         if (skb == NULL)
6908                                 goto drop_it_no_recycle;
6909
6910                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6911                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6912                         memcpy(skb->data,
6913                                data + TG3_RX_OFFSET(tp),
6914                                len);
6915                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6916                 }
6917
6918                 skb_put(skb, len);
6919                 if (tstamp)
6920                         tg3_hwclock_to_timestamp(tp, tstamp,
6921                                                  skb_hwtstamps(skb));
6922
6923                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6924                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6925                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6926                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6927                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6928                 else
6929                         skb_checksum_none_assert(skb);
6930
6931                 skb->protocol = eth_type_trans(skb, tp->dev);
6932
6933                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6934                     skb->protocol != htons(ETH_P_8021Q) &&
6935                     skb->protocol != htons(ETH_P_8021AD)) {
6936                         dev_kfree_skb_any(skb);
6937                         goto drop_it_no_recycle;
6938                 }
6939
6940                 if (desc->type_flags & RXD_FLAG_VLAN &&
6941                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6942                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6943                                                desc->err_vlan & RXD_VLAN_MASK);
6944
6945                 napi_gro_receive(&tnapi->napi, skb);
6946
6947                 received++;
6948                 budget--;
6949
6950 next_pkt:
6951                 (*post_ptr)++;
6952
6953                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6954                         tpr->rx_std_prod_idx = std_prod_idx &
6955                                                tp->rx_std_ring_mask;
6956                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6957                                      tpr->rx_std_prod_idx);
6958                         work_mask &= ~RXD_OPAQUE_RING_STD;
6959                         rx_std_posted = 0;
6960                 }
6961 next_pkt_nopost:
6962                 sw_idx++;
6963                 sw_idx &= tp->rx_ret_ring_mask;
6964
6965                 /* Refresh hw_idx to see if there is new work */
6966                 if (sw_idx == hw_idx) {
6967                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6968                         rmb();
6969                 }
6970         }
6971
6972         /* ACK the status ring. */
6973         tnapi->rx_rcb_ptr = sw_idx;
6974         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6975
6976         /* Refill RX ring(s). */
6977         if (!tg3_flag(tp, ENABLE_RSS)) {
6978                 /* Sync BD data before updating mailbox */
6979                 wmb();
6980
6981                 if (work_mask & RXD_OPAQUE_RING_STD) {
6982                         tpr->rx_std_prod_idx = std_prod_idx &
6983                                                tp->rx_std_ring_mask;
6984                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6985                                      tpr->rx_std_prod_idx);
6986                 }
6987                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6988                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6989                                                tp->rx_jmb_ring_mask;
6990                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6991                                      tpr->rx_jmb_prod_idx);
6992                 }
6993         } else if (work_mask) {
6994                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6995                  * updated before the producer indices can be updated.
6996                  */
6997                 smp_wmb();
6998
6999                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
7000                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
7001
7002                 if (tnapi != &tp->napi[1]) {
7003                         tp->rx_refill = true;
7004                         napi_schedule(&tp->napi[1].napi);
7005                 }
7006         }
7007
7008         return received;
7009 }
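
/* The rmb() pairing in tg3_rx() above follows the classic "publish data,
 * then index" discipline: the NIC DMA-writes a status entry before
 * advancing the producer index, so the driver must read the index before
 * trusting the entries behind it.  A user-space analog with C11 atomics
 * (illustrative, single producer / single consumer):
 *
 *      #include <stdatomic.h>
 *
 *      #define RING_SZ 512                     // power of two
 *      static unsigned int ring[RING_SZ];
 *      static atomic_uint prod_idx;
 *
 *      void produce(unsigned int idx, unsigned int val)
 *      {
 *              ring[idx & (RING_SZ - 1)] = val;
 *              // release: entry is visible before the new index
 *              atomic_store_explicit(&prod_idx, idx + 1,
 *                                    memory_order_release);
 *      }
 *
 *      unsigned int consume(unsigned int *cons)
 *      {
 *              // acquire pairs with the release above, just as the
 *              // rmb() pairs with the device's ordered DMA writes
 *              unsigned int hw = atomic_load_explicit(&prod_idx,
 *                                                     memory_order_acquire);
 *              unsigned int n = 0;
 *
 *              while (*cons != hw) {
 *                      (void)ring[(*cons)++ & (RING_SZ - 1)];
 *                      n++;
 *              }
 *              return n;
 *      }
 */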
7010
7011 static void tg3_poll_link(struct tg3 *tp)
7012 {
7013         /* handle link change and other phy events */
7014         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7015                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7016
7017                 if (sblk->status & SD_STATUS_LINK_CHG) {
7018                         sblk->status = SD_STATUS_UPDATED |
7019                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7020                         spin_lock(&tp->lock);
7021                         if (tg3_flag(tp, USE_PHYLIB)) {
7022                                 tw32_f(MAC_STATUS,
7023                                      (MAC_STATUS_SYNC_CHANGED |
7024                                       MAC_STATUS_CFG_CHANGED |
7025                                       MAC_STATUS_MI_COMPLETION |
7026                                       MAC_STATUS_LNKSTATE_CHANGED));
7027                                 udelay(40);
7028                         } else
7029                                 tg3_setup_phy(tp, false);
7030                         spin_unlock(&tp->lock);
7031                 }
7032         }
7033 }
7034
7035 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7036                                 struct tg3_rx_prodring_set *dpr,
7037                                 struct tg3_rx_prodring_set *spr)
7038 {
7039         u32 si, di, cpycnt, src_prod_idx;
7040         int i, err = 0;
7041
7042         while (1) {
7043                 src_prod_idx = spr->rx_std_prod_idx;
7044
7045                 /* Make sure updates to the rx_std_buffers[] entries and the
7046                  * standard producer index are seen in the correct order.
7047                  */
7048                 smp_rmb();
7049
7050                 if (spr->rx_std_cons_idx == src_prod_idx)
7051                         break;
7052
7053                 if (spr->rx_std_cons_idx < src_prod_idx)
7054                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7055                 else
7056                         cpycnt = tp->rx_std_ring_mask + 1 -
7057                                  spr->rx_std_cons_idx;
7058
7059                 cpycnt = min(cpycnt,
7060                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7061
7062                 si = spr->rx_std_cons_idx;
7063                 di = dpr->rx_std_prod_idx;
7064
7065                 for (i = di; i < di + cpycnt; i++) {
7066                         if (dpr->rx_std_buffers[i].data) {
7067                                 cpycnt = i - di;
7068                                 err = -ENOSPC;
7069                                 break;
7070                         }
7071                 }
7072
7073                 if (!cpycnt)
7074                         break;
7075
7076                 /* Ensure that updates to the rx_std_buffers ring and the
7077                  * shadowed hardware producer ring from tg3_recycle_rx() are
7078                  * ordered correctly WRT the data check above.
7079                  */
7080                 smp_rmb();
7081
7082                 memcpy(&dpr->rx_std_buffers[di],
7083                        &spr->rx_std_buffers[si],
7084                        cpycnt * sizeof(struct ring_info));
7085
7086                 for (i = 0; i < cpycnt; i++, di++, si++) {
7087                         struct tg3_rx_buffer_desc *sbd, *dbd;
7088                         sbd = &spr->rx_std[si];
7089                         dbd = &dpr->rx_std[di];
7090                         dbd->addr_hi = sbd->addr_hi;
7091                         dbd->addr_lo = sbd->addr_lo;
7092                 }
7093
7094                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7095                                        tp->rx_std_ring_mask;
7096                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7097                                        tp->rx_std_ring_mask;
7098         }
7099
7100         while (1) {
7101                 src_prod_idx = spr->rx_jmb_prod_idx;
7102
7103                 /* Make sure updates to the rx_jmb_buffers[] entries and
7104                  * the jumbo producer index are seen in the correct order.
7105                  */
7106                 smp_rmb();
7107
7108                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7109                         break;
7110
7111                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7112                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7113                 else
7114                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7115                                  spr->rx_jmb_cons_idx;
7116
7117                 cpycnt = min(cpycnt,
7118                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7119
7120                 si = spr->rx_jmb_cons_idx;
7121                 di = dpr->rx_jmb_prod_idx;
7122
7123                 for (i = di; i < di + cpycnt; i++) {
7124                         if (dpr->rx_jmb_buffers[i].data) {
7125                                 cpycnt = i - di;
7126                                 err = -ENOSPC;
7127                                 break;
7128                         }
7129                 }
7130
7131                 if (!cpycnt)
7132                         break;
7133
7134                 /* Ensure that updates to the rx_jmb_buffers ring and the
7135                  * shadowed hardware producer ring from tg3_recycle_skb() are
7136                  * ordered correctly WRT the skb check above.
7137                  */
7138                 smp_rmb();
7139
7140                 memcpy(&dpr->rx_jmb_buffers[di],
7141                        &spr->rx_jmb_buffers[si],
7142                        cpycnt * sizeof(struct ring_info));
7143
7144                 for (i = 0; i < cpycnt; i++, di++, si++) {
7145                         struct tg3_rx_buffer_desc *sbd, *dbd;
7146                         sbd = &spr->rx_jmb[si].std;
7147                         dbd = &dpr->rx_jmb[di].std;
7148                         dbd->addr_hi = sbd->addr_hi;
7149                         dbd->addr_lo = sbd->addr_lo;
7150                 }
7151
7152                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7153                                        tp->rx_jmb_ring_mask;
7154                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7155                                        tp->rx_jmb_ring_mask;
7156         }
7157
7158         return err;
7159 }
7160
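/* Core NAPI work loop shared by tg3_poll() and tg3_poll_msix(): reap
 * tx completions first, then receive packets within the remaining
 * budget.  With RSS, napi[1] additionally gathers the buffers refilled
 * by every rx vector into the ring the hardware consumes and, on
 * -ENOSPC, forces a new interrupt through HOSTCC_MODE so the transfer
 * can be retried.
 */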
7161 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7162 {
7163         struct tg3 *tp = tnapi->tp;
7164
7165         /* run TX completion thread */
7166         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7167                 tg3_tx(tnapi);
7168                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7169                         return work_done;
7170         }
7171
7172         if (!tnapi->rx_rcb_prod_idx)
7173                 return work_done;
7174
7175         /* run RX thread, within the bounds set by NAPI.
7176          * All RX "locking" is done by ensuring outside
7177          * code synchronizes with tg3->napi.poll()
7178          */
7179         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7180                 work_done += tg3_rx(tnapi, budget - work_done);
7181
7182         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7183                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7184                 int i, err = 0;
7185                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7186                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7187
7188                 tp->rx_refill = false;
7189                 for (i = 1; i <= tp->rxq_cnt; i++)
7190                         err |= tg3_rx_prodring_xfer(tp, dpr,
7191                                                     &tp->napi[i].prodring);
7192
7193                 wmb();
7194
7195                 if (std_prod_idx != dpr->rx_std_prod_idx)
7196                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7197                                      dpr->rx_std_prod_idx);
7198
7199                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7200                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7201                                      dpr->rx_jmb_prod_idx);
7202
7203                 if (err)
7204                         tw32_f(HOSTCC_MODE, tp->coal_now);
7205         }
7206
7207         return work_done;
7208 }
7209
7210 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7211 {
7212         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7213                 schedule_work(&tp->reset_task);
7214 }
7215
7216 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7217 {
7218         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7219                 cancel_work_sync(&tp->reset_task);
7220         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7221 }
7222
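/* NAPI poll handler for MSI-X vectors.  Relies on tagged status
 * blocks: the last status tag seen is written back through the
 * interrupt mailbox when interrupts are re-enabled.
 */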
7223 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7224 {
7225         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7226         struct tg3 *tp = tnapi->tp;
7227         int work_done = 0;
7228         struct tg3_hw_status *sblk = tnapi->hw_status;
7229
7230         while (1) {
7231                 work_done = tg3_poll_work(tnapi, work_done, budget);
7232
7233                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7234                         goto tx_recovery;
7235
7236                 if (unlikely(work_done >= budget))
7237                         break;
7238
7239                 /* tnapi->last_tag is used in the interrupt mailbox write
7240                  * below to tell the hw how much work has been processed,
7241                  * so we must read it before checking for more work.
7242                  */
7243                 tnapi->last_tag = sblk->status_tag;
7244                 tnapi->last_irq_tag = tnapi->last_tag;
7245                 rmb();
7246
7247                 /* check for RX/TX work to do */
7248                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7249                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7250
7251                         /* This test is not race free, but looping again
7252                          * reduces the number of interrupts.
7253                          */
7254                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7255                                 continue;
7256
7257                         napi_complete_done(napi, work_done);
7258                         /* Reenable interrupts. */
7259                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7260
7261                         /* This test is synchronized by napi_schedule()
7262                          * and napi_complete() to close the race condition.
7263                          */
7264                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7265                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7266                                                   HOSTCC_MODE_ENABLE |
7267                                                   tnapi->coal_now);
7268                         }
7269                         break;
7270                 }
7271         }
7272
7273         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7274         return work_done;
7275
7276 tx_recovery:
7277         /* work_done is guaranteed to be less than budget. */
7278         napi_complete(napi);
7279         tg3_reset_task_schedule(tp);
7280         return work_done;
7281 }
7282
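/* Inspect the flow attention, MSI status and DMA status registers
 * once the hardware has flagged SD_STATUS_ERROR.  Benign attention
 * bits are masked off; anything else dumps the chip state and
 * schedules a reset (only once, gated by the ERROR_PROCESSED flag).
 */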
7283 static void tg3_process_error(struct tg3 *tp)
7284 {
7285         u32 val;
7286         bool real_error = false;
7287
7288         if (tg3_flag(tp, ERROR_PROCESSED))
7289                 return;
7290
7291         /* Check Flow Attention register */
7292         val = tr32(HOSTCC_FLOW_ATTN);
7293         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7294                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7295                 real_error = true;
7296         }
7297
7298         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7299                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7300                 real_error = true;
7301         }
7302
7303         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7304                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7305                 real_error = true;
7306         }
7307
7308         if (!real_error)
7309                 return;
7310
7311         tg3_dump_state(tp);
7312
7313         tg3_flag_set(tp, ERROR_PROCESSED);
7314         tg3_reset_task_schedule(tp);
7315 }
7316
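/* NAPI poll handler for INTx/MSI (single vector) operation.  Unlike
 * the MSI-X path, this also checks for error attentions and link
 * changes, and handles both tagged and non-tagged status blocks.
 */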
7317 static int tg3_poll(struct napi_struct *napi, int budget)
7318 {
7319         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7320         struct tg3 *tp = tnapi->tp;
7321         int work_done = 0;
7322         struct tg3_hw_status *sblk = tnapi->hw_status;
7323
7324         while (1) {
7325                 if (sblk->status & SD_STATUS_ERROR)
7326                         tg3_process_error(tp);
7327
7328                 tg3_poll_link(tp);
7329
7330                 work_done = tg3_poll_work(tnapi, work_done, budget);
7331
7332                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7333                         goto tx_recovery;
7334
7335                 if (unlikely(work_done >= budget))
7336                         break;
7337
7338                 if (tg3_flag(tp, TAGGED_STATUS)) {
7339                         /* tnapi->last_tag is used in tg3_int_reenable() below
7340                          * to tell the hw how much work has been processed,
7341                          * so we must read it before checking for more work.
7342                          */
7343                         tnapi->last_tag = sblk->status_tag;
7344                         tnapi->last_irq_tag = tnapi->last_tag;
7345                         rmb();
7346                 } else
7347                         sblk->status &= ~SD_STATUS_UPDATED;
7348
7349                 if (likely(!tg3_has_work(tnapi))) {
7350                         napi_complete_done(napi, work_done);
7351                         tg3_int_reenable(tnapi);
7352                         break;
7353                 }
7354         }
7355
7356         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7357         return work_done;
7358
7359 tx_recovery:
7360         /* work_done is guaranteed to be less than budget. */
7361         napi_complete(napi);
7362         tg3_reset_task_schedule(tp);
7363         return work_done;
7364 }
7365
7366 static void tg3_napi_disable(struct tg3 *tp)
7367 {
7368         int i;
7369
7370         for (i = tp->irq_cnt - 1; i >= 0; i--)
7371                 napi_disable(&tp->napi[i].napi);
7372 }
7373
7374 static void tg3_napi_enable(struct tg3 *tp)
7375 {
7376         int i;
7377
7378         for (i = 0; i < tp->irq_cnt; i++)
7379                 napi_enable(&tp->napi[i].napi);
7380 }
7381
7382 static void tg3_napi_init(struct tg3 *tp)
7383 {
7384         int i;
7385
7386         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7387         for (i = 1; i < tp->irq_cnt; i++)
7388                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7389 }
7390
7391 static void tg3_napi_fini(struct tg3 *tp)
7392 {
7393         int i;
7394
7395         for (i = 0; i < tp->irq_cnt; i++)
7396                 netif_napi_del(&tp->napi[i].napi);
7397 }
7398
7399 static inline void tg3_netif_stop(struct tg3 *tp)
7400 {
7401         netif_trans_update(tp->dev);    /* prevent tx timeout */
7402         tg3_napi_disable(tp);
7403         netif_carrier_off(tp->dev);
7404         netif_tx_disable(tp->dev);
7405 }
7406
7407 /* tp->lock must be held */
7408 static inline void tg3_netif_start(struct tg3 *tp)
7409 {
7410         tg3_ptp_resume(tp);
7411
7412         /* NOTE: unconditional netif_tx_wake_all_queues is only
7413          * appropriate so long as all callers are assured to
7414          * have free tx slots (such as after tg3_init_hw)
7415          */
7416         netif_tx_wake_all_queues(tp->dev);
7417
7418         if (tp->link_up)
7419                 netif_carrier_on(tp->dev);
7420
7421         tg3_napi_enable(tp);
7422         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7423         tg3_enable_ints(tp);
7424 }
7425
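/* Wait for all in-flight interrupt handlers to finish.  tp->lock is
 * dropped across synchronize_irq() so the handlers are not blocked;
 * setting irq_sync first makes any new handler invocation bail out
 * early in the meantime.
 */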
7426 static void tg3_irq_quiesce(struct tg3 *tp)
7427         __releases(tp->lock)
7428         __acquires(tp->lock)
7429 {
7430         int i;
7431
7432         BUG_ON(tp->irq_sync);
7433
7434         tp->irq_sync = 1;
7435         smp_mb();
7436
7437         spin_unlock_bh(&tp->lock);
7438
7439         for (i = 0; i < tp->irq_cnt; i++)
7440                 synchronize_irq(tp->napi[i].irq_vec);
7441
7442         spin_lock_bh(&tp->lock);
7443 }
7444
7445 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7446  * If irq_sync is non-zero, then the IRQ handler must be synchronized
7447  * with as well.  Most of the time, this is not necessary except when
7448  * shutting down the device.
7449  */
7450 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7451 {
7452         spin_lock_bh(&tp->lock);
7453         if (irq_sync)
7454                 tg3_irq_quiesce(tp);
7455 }
7456
7457 static inline void tg3_full_unlock(struct tg3 *tp)
7458 {
7459         spin_unlock_bh(&tp->lock);
7460 }
7461
7462 /* One-shot MSI handler - Chip automatically disables interrupt
7463  * after sending MSI so driver doesn't have to do it.
7464  */
7465 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7466 {
7467         struct tg3_napi *tnapi = dev_id;
7468         struct tg3 *tp = tnapi->tp;
7469
7470         prefetch(tnapi->hw_status);
7471         if (tnapi->rx_rcb)
7472                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7473
7474         if (likely(!tg3_irq_sync(tp)))
7475                 napi_schedule(&tnapi->napi);
7476
7477         return IRQ_HANDLED;
7478 }
7479
7480 /* MSI ISR - No need to check for interrupt sharing and no need to
7481  * flush status block and interrupt mailbox. PCI ordering rules
7482  * guarantee that MSI will arrive after the status block.
7483  */
7484 static irqreturn_t tg3_msi(int irq, void *dev_id)
7485 {
7486         struct tg3_napi *tnapi = dev_id;
7487         struct tg3 *tp = tnapi->tp;
7488
7489         prefetch(tnapi->hw_status);
7490         if (tnapi->rx_rcb)
7491                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7492         /*
7493          * Writing any value to intr-mbox-0 clears PCI INTA# and
7494          * chip-internal interrupt pending events.
7495          * Writing non-zero to intr-mbox-0 additionally tells the
7496          * NIC to stop sending us irqs, engaging "in-intr-handler"
7497          * event coalescing.
7498          */
7499         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7500         if (likely(!tg3_irq_sync(tp)))
7501                 napi_schedule(&tnapi->napi);
7502
7503         return IRQ_RETVAL(1);
7504 }
7505
7506 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7507 {
7508         struct tg3_napi *tnapi = dev_id;
7509         struct tg3 *tp = tnapi->tp;
7510         struct tg3_hw_status *sblk = tnapi->hw_status;
7511         unsigned int handled = 1;
7512
7513         /* In INTx mode, it is possible for the interrupt to arrive at
7514          * the CPU before the status block that was posted prior to the
7515          * interrupt.  Reading the PCI State register will confirm whether
7516          * the interrupt is ours and will flush the status block.
7517          */
7518         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7519                 if (tg3_flag(tp, CHIP_RESETTING) ||
7520                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7521                         handled = 0;
7522                         goto out;
7523                 }
7524         }
7525
7526         /*
7527          * Writing any value to intr-mbox-0 clears PCI INTA# and
7528          * chip-internal interrupt pending events.
7529          * Writing non-zero to intr-mbox-0 additionally tells the
7530          * NIC to stop sending us irqs, engaging "in-intr-handler"
7531          * event coalescing.
7532          *
7533          * Flush the mailbox to de-assert the IRQ immediately to prevent
7534          * spurious interrupts.  The flush impacts performance but
7535          * excessive spurious interrupts can be worse in some cases.
7536          */
7537         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7538         if (tg3_irq_sync(tp))
7539                 goto out;
7540         sblk->status &= ~SD_STATUS_UPDATED;
7541         if (likely(tg3_has_work(tnapi))) {
7542                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7543                 napi_schedule(&tnapi->napi);
7544         } else {
7545                 /* No work, shared interrupt perhaps?  re-enable
7546                  * interrupts, and flush that PCI write
7547                  */
7548                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7549                                0x00000000);
7550         }
7551 out:
7552         return IRQ_RETVAL(handled);
7553 }
7554
7555 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7556 {
7557         struct tg3_napi *tnapi = dev_id;
7558         struct tg3 *tp = tnapi->tp;
7559         struct tg3_hw_status *sblk = tnapi->hw_status;
7560         unsigned int handled = 1;
7561
7562         /* In INTx mode, it is possible for the interrupt to arrive at
7563          * the CPU before the status block that was posted prior to the
7564          * interrupt.  Reading the PCI State register will confirm whether
7565          * the interrupt is ours and will flush the status block.
7566          */
7567         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7568                 if (tg3_flag(tp, CHIP_RESETTING) ||
7569                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7570                         handled = 0;
7571                         goto out;
7572                 }
7573         }
7574
7575         /*
7576          * Writing any value to intr-mbox-0 clears PCI INTA# and
7577          * chip-internal interrupt pending events.
7578          * Writing non-zero to intr-mbox-0 additionally tells the
7579          * NIC to stop sending us irqs, engaging "in-intr-handler"
7580          * event coalescing.
7581          *
7582          * Flush the mailbox to de-assert the IRQ immediately to prevent
7583          * spurious interrupts.  The flush impacts performance but
7584          * excessive spurious interrupts can be worse in some cases.
7585          */
7586         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7587
7588         /*
7589          * In a shared interrupt configuration, sometimes other devices'
7590          * interrupts will scream.  We record the current status tag here
7591          * so that the above check can report that the screaming interrupts
7592          * are unhandled.  Eventually they will be silenced.
7593          */
7594         tnapi->last_irq_tag = sblk->status_tag;
7595
7596         if (tg3_irq_sync(tp))
7597                 goto out;
7598
7599         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7600
7601         napi_schedule(&tnapi->napi);
7602
7603 out:
7604         return IRQ_RETVAL(handled);
7605 }
7606
7607 /* ISR for interrupt test */
7608 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7609 {
7610         struct tg3_napi *tnapi = dev_id;
7611         struct tg3 *tp = tnapi->tp;
7612         struct tg3_hw_status *sblk = tnapi->hw_status;
7613
7614         if ((sblk->status & SD_STATUS_UPDATED) ||
7615             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7616                 tg3_disable_ints(tp);
7617                 return IRQ_RETVAL(1);
7618         }
7619         return IRQ_RETVAL(0);
7620 }
7621
7622 #ifdef CONFIG_NET_POLL_CONTROLLER
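/* netpoll/netconsole entry point: service every vector by calling the
 * INTx handler directly, since normal interrupt delivery may not be
 * available in this context.
 */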
7623 static void tg3_poll_controller(struct net_device *dev)
7624 {
7625         int i;
7626         struct tg3 *tp = netdev_priv(dev);
7627
7628         if (tg3_irq_sync(tp))
7629                 return;
7630
7631         for (i = 0; i < tp->irq_cnt; i++)
7632                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7633 }
7634 #endif
7635
7636 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7637 {
7638         struct tg3 *tp = netdev_priv(dev);
7639
7640         if (netif_msg_tx_err(tp)) {
7641                 netdev_err(dev, "transmit timed out, resetting\n");
7642                 tg3_dump_state(tp);
7643         }
7644
7645         tg3_reset_task_schedule(tp);
7646 }
7647
7648 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7649 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7650 {
7651         u32 base = (u32) mapping & 0xffffffff;
7652
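        /* The unsigned wrap-around detects a boundary crossing; the
         * extra 8 bytes of slack also rejects buffers ending within
         * 8 bytes of a boundary, apparently as erratum headroom.
         */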
7653         return base + len + 8 < base;
7654 }
7655
7656 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7657  * of any 4GB boundaries: 4G, 8G, etc
7658  */
7659 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7660                                            u32 len, u32 mss)
7661 {
7662         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7663                 u32 base = (u32) mapping & 0xffffffff;
7664
7665                 return ((base + len + (mss & 0x3fff)) < base);
7666         }
7667         return 0;
7668 }
7669
7670 /* Test for DMA addresses > 40-bit */
7671 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7672                                           int len)
7673 {
7674 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7675         if (tg3_flag(tp, 40BIT_DMA_BUG))
7676                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7677         return 0;
7678 #else
7679         return 0;
7680 #endif
7681 }
7682
7683 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7684                                  dma_addr_t mapping, u32 len, u32 flags,
7685                                  u32 mss, u32 vlan)
7686 {
7687         txbd->addr_hi = ((u64) mapping >> 32);
7688         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7689         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7690         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7691 }
7692
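/* Emit one or more tx BDs for a single DMA mapping.  If the chip has
 * a DMA length limit (tp->dma_limit), the buffer is split into several
 * BDs, taking care that no chunk trips the 8-byte DMA erratum.
 * Returns true if the mapping hits one of the known DMA bugs and the
 * caller must fall back to the workaround path.
 */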
7693 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7694                             dma_addr_t map, u32 len, u32 flags,
7695                             u32 mss, u32 vlan)
7696 {
7697         struct tg3 *tp = tnapi->tp;
7698         bool hwbug = false;
7699
7700         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7701                 hwbug = true;
7702
7703         if (tg3_4g_overflow_test(map, len))
7704                 hwbug = true;
7705
7706         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7707                 hwbug = true;
7708
7709         if (tg3_40bit_overflow_test(tp, map, len))
7710                 hwbug = true;
7711
7712         if (tp->dma_limit) {
7713                 u32 prvidx = *entry;
7714                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7715                 while (len > tp->dma_limit && *budget) {
7716                         u32 frag_len = tp->dma_limit;
7717                         len -= tp->dma_limit;
7718
7719                         /* Avoid the 8-byte DMA problem */
7720                         if (len <= 8) {
7721                                 len += tp->dma_limit / 2;
7722                                 frag_len = tp->dma_limit / 2;
7723                         }
7724
7725                         tnapi->tx_buffers[*entry].fragmented = true;
7726
7727                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7728                                       frag_len, tmp_flag, mss, vlan);
7729                         *budget -= 1;
7730                         prvidx = *entry;
7731                         *entry = NEXT_TX(*entry);
7732
7733                         map += frag_len;
7734                 }
7735
7736                 if (len) {
7737                         if (*budget) {
7738                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7739                                               len, flags, mss, vlan);
7740                                 *budget -= 1;
7741                                 *entry = NEXT_TX(*entry);
7742                         } else {
7743                                 hwbug = true;
7744                                 tnapi->tx_buffers[prvidx].fragmented = false;
7745                         }
7746                 }
7747         } else {
7748                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7749                               len, flags, mss, vlan);
7750                 *entry = NEXT_TX(*entry);
7751         }
7752
7753         return hwbug;
7754 }
7755
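/* Undo the DMA mappings of a previously queued skb: the linear head
 * first, then frags 0..last.  Entries marked "fragmented" are
 * continuations created when tg3_tx_frag_set() split a mapping; they
 * share the mapping just unmapped, so they are stepped over.
 */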
7756 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7757 {
7758         int i;
7759         struct sk_buff *skb;
7760         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7761
7762         skb = txb->skb;
7763         txb->skb = NULL;
7764
7765         pci_unmap_single(tnapi->tp->pdev,
7766                          dma_unmap_addr(txb, mapping),
7767                          skb_headlen(skb),
7768                          PCI_DMA_TODEVICE);
7769
7770         while (txb->fragmented) {
7771                 txb->fragmented = false;
7772                 entry = NEXT_TX(entry);
7773                 txb = &tnapi->tx_buffers[entry];
7774         }
7775
7776         for (i = 0; i <= last; i++) {
7777                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7778
7779                 entry = NEXT_TX(entry);
7780                 txb = &tnapi->tx_buffers[entry];
7781
7782                 pci_unmap_page(tnapi->tp->pdev,
7783                                dma_unmap_addr(txb, mapping),
7784                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7785
7786                 while (txb->fragmented) {
7787                         txb->fragmented = false;
7788                         entry = NEXT_TX(entry);
7789                         txb = &tnapi->tx_buffers[entry];
7790                 }
7791         }
7792 }
7793
7794 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7795 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7796                                        struct sk_buff **pskb,
7797                                        u32 *entry, u32 *budget,
7798                                        u32 base_flags, u32 mss, u32 vlan)
7799 {
7800         struct tg3 *tp = tnapi->tp;
7801         struct sk_buff *new_skb, *skb = *pskb;
7802         dma_addr_t new_addr = 0;
7803         int ret = 0;
7804
7805         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7806                 new_skb = skb_copy(skb, GFP_ATOMIC);
7807         else {
7808                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7809
7810                 new_skb = skb_copy_expand(skb,
7811                                           skb_headroom(skb) + more_headroom,
7812                                           skb_tailroom(skb), GFP_ATOMIC);
7813         }
7814
7815         if (!new_skb) {
7816                 ret = -1;
7817         } else {
7818                 /* New SKB is guaranteed to be linear. */
7819                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7820                                           PCI_DMA_TODEVICE);
7821                 /* Make sure the mapping succeeded */
7822                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7823                         dev_kfree_skb_any(new_skb);
7824                         ret = -1;
7825                 } else {
7826                         u32 save_entry = *entry;
7827
7828                         base_flags |= TXD_FLAG_END;
7829
7830                         tnapi->tx_buffers[*entry].skb = new_skb;
7831                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7832                                            mapping, new_addr);
7833
7834                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7835                                             new_skb->len, base_flags,
7836                                             mss, vlan)) {
7837                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7838                                 dev_kfree_skb_any(new_skb);
7839                                 ret = -1;
7840                         }
7841                 }
7842         }
7843
7844         dev_consume_skb_any(skb);
7845         *pskb = new_skb;
7846         return ret;
7847 }
7848
7849 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7850 {
7851         /* Check that the ring can ever hold enough descriptors;
7852          * gso_segs can exceed the current ring size.
7853          */
7854         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7855 }
7856
7857 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7858
7859 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7860  * indicated in tg3_tx_frag_set()
7861  */
7862 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7863                        struct netdev_queue *txq, struct sk_buff *skb)
7864 {
7865         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7866         struct sk_buff *segs, *seg, *next;
7867
7868         /* Estimate the number of fragments in the worst case */
7869         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7870                 netif_tx_stop_queue(txq);
7871
7872                 /* netif_tx_stop_queue() must be done before checking
7873                  * the tx index in tg3_tx_avail() below, because in
7874                  * tg3_tx(), we update tx index before checking for
7875                  * netif_tx_queue_stopped().
7876                  */
7877                 smp_mb();
7878                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7879                         return NETDEV_TX_BUSY;
7880
7881                 netif_tx_wake_queue(txq);
7882         }
7883
7884         segs = skb_gso_segment(skb, tp->dev->features &
7885                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7886         if (IS_ERR(segs) || !segs)
7887                 goto tg3_tso_bug_end;
7888
7889         skb_list_walk_safe(segs, seg, next) {
7890                 skb_mark_not_on_list(seg);
7891                 tg3_start_xmit(seg, tp->dev);
7892         }
7893
7894 tg3_tso_bug_end:
7895         dev_consume_skb_any(skb);
7896
7897         return NETDEV_TX_OK;
7898 }
7899
7900 /* hard_start_xmit for all devices */
7901 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7902 {
7903         struct tg3 *tp = netdev_priv(dev);
7904         u32 len, entry, base_flags, mss, vlan = 0;
7905         u32 budget;
7906         int i = -1, would_hit_hwbug;
7907         dma_addr_t mapping;
7908         struct tg3_napi *tnapi;
7909         struct netdev_queue *txq;
7910         unsigned int last;
7911         struct iphdr *iph = NULL;
7912         struct tcphdr *tcph = NULL;
7913         __sum16 tcp_csum = 0, ip_csum = 0;
7914         __be16 ip_tot_len = 0;
7915
7916         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7917         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
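        /* With TSS, tx rings start at napi[1] (vector 0 carries no tx
         * ring), so the queue-to-vector mapping is shifted up by one.
         */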
7918         if (tg3_flag(tp, ENABLE_TSS))
7919                 tnapi++;
7920
7921         budget = tg3_tx_avail(tnapi);
7922
7923         /* We are running in BH disabled context with netif_tx_lock
7924          * and TX reclaim runs via tp->napi.poll inside of a software
7925          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7926          * no IRQ context deadlocks to worry about either.  Rejoice!
7927          */
7928         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7929                 if (!netif_tx_queue_stopped(txq)) {
7930                         netif_tx_stop_queue(txq);
7931
7932                         /* This is a hard error, log it. */
7933                         netdev_err(dev,
7934                                    "BUG! Tx Ring full when queue awake!\n");
7935                 }
7936                 return NETDEV_TX_BUSY;
7937         }
7938
7939         entry = tnapi->tx_prod;
7940         base_flags = 0;
7941
7942         mss = skb_shinfo(skb)->gso_size;
7943         if (mss) {
7944                 u32 tcp_opt_len, hdr_len;
7945
7946                 if (skb_cow_head(skb, 0))
7947                         goto drop;
7948
7949                 iph = ip_hdr(skb);
7950                 tcp_opt_len = tcp_optlen(skb);
7951
7952                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7953
7954                 /* HW/FW can not correctly segment packets that have been
7955                  * vlan encapsulated.
7956                  */
7957                 if (skb->protocol == htons(ETH_P_8021Q) ||
7958                     skb->protocol == htons(ETH_P_8021AD)) {
7959                         if (tg3_tso_bug_gso_check(tnapi, skb))
7960                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7961                         goto drop;
7962                 }
7963
7964                 if (!skb_is_gso_v6(skb)) {
7965                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7966                             tg3_flag(tp, TSO_BUG)) {
7967                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7968                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7969                                 goto drop;
7970                         }
7971                         ip_csum = iph->check;
7972                         ip_tot_len = iph->tot_len;
7973                         iph->check = 0;
7974                         iph->tot_len = htons(mss + hdr_len);
7975                 }
7976
7977                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7978                                TXD_FLAG_CPU_POST_DMA);
7979
7980                 tcph = tcp_hdr(skb);
7981                 tcp_csum = tcph->check;
7982
7983                 if (tg3_flag(tp, HW_TSO_1) ||
7984                     tg3_flag(tp, HW_TSO_2) ||
7985                     tg3_flag(tp, HW_TSO_3)) {
7986                         tcph->check = 0;
7987                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7988                 } else {
7989                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7990                                                          0, IPPROTO_TCP, 0);
7991                 }
7992
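                /* The TSO header length must also be handed to the
                 * hardware; how it is packed into the mss field and
                 * base_flags differs between the HW_TSO generations
                 * handled below.
                 */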
7993                 if (tg3_flag(tp, HW_TSO_3)) {
7994                         mss |= (hdr_len & 0xc) << 12;
7995                         if (hdr_len & 0x10)
7996                                 base_flags |= 0x00000010;
7997                         base_flags |= (hdr_len & 0x3e0) << 5;
7998                 } else if (tg3_flag(tp, HW_TSO_2))
7999                         mss |= hdr_len << 9;
8000                 else if (tg3_flag(tp, HW_TSO_1) ||
8001                          tg3_asic_rev(tp) == ASIC_REV_5705) {
8002                         if (tcp_opt_len || iph->ihl > 5) {
8003                                 int tsflags;
8004
8005                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8006                                 mss |= (tsflags << 11);
8007                         }
8008                 } else {
8009                         if (tcp_opt_len || iph->ihl > 5) {
8010                                 int tsflags;
8011
8012                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8013                                 base_flags |= tsflags << 12;
8014                         }
8015                 }
8016         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8017                 /* HW/FW can not correctly checksum packets that have been
8018                  * vlan encapsulated.
8019                  */
8020                 if (skb->protocol == htons(ETH_P_8021Q) ||
8021                     skb->protocol == htons(ETH_P_8021AD)) {
8022                         if (skb_checksum_help(skb))
8023                                 goto drop;
8024                 } else  {
8025                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8026                 }
8027         }
8028
8029         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8030             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8031                 base_flags |= TXD_FLAG_JMB_PKT;
8032
8033         if (skb_vlan_tag_present(skb)) {
8034                 base_flags |= TXD_FLAG_VLAN;
8035                 vlan = skb_vlan_tag_get(skb);
8036         }
8037
8038         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8039             tg3_flag(tp, TX_TSTAMP_EN)) {
8040                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8041                 base_flags |= TXD_FLAG_HWTSTAMP;
8042         }
8043
8044         len = skb_headlen(skb);
8045
8046         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8047         if (pci_dma_mapping_error(tp->pdev, mapping))
8048                 goto drop;
8049
8050
8051         tnapi->tx_buffers[entry].skb = skb;
8052         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8053
8054         would_hit_hwbug = 0;
8055
8056         if (tg3_flag(tp, 5701_DMA_BUG))
8057                 would_hit_hwbug = 1;
8058
8059         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8060                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8061                             mss, vlan)) {
8062                 would_hit_hwbug = 1;
8063         } else if (skb_shinfo(skb)->nr_frags > 0) {
8064                 u32 tmp_mss = mss;
8065
8066                 if (!tg3_flag(tp, HW_TSO_1) &&
8067                     !tg3_flag(tp, HW_TSO_2) &&
8068                     !tg3_flag(tp, HW_TSO_3))
8069                         tmp_mss = 0;
8070
8071                 /* Now loop through additional data
8072                  * fragments, and queue them.
8073                  */
8074                 last = skb_shinfo(skb)->nr_frags - 1;
8075                 for (i = 0; i <= last; i++) {
8076                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8077
8078                         len = skb_frag_size(frag);
8079                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8080                                                    len, DMA_TO_DEVICE);
8081
8082                         tnapi->tx_buffers[entry].skb = NULL;
8083                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8084                                            mapping);
8085                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8086                                 goto dma_error;
8087
8088                         if (!budget ||
8089                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8090                                             len, base_flags |
8091                                             ((i == last) ? TXD_FLAG_END : 0),
8092                                             tmp_mss, vlan)) {
8093                                 would_hit_hwbug = 1;
8094                                 break;
8095                         }
8096                 }
8097         }
8098
8099         if (would_hit_hwbug) {
8100                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8101
8102                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8103                         /* If it's a TSO packet, do GSO instead of
8104                          * allocating and copying to a large linear SKB
8105                          */
8106                         if (ip_tot_len) {
8107                                 iph->check = ip_csum;
8108                                 iph->tot_len = ip_tot_len;
8109                         }
8110                         tcph->check = tcp_csum;
8111                         return tg3_tso_bug(tp, tnapi, txq, skb);
8112                 }
8113
8114                 /* If the workaround fails due to memory/mapping
8115                  * failure, silently drop this packet.
8116                  */
8117                 entry = tnapi->tx_prod;
8118                 budget = tg3_tx_avail(tnapi);
8119                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8120                                                 base_flags, mss, vlan))
8121                         goto drop_nofree;
8122         }
8123
8124         skb_tx_timestamp(skb);
8125         netdev_tx_sent_queue(txq, skb->len);
8126
8127         /* Sync BD data before updating mailbox */
8128         wmb();
8129
8130         tnapi->tx_prod = entry;
8131         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8132                 netif_tx_stop_queue(txq);
8133
8134                 /* netif_tx_stop_queue() must be done before checking
8135                  * the tx index in tg3_tx_avail() below, because in
8136                  * tg3_tx(), we update tx index before checking for
8137                  * netif_tx_queue_stopped().
8138                  */
8139                 smp_mb();
8140                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8141                         netif_tx_wake_queue(txq);
8142         }
8143
8144         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8145                 /* Packets are ready, update Tx producer idx on card. */
8146                 tw32_tx_mbox(tnapi->prodmbox, entry);
8147         }
8148
8149         return NETDEV_TX_OK;
8150
8151 dma_error:
8152         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8153         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8154 drop:
8155         dev_kfree_skb_any(skb);
8156 drop_nofree:
8157         tp->tx_dropped++;
8158         return NETDEV_TX_OK;
8159 }
8160
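/* Enable or disable internal MAC-level loopback by rewriting MAC_MODE.
 * When enabling, the port mode (MII vs. GMII) is chosen from the PHY
 * capabilities.
 */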
8161 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8162 {
8163         if (enable) {
8164                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8165                                   MAC_MODE_PORT_MODE_MASK);
8166
8167                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8168
8169                 if (!tg3_flag(tp, 5705_PLUS))
8170                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8171
8172                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8173                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8174                 else
8175                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8176         } else {
8177                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8178
8179                 if (tg3_flag(tp, 5705_PLUS) ||
8180                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8181                     tg3_asic_rev(tp) == ASIC_REV_5700)
8182                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8183         }
8184
8185         tw32(MAC_MODE, tp->mac_mode);
8186         udelay(40);
8187 }
8188
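/* Put the PHY into loopback at the requested speed, either internal
 * (BMCR_LOOPBACK) or external via tg3_phy_set_extloopbk(), and bring
 * MAC_MODE into line with it.  FET-style PHYs need extra PTEST setup,
 * and several writes must be flushed with a read to take effect.
 */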
8189 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8190 {
8191         u32 val, bmcr, mac_mode, ptest = 0;
8192
8193         tg3_phy_toggle_apd(tp, false);
8194         tg3_phy_toggle_automdix(tp, false);
8195
8196         if (extlpbk && tg3_phy_set_extloopbk(tp))
8197                 return -EIO;
8198
8199         bmcr = BMCR_FULLDPLX;
8200         switch (speed) {
8201         case SPEED_10:
8202                 break;
8203         case SPEED_100:
8204                 bmcr |= BMCR_SPEED100;
8205                 break;
8206         case SPEED_1000:
8207         default:
8208                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8209                         speed = SPEED_100;
8210                         bmcr |= BMCR_SPEED100;
8211                 } else {
8212                         speed = SPEED_1000;
8213                         bmcr |= BMCR_SPEED1000;
8214                 }
8215         }
8216
8217         if (extlpbk) {
8218                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8219                         tg3_readphy(tp, MII_CTRL1000, &val);
8220                         val |= CTL1000_AS_MASTER |
8221                                CTL1000_ENABLE_MASTER;
8222                         tg3_writephy(tp, MII_CTRL1000, val);
8223                 } else {
8224                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8225                                 MII_TG3_FET_PTEST_TRIM_2;
8226                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8227                 }
8228         } else
8229                 bmcr |= BMCR_LOOPBACK;
8230
8231         tg3_writephy(tp, MII_BMCR, bmcr);
8232
8233         /* The write needs to be flushed for the FETs */
8234         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8235                 tg3_readphy(tp, MII_BMCR, &bmcr);
8236
8237         udelay(40);
8238
8239         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8240             tg3_asic_rev(tp) == ASIC_REV_5785) {
8241                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8242                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8243                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8244
8245                 /* The write needs to be flushed for the AC131 */
8246                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8247         }
8248
8249         /* Reset to prevent losing 1st rx packet intermittently */
8250         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8251             tg3_flag(tp, 5780_CLASS)) {
8252                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8253                 udelay(10);
8254                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8255         }
8256
8257         mac_mode = tp->mac_mode &
8258                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8259         if (speed == SPEED_1000)
8260                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8261         else
8262                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8263
8264         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8265                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8266
8267                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8268                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8269                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8270                         mac_mode |= MAC_MODE_LINK_POLARITY;
8271
8272                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8273                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8274         }
8275
8276         tw32(MAC_MODE, mac_mode);
8277         udelay(40);
8278
8279         return 0;
8280 }
8281
8282 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8283 {
8284         struct tg3 *tp = netdev_priv(dev);
8285
8286         if (features & NETIF_F_LOOPBACK) {
8287                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8288                         return;
8289
8290                 spin_lock_bh(&tp->lock);
8291                 tg3_mac_loopback(tp, true);
8292                 netif_carrier_on(tp->dev);
8293                 spin_unlock_bh(&tp->lock);
8294                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8295         } else {
8296                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8297                         return;
8298
8299                 spin_lock_bh(&tp->lock);
8300                 tg3_mac_loopback(tp, false);
8301                 /* Force link status check */
8302                 tg3_setup_phy(tp, true);
8303                 spin_unlock_bh(&tp->lock);
8304                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8305         }
8306 }
8307
8308 static netdev_features_t tg3_fix_features(struct net_device *dev,
8309         netdev_features_t features)
8310 {
8311         struct tg3 *tp = netdev_priv(dev);
8312
8313         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8314                 features &= ~NETIF_F_ALL_TSO;
8315
8316         return features;
8317 }
8318
8319 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8320 {
8321         netdev_features_t changed = dev->features ^ features;
8322
8323         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8324                 tg3_set_loopback(dev, features);
8325
8326         return 0;
8327 }
8328
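/* Release the rx buffers held by a producer ring set.  For a
 * per-vector shadow ring only the cons..prod window can hold buffers;
 * the hardware-visible set in napi[0] is freed in its entirety.
 */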
8329 static void tg3_rx_prodring_free(struct tg3 *tp,
8330                                  struct tg3_rx_prodring_set *tpr)
8331 {
8332         int i;
8333
8334         if (tpr != &tp->napi[0].prodring) {
8335                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8336                      i = (i + 1) & tp->rx_std_ring_mask)
8337                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8338                                         tp->rx_pkt_map_sz);
8339
8340                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8341                         for (i = tpr->rx_jmb_cons_idx;
8342                              i != tpr->rx_jmb_prod_idx;
8343                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8344                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8345                                                 TG3_RX_JMB_MAP_SZ);
8346                         }
8347                 }
8348
8349                 return;
8350         }
8351
8352         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8353                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8354                                 tp->rx_pkt_map_sz);
8355
8356         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8357                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8358                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8359                                         TG3_RX_JMB_MAP_SZ);
8360         }
8361 }
8362
8363 /* Initialize rx rings for packet processing.
8364  *
8365  * The chip has been shut down and the driver detached from
8366  * the networking stack, so no interrupts or new tx packets will
8367  * end up in the driver.  tp->{tx,}lock are held and thus
8368  * we may not sleep.
8369  */
8370 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8371                                  struct tg3_rx_prodring_set *tpr)
8372 {
8373         u32 i, rx_pkt_dma_sz;
8374
8375         tpr->rx_std_cons_idx = 0;
8376         tpr->rx_std_prod_idx = 0;
8377         tpr->rx_jmb_cons_idx = 0;
8378         tpr->rx_jmb_prod_idx = 0;
8379
8380         if (tpr != &tp->napi[0].prodring) {
8381                 memset(&tpr->rx_std_buffers[0], 0,
8382                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8383                 if (tpr->rx_jmb_buffers)
8384                         memset(&tpr->rx_jmb_buffers[0], 0,
8385                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8386                 goto done;
8387         }
8388
8389         /* Zero out all descriptors. */
8390         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8391
8392         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8393         if (tg3_flag(tp, 5780_CLASS) &&
8394             tp->dev->mtu > ETH_DATA_LEN)
8395                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8396         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8397
8398         /* Initialize invariants of the rings; we only set this
8399          * stuff once.  This works because the card does not
8400          * write into the rx buffer posting rings.
8401          */
8402         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8403                 struct tg3_rx_buffer_desc *rxd;
8404
8405                 rxd = &tpr->rx_std[i];
8406                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8407                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8408                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8409                                (i << RXD_OPAQUE_INDEX_SHIFT));
8410         }
8411
8412         /* Now allocate fresh SKBs for each rx ring. */
8413         for (i = 0; i < tp->rx_pending; i++) {
8414                 unsigned int frag_size;
8415
8416                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8417                                       &frag_size) < 0) {
8418                         netdev_warn(tp->dev,
8419                                     "Using a smaller RX standard ring. Only "
8420                                     "%d out of %d buffers were allocated "
8421                                     "successfully\n", i, tp->rx_pending);
8422                         if (i == 0)
8423                                 goto initfail;
8424                         tp->rx_pending = i;
8425                         break;
8426                 }
8427         }
8428
8429         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8430                 goto done;
8431
8432         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8433
8434         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8435                 goto done;
8436
8437         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8438                 struct tg3_rx_buffer_desc *rxd;
8439
8440                 rxd = &tpr->rx_jmb[i].std;
8441                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8442                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8443                                   RXD_FLAG_JUMBO;
8444                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8445                        (i << RXD_OPAQUE_INDEX_SHIFT));
8446         }
8447
8448         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8449                 unsigned int frag_size;
8450
8451                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8452                                       &frag_size) < 0) {
8453                         netdev_warn(tp->dev,
8454                                     "Using a smaller RX jumbo ring. Only %d "
8455                                     "out of %d buffers were allocated "
8456                                     "successfully\n", i, tp->rx_jumbo_pending);
8457                         if (i == 0)
8458                                 goto initfail;
8459                         tp->rx_jumbo_pending = i;
8460                         break;
8461                 }
8462         }
8463
8464 done:
8465         return 0;
8466
8467 initfail:
8468         tg3_rx_prodring_free(tp, tpr);
8469         return -ENOMEM;
8470 }
8471
8472 static void tg3_rx_prodring_fini(struct tg3 *tp,
8473                                  struct tg3_rx_prodring_set *tpr)
8474 {
8475         kfree(tpr->rx_std_buffers);
8476         tpr->rx_std_buffers = NULL;
8477         kfree(tpr->rx_jmb_buffers);
8478         tpr->rx_jmb_buffers = NULL;
8479         if (tpr->rx_std) {
8480                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8481                                   tpr->rx_std, tpr->rx_std_mapping);
8482                 tpr->rx_std = NULL;
8483         }
8484         if (tpr->rx_jmb) {
8485                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8486                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8487                 tpr->rx_jmb = NULL;
8488         }
8489 }
8490
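/* Allocate the host-side bookkeeping arrays and the DMA-coherent
 * descriptor rings for one producer ring set.  Jumbo resources are
 * only needed on chips with a separate jumbo ring (jumbo-capable and
 * not 5780-class).
 */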
8491 static int tg3_rx_prodring_init(struct tg3 *tp,
8492                                 struct tg3_rx_prodring_set *tpr)
8493 {
8494         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8495                                       GFP_KERNEL);
8496         if (!tpr->rx_std_buffers)
8497                 return -ENOMEM;
8498
8499         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8500                                          TG3_RX_STD_RING_BYTES(tp),
8501                                          &tpr->rx_std_mapping,
8502                                          GFP_KERNEL);
8503         if (!tpr->rx_std)
8504                 goto err_out;
8505
8506         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8507                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8508                                               GFP_KERNEL);
8509                 if (!tpr->rx_jmb_buffers)
8510                         goto err_out;
8511
8512                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8513                                                  TG3_RX_JMB_RING_BYTES(tp),
8514                                                  &tpr->rx_jmb_mapping,
8515                                                  GFP_KERNEL);
8516                 if (!tpr->rx_jmb)
8517                         goto err_out;
8518         }
8519
8520         return 0;
8521
8522 err_out:
8523         tg3_rx_prodring_fini(tp, tpr);
8524         return -ENOMEM;
8525 }
8526
8527 /* Free up pending packets in all rx/tx rings.
8528  *
8529  * The chip has been shut down and the driver detached from
8530  * the networking stack, so no interrupts or new tx packets will
8531  * end up in the driver.  tp->{tx,}lock is not held, we are not in
8532  * an interrupt context, and thus we may sleep.
8533  */
8534 static void tg3_free_rings(struct tg3 *tp)
8535 {
8536         int i, j;
8537
8538         for (j = 0; j < tp->irq_cnt; j++) {
8539                 struct tg3_napi *tnapi = &tp->napi[j];
8540
8541                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8542
8543                 if (!tnapi->tx_buffers)
8544                         continue;
8545
8546                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8547                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8548
8549                         if (!skb)
8550                                 continue;
8551
8552                         tg3_tx_skb_unmap(tnapi, i,
8553                                          skb_shinfo(skb)->nr_frags - 1);
8554
8555                         dev_consume_skb_any(skb);
8556                 }
8557                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8558         }
8559 }
8560
8561 /* Initialize tx/rx rings for packet processing.
8562  *
8563  * The chip has been shut down and the driver detached from
8564  * the networking stack, so no interrupts or new tx packets will
8565  * end up in the driver.  tp->{tx,}lock are held, so we may
8566  * not sleep.
8567  */
8568 static int tg3_init_rings(struct tg3 *tp)
8569 {
8570         int i;
8571
8572         /* Free up all the SKBs. */
8573         tg3_free_rings(tp);
8574
8575         for (i = 0; i < tp->irq_cnt; i++) {
8576                 struct tg3_napi *tnapi = &tp->napi[i];
8577
8578                 tnapi->last_tag = 0;
8579                 tnapi->last_irq_tag = 0;
8580                 tnapi->hw_status->status = 0;
8581                 tnapi->hw_status->status_tag = 0;
8582                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8583
8584                 tnapi->tx_prod = 0;
8585                 tnapi->tx_cons = 0;
8586                 if (tnapi->tx_ring)
8587                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8588
8589                 tnapi->rx_rcb_ptr = 0;
8590                 if (tnapi->rx_rcb)
8591                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8592
8593                 if (tnapi->prodring.rx_std &&
8594                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8595                         tg3_free_rings(tp);
8596                         return -ENOMEM;
8597                 }
8598         }
8599
8600         return 0;
8601 }
8602
8603 static void tg3_mem_tx_release(struct tg3 *tp)
8604 {
8605         int i;
8606
8607         for (i = 0; i < tp->irq_max; i++) {
8608                 struct tg3_napi *tnapi = &tp->napi[i];
8609
8610                 if (tnapi->tx_ring) {
8611                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8612                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8613                         tnapi->tx_ring = NULL;
8614                 }
8615
8616                 kfree(tnapi->tx_buffers);
8617                 tnapi->tx_buffers = NULL;
8618         }
8619 }
8620
8621 static int tg3_mem_tx_acquire(struct tg3 *tp)
8622 {
8623         int i;
8624         struct tg3_napi *tnapi = &tp->napi[0];
8625
8626         /* If multivector TSS is enabled, vector 0 does not handle
8627          * tx interrupts.  Don't allocate any resources for it.
8628          */
8629         if (tg3_flag(tp, ENABLE_TSS))
8630                 tnapi++;
8631
8632         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8633                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8634                                             sizeof(struct tg3_tx_ring_info),
8635                                             GFP_KERNEL);
8636                 if (!tnapi->tx_buffers)
8637                         goto err_out;
8638
8639                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8640                                                     TG3_TX_RING_BYTES,
8641                                                     &tnapi->tx_desc_mapping,
8642                                                     GFP_KERNEL);
8643                 if (!tnapi->tx_ring)
8644                         goto err_out;
8645         }
8646
8647         return 0;
8648
8649 err_out:
8650         tg3_mem_tx_release(tp);
8651         return -ENOMEM;
8652 }
8653
8654 static void tg3_mem_rx_release(struct tg3 *tp)
8655 {
8656         int i;
8657
8658         for (i = 0; i < tp->irq_max; i++) {
8659                 struct tg3_napi *tnapi = &tp->napi[i];
8660
8661                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8662
8663                 if (!tnapi->rx_rcb)
8664                         continue;
8665
8666                 dma_free_coherent(&tp->pdev->dev,
8667                                   TG3_RX_RCB_RING_BYTES(tp),
8668                                   tnapi->rx_rcb,
8669                                   tnapi->rx_rcb_mapping);
8670                 tnapi->rx_rcb = NULL;
8671         }
8672 }
8673
8674 static int tg3_mem_rx_acquire(struct tg3 *tp)
8675 {
8676         unsigned int i, limit;
8677
8678         limit = tp->rxq_cnt;
8679
8680         /* If RSS is enabled, vector zero still needs a producer ring
8681          * set; it is the producer ring the hardware actually uses.
8682          */
8683         if (tg3_flag(tp, ENABLE_RSS))
8684                 limit++;
8685
8686         for (i = 0; i < limit; i++) {
8687                 struct tg3_napi *tnapi = &tp->napi[i];
8688
8689                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8690                         goto err_out;
8691
8692                 /* If multivector RSS is enabled, vector 0
8693                  * does not handle rx or tx interrupts.
8694                  * Don't allocate any resources for it.
8695                  */
8696                 if (!i && tg3_flag(tp, ENABLE_RSS))
8697                         continue;
8698
8699                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8700                                                    TG3_RX_RCB_RING_BYTES(tp),
8701                                                    &tnapi->rx_rcb_mapping,
8702                                                    GFP_KERNEL);
8703                 if (!tnapi->rx_rcb)
8704                         goto err_out;
8705         }
8706
8707         return 0;
8708
8709 err_out:
8710         tg3_mem_rx_release(tp);
8711         return -ENOMEM;
8712 }
8713
8714 /*
8715  * Must not be invoked until interrupt sources have been disabled
8716  * and the hardware has been shut down.
8717  */
8718 static void tg3_free_consistent(struct tg3 *tp)
8719 {
8720         int i;
8721
8722         for (i = 0; i < tp->irq_cnt; i++) {
8723                 struct tg3_napi *tnapi = &tp->napi[i];
8724
8725                 if (tnapi->hw_status) {
8726                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8727                                           tnapi->hw_status,
8728                                           tnapi->status_mapping);
8729                         tnapi->hw_status = NULL;
8730                 }
8731         }
8732
8733         tg3_mem_rx_release(tp);
8734         tg3_mem_tx_release(tp);
8735
8736         /* tp->hw_stats can be referenced safely:
8737          *     1. under rtnl_lock
8738          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8739          */
8740         if (tp->hw_stats) {
8741                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8742                                   tp->hw_stats, tp->stats_mapping);
8743                 tp->hw_stats = NULL;
8744         }
8745 }
8746
8747 /*
8748  * Must not be invoked until interrupt sources have been disabled
8749  * and the hardware has been shut down.  Can sleep.
8750  */
8751 static int tg3_alloc_consistent(struct tg3 *tp)
8752 {
8753         int i;
8754
8755         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8756                                           sizeof(struct tg3_hw_stats),
8757                                           &tp->stats_mapping, GFP_KERNEL);
8758         if (!tp->hw_stats)
8759                 goto err_out;
8760
8761         for (i = 0; i < tp->irq_cnt; i++) {
8762                 struct tg3_napi *tnapi = &tp->napi[i];
8763                 struct tg3_hw_status *sblk;
8764
8765                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8766                                                       TG3_HW_STATUS_SIZE,
8767                                                       &tnapi->status_mapping,
8768                                                       GFP_KERNEL);
8769                 if (!tnapi->hw_status)
8770                         goto err_out;
8771
8772                 sblk = tnapi->hw_status;
8773
8774                 if (tg3_flag(tp, ENABLE_RSS)) {
8775                         u16 *prodptr = NULL;
8776
8777                         /*
8778                          * When RSS is enabled, the status block format changes
8779                          * slightly.  The "rx_jumbo_consumer", "reserved",
8780                          * and "rx_mini_consumer" members get mapped to the
8781                          * other three rx return ring producer indexes.
8782                          */
8783                         switch (i) {
8784                         case 1:
8785                                 prodptr = &sblk->idx[0].rx_producer;
8786                                 break;
8787                         case 2:
8788                                 prodptr = &sblk->rx_jumbo_consumer;
8789                                 break;
8790                         case 3:
8791                                 prodptr = &sblk->reserved;
8792                                 break;
8793                         case 4:
8794                                 prodptr = &sblk->rx_mini_consumer;
8795                                 break;
8796                         }
8797                         tnapi->rx_rcb_prod_idx = prodptr;
8798                 } else {
8799                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8800                 }
8801         }
8802
8803         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8804                 goto err_out;
8805
8806         return 0;
8807
8808 err_out:
8809         tg3_free_consistent(tp);
8810         return -ENOMEM;
8811 }
8812
8813 #define MAX_WAIT_CNT 1000
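/* With the 100 usec poll interval used below, MAX_WAIT_CNT bounds each
 * stop/disable poll loop at roughly 100 ms of busy-waiting.
 */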
8814
8815 /* To stop a block, clear the enable bit and poll until it
8816  * clears.  tp->lock is held.
8817  */
8818 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8819 {
8820         unsigned int i;
8821         u32 val;
8822
8823         if (tg3_flag(tp, 5705_PLUS)) {
8824                 switch (ofs) {
8825                 case RCVLSC_MODE:
8826                 case DMAC_MODE:
8827                 case MBFREE_MODE:
8828                 case BUFMGR_MODE:
8829                 case MEMARB_MODE:
8830                         /* These bits can't be enabled/disabled on the
8831                          * 5705/5750, so just report success.
8832                          */
8833                         return 0;
8834
8835                 default:
8836                         break;
8837                 }
8838         }
8839
8840         val = tr32(ofs);
8841         val &= ~enable_bit;
8842         tw32_f(ofs, val);
8843
8844         for (i = 0; i < MAX_WAIT_CNT; i++) {
8845                 if (pci_channel_offline(tp->pdev)) {
8846                         dev_err(&tp->pdev->dev,
8847                                 "tg3_stop_block device offline, "
8848                                 "ofs=%lx enable_bit=%x\n",
8849                                 ofs, enable_bit);
8850                         return -ENODEV;
8851                 }
8852
8853                 udelay(100);
8854                 val = tr32(ofs);
8855                 if ((val & enable_bit) == 0)
8856                         break;
8857         }
8858
8859         if (i == MAX_WAIT_CNT && !silent) {
8860                 dev_err(&tp->pdev->dev,
8861                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8862                         ofs, enable_bit);
8863                 return -ENODEV;
8864         }
8865
8866         return 0;
8867 }
8868
8869 /* tp->lock is held. */
8870 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8871 {
8872         int i, err;
8873
8874         tg3_disable_ints(tp);
8875
8876         if (pci_channel_offline(tp->pdev)) {
8877                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8878                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8879                 err = -ENODEV;
8880                 goto err_no_dev;
8881         }
8882
8883         tp->rx_mode &= ~RX_MODE_ENABLE;
8884         tw32_f(MAC_RX_MODE, tp->rx_mode);
8885         udelay(10);
8886
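        /* Stop the receive-side blocks first, then the send-side
         * blocks, before shutting down the MAC and the host
         * coalescing engine below.
         */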
8887         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8890         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8891         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8892         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8893
8894         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8895         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8897         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8898         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8899         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8900         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8901
8902         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8903         tw32_f(MAC_MODE, tp->mac_mode);
8904         udelay(40);
8905
8906         tp->tx_mode &= ~TX_MODE_ENABLE;
8907         tw32_f(MAC_TX_MODE, tp->tx_mode);
8908
8909         for (i = 0; i < MAX_WAIT_CNT; i++) {
8910                 udelay(100);
8911                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8912                         break;
8913         }
8914         if (i >= MAX_WAIT_CNT) {
8915                 dev_err(&tp->pdev->dev,
8916                         "%s timed out, TX_MODE_ENABLE will not clear "
8917                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8918                 err |= -ENODEV;
8919         }
8920
8921         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8922         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8923         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8924
8925         tw32(FTQ_RESET, 0xffffffff);
8926         tw32(FTQ_RESET, 0x00000000);
8927
8928         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8929         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8930
8931 err_no_dev:
8932         for (i = 0; i < tp->irq_cnt; i++) {
8933                 struct tg3_napi *tnapi = &tp->napi[i];
8934                 if (tnapi->hw_status)
8935                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8936         }
8937
8938         return err;
8939 }
8940
8941 /* Save PCI command register before chip reset */
8942 static void tg3_save_pci_state(struct tg3 *tp)
8943 {
8944         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8945 }
8946
8947 /* Restore PCI state after chip reset */
8948 static void tg3_restore_pci_state(struct tg3 *tp)
8949 {
8950         u32 val;
8951
8952         /* Re-enable indirect register accesses. */
8953         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8954                                tp->misc_host_ctrl);
8955
8956         /* Set MAX PCI retry to zero. */
8957         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8958         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8959             tg3_flag(tp, PCIX_MODE))
8960                 val |= PCISTATE_RETRY_SAME_DMA;
8961         /* Allow reads and writes to the APE register and memory space. */
8962         if (tg3_flag(tp, ENABLE_APE))
8963                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8964                        PCISTATE_ALLOW_APE_SHMEM_WR |
8965                        PCISTATE_ALLOW_APE_PSPACE_WR;
8966         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8967
8968         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8969
8970         if (!tg3_flag(tp, PCI_EXPRESS)) {
8971                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8972                                       tp->pci_cacheline_sz);
8973                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8974                                       tp->pci_lat_timer);
8975         }
8976
8977         /* Make sure PCI-X relaxed ordering bit is clear. */
8978         if (tg3_flag(tp, PCIX_MODE)) {
8979                 u16 pcix_cmd;
8980
8981                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8982                                      &pcix_cmd);
8983                 pcix_cmd &= ~PCI_X_CMD_ERO;
8984                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8985                                       pcix_cmd);
8986         }
8987
8988         if (tg3_flag(tp, 5780_CLASS)) {
8989
8990                 /* Chip reset on 5780 will reset the MSI enable bit,
8991                  * so we need to restore it.
8992                  */
8993                 if (tg3_flag(tp, USING_MSI)) {
8994                         u16 ctrl;
8995
8996                         pci_read_config_word(tp->pdev,
8997                                              tp->msi_cap + PCI_MSI_FLAGS,
8998                                              &ctrl);
8999                         pci_write_config_word(tp->pdev,
9000                                               tp->msi_cap + PCI_MSI_FLAGS,
9001                                               ctrl | PCI_MSI_FLAGS_ENABLE);
9002                         val = tr32(MSGINT_MODE);
9003                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9004                 }
9005         }
9006 }
9007
9008 static void tg3_override_clk(struct tg3 *tp)
9009 {
9010         u32 val;
9011
9012         switch (tg3_asic_rev(tp)) {
9013         case ASIC_REV_5717:
9014                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9015                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9016                      TG3_CPMU_MAC_ORIDE_ENABLE);
9017                 break;
9018
9019         case ASIC_REV_5719:
9020         case ASIC_REV_5720:
9021                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9022                 break;
9023
9024         default:
9025                 return;
9026         }
9027 }
9028
9029 static void tg3_restore_clk(struct tg3 *tp)
9030 {
9031         u32 val;
9032
9033         switch (tg3_asic_rev(tp)) {
9034         case ASIC_REV_5717:
9035                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9036                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9037                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9038                 break;
9039
9040         case ASIC_REV_5719:
9041         case ASIC_REV_5720:
9042                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9043                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9044                 break;
9045
9046         default:
9047                 return;
9048         }
9049 }
9050
9051 /* tp->lock is held. */
9052 static int tg3_chip_reset(struct tg3 *tp)
9053         __releases(tp->lock)
9054         __acquires(tp->lock)
9055 {
9056         u32 val;
9057         void (*write_op)(struct tg3 *, u32, u32);
9058         int i, err;
9059
9060         if (!pci_device_is_present(tp->pdev))
9061                 return -ENODEV;
9062
9063         tg3_nvram_lock(tp);
9064
9065         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9066
9067         /* No matching tg3_nvram_unlock() after this because
9068          * the chip reset below will undo the nvram lock.
9069          */
9070         tp->nvram_lock_cnt = 0;
9071
9072         /* GRC_MISC_CFG core clock reset will clear the memory
9073          * enable bit in PCI register 4 and the MSI enable bit
9074          * on some chips, so we save relevant registers here.
9075          */
9076         tg3_save_pci_state(tp);
9077
9078         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9079             tg3_flag(tp, 5755_PLUS))
9080                 tw32(GRC_FASTBOOT_PC, 0);
9081
9082         /*
9083          * We must avoid the readl() that normally takes place.
9084          * It can lock up machines, cause machine checks, and other
9085          * fun things.  So, temporarily disable the 5701
9086          * hardware workaround while we do the reset.
9087          */
9088         write_op = tp->write32;
9089         if (write_op == tg3_write_flush_reg32)
9090                 tp->write32 = tg3_write32;
9091
9092         /* Prevent the irq handler from reading or writing PCI registers
9093          * during chip reset when the memory enable bit in the PCI command
9094          * register may be cleared.  The chip does not generate interrupts
9095          * at this time, but the irq handler may still be called due to irq
9096          * sharing or irqpoll.
9097          */
9098         tg3_flag_set(tp, CHIP_RESETTING);
9099         for (i = 0; i < tp->irq_cnt; i++) {
9100                 struct tg3_napi *tnapi = &tp->napi[i];
9101                 if (tnapi->hw_status) {
9102                         tnapi->hw_status->status = 0;
9103                         tnapi->hw_status->status_tag = 0;
9104                 }
9105                 tnapi->last_tag = 0;
9106                 tnapi->last_irq_tag = 0;
9107         }
9108         smp_mb();
9109
9110         tg3_full_unlock(tp);
9111
9112         for (i = 0; i < tp->irq_cnt; i++)
9113                 synchronize_irq(tp->napi[i].irq_vec);
9114
9115         tg3_full_lock(tp, 0);
9116
9117         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9118                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9119                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9120         }
9121
9122         /* do the reset */
9123         val = GRC_MISC_CFG_CORECLK_RESET;
9124
9125         if (tg3_flag(tp, PCI_EXPRESS)) {
9126                 /* Force PCIe 1.0a mode */
9127                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9128                     !tg3_flag(tp, 57765_PLUS) &&
9129                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9130                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9131                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9132
9133                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9134                         tw32(GRC_MISC_CFG, (1 << 29));
9135                         val |= (1 << 29);
9136                 }
9137         }
9138
9139         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9140                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9141                 tw32(GRC_VCPU_EXT_CTRL,
9142                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9143         }
9144
9145         /* Set the clock to the highest frequency to avoid timeouts. With link
9146          * aware mode, the clock speed could be slow and the bootcode may not
9147          * complete within the expected time. Override the clock to allow the
9148          * bootcode to finish sooner and then restore it.
9149          */
9150         tg3_override_clk(tp);
9151
9152         /* Manage gphy power for all CPMU-absent PCIe devices. */
9153         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9154                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9155
9156         tw32(GRC_MISC_CFG, val);
9157
9158         /* restore 5701 hardware bug workaround write method */
9159         tp->write32 = write_op;
9160
9161         /* Unfortunately, we have to delay before the PCI read back.
9162          * Some 575X chips will not even respond to a PCI cfg access
9163          * when the reset command is given to the chip.
9164          *
9165          * How do these hardware designers expect things to work
9166          * properly if the PCI write is posted for a long period
9167          * of time?  It is always necessary to have some method by
9168          * which a register read back can occur to push the write
9169          * out which does the reset.
9170          *
9171          * For most tg3 variants the trick below has worked.
9172          * Ho hum...
9173          */
9174         udelay(120);
9175
9176         /* Flush PCI posted writes.  The normal MMIO registers
9177          * are inaccessible at this time so this is the only
9178          * way to do this reliably (actually, this is no longer
9179          * the case, see above).  I tried to use indirect
9180          * register read/write but this upset some 5701 variants.
9181          */
9182         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9183
9184         udelay(120);
9185
9186         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9187                 u16 val16;
9188
9189                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9190                         int j;
9191                         u32 cfg_val;
9192
9193                         /* Wait for link training to complete.  */
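                        /* 5000 iterations x 100 usec = a fixed ~500 ms delay. */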
9194                         for (j = 0; j < 5000; j++)
9195                                 udelay(100);
9196
9197                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9198                         pci_write_config_dword(tp->pdev, 0xc4,
9199                                                cfg_val | (1 << 15));
9200                 }
9201
9202                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9203                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9204                 /*
9205                  * Older PCIe devices only support the 128 byte
9206                  * MPS setting.  Enforce the restriction.
9207                  */
9208                 if (!tg3_flag(tp, CPMU_PRESENT))
9209                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9210                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9211
9212                 /* Clear error status */
9213                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9214                                       PCI_EXP_DEVSTA_CED |
9215                                       PCI_EXP_DEVSTA_NFED |
9216                                       PCI_EXP_DEVSTA_FED |
9217                                       PCI_EXP_DEVSTA_URD);
9218         }
9219
9220         tg3_restore_pci_state(tp);
9221
9222         tg3_flag_clear(tp, CHIP_RESETTING);
9223         tg3_flag_clear(tp, ERROR_PROCESSED);
9224
9225         val = 0;
9226         if (tg3_flag(tp, 5780_CLASS))
9227                 val = tr32(MEMARB_MODE);
9228         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9229
9230         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9231                 tg3_stop_fw(tp);
9232                 tw32(0x5000, 0x400);
9233         }
9234
9235         if (tg3_flag(tp, IS_SSB_CORE)) {
9236                 /*
9237                  * BCM4785: To avoid repercussions from using the
9238                  * potentially defective internal ROM, stop the Rx RISC CPU,
9239                  * which is not needed for normal operation.
9240                  */
9241                 tg3_stop_fw(tp);
9242                 tg3_halt_cpu(tp, RX_CPU_BASE);
9243         }
9244
9245         err = tg3_poll_fw(tp);
9246         if (err)
9247                 return err;
9248
9249         tw32(GRC_MODE, tp->grc_mode);
9250
9251         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9252                 val = tr32(0xc4);
9253
9254                 tw32(0xc4, val | (1 << 15));
9255         }
9256
9257         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9258             tg3_asic_rev(tp) == ASIC_REV_5705) {
9259                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9260                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9261                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9262                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9263         }
9264
9265         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9266                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9267                 val = tp->mac_mode;
9268         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9269                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9270                 val = tp->mac_mode;
9271         } else
9272                 val = 0;
9273
9274         tw32_f(MAC_MODE, val);
9275         udelay(40);
9276
9277         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9278
9279         tg3_mdio_start(tp);
9280
9281         if (tg3_flag(tp, PCI_EXPRESS) &&
9282             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9283             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9284             !tg3_flag(tp, 57765_PLUS)) {
9285                 val = tr32(0x7c00);
9286
9287                 tw32(0x7c00, val | (1 << 25));
9288         }
9289
9290         tg3_restore_clk(tp);
9291
9292         /* Increase the core clock speed to fix a tx timeout issue on the
9293          * 5762 at 100 Mbps link speed.
9294          */
9295         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9296                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9297                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9298                      TG3_CPMU_MAC_ORIDE_ENABLE);
9299         }
9300
9301         /* Reprobe ASF enable state.  */
9302         tg3_flag_clear(tp, ENABLE_ASF);
9303         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9304                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9305
9306         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9307         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9308         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9309                 u32 nic_cfg;
9310
9311                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9312                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9313                         tg3_flag_set(tp, ENABLE_ASF);
9314                         tp->last_event_jiffies = jiffies;
9315                         if (tg3_flag(tp, 5750_PLUS))
9316                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9317
9318                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9319                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9320                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9321                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9322                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9323                 }
9324         }
9325
9326         return 0;
9327 }
9328
9329 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9330 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9331 static void __tg3_set_rx_mode(struct net_device *);
9332
9333 /* tp->lock is held. */
9334 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9335 {
9336         int err;
9337
9338         tg3_stop_fw(tp);
9339
9340         tg3_write_sig_pre_reset(tp, kind);
9341
9342         tg3_abort_hw(tp, silent);
9343         err = tg3_chip_reset(tp);
9344
9345         __tg3_set_mac_addr(tp, false);
9346
9347         tg3_write_sig_legacy(tp, kind);
9348         tg3_write_sig_post_reset(tp, kind);
9349
9350         if (tp->hw_stats) {
9351                 /* Save the stats across chip resets... */
9352                 tg3_get_nstats(tp, &tp->net_stats_prev);
9353                 tg3_get_estats(tp, &tp->estats_prev);
9354
9355                 /* And make sure the next sample is new data */
9356                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9357         }
9358
9359         return err;
9360 }
9361
9362 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9363 {
9364         struct tg3 *tp = netdev_priv(dev);
9365         struct sockaddr *addr = p;
9366         int err = 0;
9367         bool skip_mac_1 = false;
9368
9369         if (!is_valid_ether_addr(addr->sa_data))
9370                 return -EADDRNOTAVAIL;
9371
9372         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9373
9374         if (!netif_running(dev))
9375                 return 0;
9376
9377         if (tg3_flag(tp, ENABLE_ASF)) {
9378                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9379
9380                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9381                 addr0_low = tr32(MAC_ADDR_0_LOW);
9382                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9383                 addr1_low = tr32(MAC_ADDR_1_LOW);
9384
9385                 /* Skip MAC addr 1 if ASF is using it. */
9386                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9387                     !(addr1_high == 0 && addr1_low == 0))
9388                         skip_mac_1 = true;
9389         }
9390         spin_lock_bh(&tp->lock);
9391         __tg3_set_mac_addr(tp, skip_mac_1);
9392         __tg3_set_rx_mode(dev);
9393         spin_unlock_bh(&tp->lock);
9394
9395         return err;
9396 }
9397
9398 /* tp->lock is held. */
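/* Write one ring's BD info block in NIC SRAM: the 64-bit host DMA
 * address as high/low 32-bit words, the maxlen/flags word, and, on
 * pre-5705 chips, the ring's address in NIC-local memory.
 */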
9399 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9400                            dma_addr_t mapping, u32 maxlen_flags,
9401                            u32 nic_addr)
9402 {
9403         tg3_write_mem(tp,
9404                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9405                       ((u64) mapping >> 32));
9406         tg3_write_mem(tp,
9407                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9408                       ((u64) mapping & 0xffffffff));
9409         tg3_write_mem(tp,
9410                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9411                        maxlen_flags);
9412
9413         if (!tg3_flag(tp, 5705_PLUS))
9414                 tg3_write_mem(tp,
9415                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9416                               nic_addr);
9417 }
9418
9419
9420 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9421 {
9422         int i = 0;
9423
9424         if (!tg3_flag(tp, ENABLE_TSS)) {
9425                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9426                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9427                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9428         } else {
9429                 tw32(HOSTCC_TXCOL_TICKS, 0);
9430                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9431                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9432
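                /* Per-vector coalescing registers repeat at a stride
                 * of 0x18 bytes starting at the VEC1 offsets.
                 */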
9433                 for (; i < tp->txq_cnt; i++) {
9434                         u32 reg;
9435
9436                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9437                         tw32(reg, ec->tx_coalesce_usecs);
9438                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9439                         tw32(reg, ec->tx_max_coalesced_frames);
9440                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9441                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9442                 }
9443         }
9444
9445         for (; i < tp->irq_max - 1; i++) {
9446                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9447                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9448                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9449         }
9450 }
9451
9452 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9453 {
9454         int i = 0;
9455         u32 limit = tp->rxq_cnt;
9456
9457         if (!tg3_flag(tp, ENABLE_RSS)) {
9458                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9459                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9460                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9461                 limit--;
9462         } else {
9463                 tw32(HOSTCC_RXCOL_TICKS, 0);
9464                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9465                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9466         }
9467
9468         for (; i < limit; i++) {
9469                 u32 reg;
9470
9471                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9472                 tw32(reg, ec->rx_coalesce_usecs);
9473                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9474                 tw32(reg, ec->rx_max_coalesced_frames);
9475                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9476                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9477         }
9478
9479         for (; i < tp->irq_max - 1; i++) {
9480                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9481                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9482                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9483         }
9484 }
9485
9486 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9487 {
9488         tg3_coal_tx_init(tp, ec);
9489         tg3_coal_rx_init(tp, ec);
9490
9491         if (!tg3_flag(tp, 5705_PLUS)) {
9492                 u32 val = ec->stats_block_coalesce_usecs;
9493
9494                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9495                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9496
9497                 if (!tp->link_up)
9498                         val = 0;
9499
9500                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9501         }
9502 }
9503
9504 /* tp->lock is held. */
9505 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9506 {
9507         u32 txrcb, limit;
9508
9509         /* Disable all transmit rings but the first. */
9510         if (!tg3_flag(tp, 5705_PLUS))
9511                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9512         else if (tg3_flag(tp, 5717_PLUS))
9513                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9514         else if (tg3_flag(tp, 57765_CLASS) ||
9515                  tg3_asic_rev(tp) == ASIC_REV_5762)
9516                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9517         else
9518                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9519
9520         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9521              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9522                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9523                               BDINFO_FLAGS_DISABLED);
9524 }
9525
9526 /* tp->lock is held. */
9527 static void tg3_tx_rcbs_init(struct tg3 *tp)
9528 {
9529         int i = 0;
9530         u32 txrcb = NIC_SRAM_SEND_RCB;
9531
9532         if (tg3_flag(tp, ENABLE_TSS))
9533                 i++;
9534
9535         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9536                 struct tg3_napi *tnapi = &tp->napi[i];
9537
9538                 if (!tnapi->tx_ring)
9539                         continue;
9540
9541                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9542                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9543                                NIC_SRAM_TX_BUFFER_DESC);
9544         }
9545 }
9546
9547 /* tp->lock is held. */
9548 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9549 {
9550         u32 rxrcb, limit;
9551
9552         /* Disable all receive return rings but the first. */
9553         if (tg3_flag(tp, 5717_PLUS))
9554                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9555         else if (!tg3_flag(tp, 5705_PLUS))
9556                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9557         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9558                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9559                  tg3_flag(tp, 57765_CLASS))
9560                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9561         else
9562                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9563
9564         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9565              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9566                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9567                               BDINFO_FLAGS_DISABLED);
9568 }
9569
9570 /* tp->lock is held. */
9571 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9572 {
9573         int i = 0;
9574         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9575
9576         if (tg3_flag(tp, ENABLE_RSS))
9577                 i++;
9578
9579         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9580                 struct tg3_napi *tnapi = &tp->napi[i];
9581
9582                 if (!tnapi->rx_rcb)
9583                         continue;
9584
9585                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9586                                (tp->rx_ret_ring_mask + 1) <<
9587                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9588         }
9589 }
9590
9591 /* tp->lock is held. */
9592 static void tg3_rings_reset(struct tg3 *tp)
9593 {
9594         int i;
9595         u32 stblk;
9596         struct tg3_napi *tnapi = &tp->napi[0];
9597
9598         tg3_tx_rcbs_disable(tp);
9599
9600         tg3_rx_ret_rcbs_disable(tp);
9601
9602         /* Disable interrupts */
9603         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9604         tp->napi[0].chk_msi_cnt = 0;
9605         tp->napi[0].last_rx_cons = 0;
9606         tp->napi[0].last_tx_cons = 0;
9607
9608         /* Zero mailbox registers. */
9609         if (tg3_flag(tp, SUPPORT_MSIX)) {
9610                 for (i = 1; i < tp->irq_max; i++) {
9611                         tp->napi[i].tx_prod = 0;
9612                         tp->napi[i].tx_cons = 0;
9613                         if (tg3_flag(tp, ENABLE_TSS))
9614                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9615                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9616                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9617                         tp->napi[i].chk_msi_cnt = 0;
9618                         tp->napi[i].last_rx_cons = 0;
9619                         tp->napi[i].last_tx_cons = 0;
9620                 }
9621                 if (!tg3_flag(tp, ENABLE_TSS))
9622                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9623         } else {
9624                 tp->napi[0].tx_prod = 0;
9625                 tp->napi[0].tx_cons = 0;
9626                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9627                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9628         }
9629
9630         /* Make sure the NIC-based send BD rings are disabled. */
9631         if (!tg3_flag(tp, 5705_PLUS)) {
9632                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9633                 for (i = 0; i < 16; i++)
9634                         tw32_tx_mbox(mbox + i * 8, 0);
9635         }
9636
9637         /* Clear status block in ram. */
9638         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9639
9640         /* Set status block DMA address */
9641         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9642              ((u64) tnapi->status_mapping >> 32));
9643         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9644              ((u64) tnapi->status_mapping & 0xffffffff));
9645
9646         stblk = HOSTCC_STATBLCK_RING1;
9647
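        /* Program the remaining vectors' status block addresses; each
         * vector's high/low register pair follows 8 bytes after the
         * previous one.
         */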
9648         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9649                 u64 mapping = (u64)tnapi->status_mapping;
9650                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9651                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9652                 stblk += 8;
9653
9654                 /* Clear status block in ram. */
9655                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9656         }
9657
9658         tg3_tx_rcbs_init(tp);
9659         tg3_rx_ret_rcbs_init(tp);
9660 }
9661
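/* Set the RX BD replenish thresholds.  The NIC-side threshold is
 * capped at half the chip's BD cache size (and by rx_std_max_post);
 * the host-side threshold is an eighth of the configured ring size,
 * with a floor of one descriptor.
 */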
9662 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9663 {
9664         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9665
9666         if (!tg3_flag(tp, 5750_PLUS) ||
9667             tg3_flag(tp, 5780_CLASS) ||
9668             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9669             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9670             tg3_flag(tp, 57765_PLUS))
9671                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9672         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9673                  tg3_asic_rev(tp) == ASIC_REV_5787)
9674                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9675         else
9676                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9677
9678         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9679         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9680
9681         val = min(nic_rep_thresh, host_rep_thresh);
9682         tw32(RCVBDI_STD_THRESH, val);
9683
9684         if (tg3_flag(tp, 57765_PLUS))
9685                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9686
9687         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9688                 return;
9689
9690         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9691
9692         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9693
9694         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9695         tw32(RCVBDI_JUMBO_THRESH, val);
9696
9697         if (tg3_flag(tp, 57765_PLUS))
9698                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9699 }
9700
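/* Bit-serial CRC-32 over the buffer, LSB first, using the reflected
 * IEEE 802.3 polynomial (CRC32_POLY_LE) and returning the one's
 * complement of the residue.  The multicast hash code below uses the
 * low-order bits of this CRC to select a filter bit.
 */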
9701 static inline u32 calc_crc(unsigned char *buf, int len)
9702 {
9703         u32 reg;
9704         u32 tmp;
9705         int j, k;
9706
9707         reg = 0xffffffff;
9708
9709         for (j = 0; j < len; j++) {
9710                 reg ^= buf[j];
9711
9712                 for (k = 0; k < 8; k++) {
9713                         tmp = reg & 0x01;
9714
9715                         reg >>= 1;
9716
9717                         if (tmp)
9718                                 reg ^= CRC32_POLY_LE;
9719                 }
9720         }
9721
9722         return ~reg;
9723 }
9724
9725 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9726 {
9727         /* accept or reject all multicast frames */
9728         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9729         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9730         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9731         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9732 }
9733
9734 static void __tg3_set_rx_mode(struct net_device *dev)
9735 {
9736         struct tg3 *tp = netdev_priv(dev);
9737         u32 rx_mode;
9738
9739         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9740                                   RX_MODE_KEEP_VLAN_TAG);
9741
9742 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9743         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9744          * flag clear.
9745          */
9746         if (!tg3_flag(tp, ENABLE_ASF))
9747                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9748 #endif
9749
9750         if (dev->flags & IFF_PROMISC) {
9751                 /* Promiscuous mode. */
9752                 rx_mode |= RX_MODE_PROMISC;
9753         } else if (dev->flags & IFF_ALLMULTI) {
9754                 /* Accept all multicast. */
9755                 tg3_set_multi(tp, 1);
9756         } else if (netdev_mc_empty(dev)) {
9757                 /* Reject all multicast. */
9758                 tg3_set_multi(tp, 0);
9759         } else {
9760                 /* Accept one or more multicast(s). */
9761                 struct netdev_hw_addr *ha;
9762                 u32 mc_filter[4] = { 0, };
9763                 u32 regidx;
9764                 u32 bit;
9765                 u32 crc;
9766
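                /* Hash each address with the Ethernet CRC; the low 7
                 * bits (inverted) select one of 128 filter bits spread
                 * across the four 32-bit MAC hash registers.
                 */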
9767                 netdev_for_each_mc_addr(ha, dev) {
9768                         crc = calc_crc(ha->addr, ETH_ALEN);
9769                         bit = ~crc & 0x7f;
9770                         regidx = (bit & 0x60) >> 5;
9771                         bit &= 0x1f;
9772                         mc_filter[regidx] |= (1 << bit);
9773                 }
9774
9775                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9776                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9777                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9778                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9779         }
9780
9781         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9782                 rx_mode |= RX_MODE_PROMISC;
9783         } else if (!(dev->flags & IFF_PROMISC)) {
9784                 /* Add all entries to the MAC address filter list */
9785                 int i = 0;
9786                 struct netdev_hw_addr *ha;
9787
9788                 netdev_for_each_uc_addr(ha, dev) {
9789                         __tg3_set_one_mac_addr(tp, ha->addr,
9790                                                i + TG3_UCAST_ADDR_IDX(tp));
9791                         i++;
9792                 }
9793         }
9794
9795         if (rx_mode != tp->rx_mode) {
9796                 tp->rx_mode = rx_mode;
9797                 tw32_f(MAC_RX_MODE, rx_mode);
9798                 udelay(10);
9799         }
9800 }
9801
9802 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9803 {
9804         int i;
9805
9806         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9807                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9808 }
9809
9810 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9811 {
9812         int i;
9813
9814         if (!tg3_flag(tp, SUPPORT_MSIX))
9815                 return;
9816
9817         if (tp->rxq_cnt == 1) {
9818                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9819                 return;
9820         }
9821
9822         /* Validate table against current IRQ count */
9823         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9824                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9825                         break;
9826         }
9827
9828         if (i != TG3_RSS_INDIR_TBL_SIZE)
9829                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9830 }
9831
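/* Pack the RSS indirection table into the hardware registers: each
 * 32-bit register holds eight 4-bit queue indices, with the first
 * entry of each group in the most significant nibble.
 */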
9832 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9833 {
9834         int i = 0;
9835         u32 reg = MAC_RSS_INDIR_TBL_0;
9836
9837         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9838                 u32 val = tp->rss_ind_tbl[i];
9839                 i++;
9840                 for (; i % 8; i++) {
9841                         val <<= 4;
9842                         val |= tp->rss_ind_tbl[i];
9843                 }
9844                 tw32(reg, val);
9845                 reg += 4;
9846         }
9847 }
9848
9849 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9850 {
9851         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9852                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9853         else
9854                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9855 }
9856
9857 /* tp->lock is held. */
9858 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9859 {
9860         u32 val, rdmac_mode;
9861         int i, err, limit;
9862         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9863
9864         tg3_disable_ints(tp);
9865
9866         tg3_stop_fw(tp);
9867
9868         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9869
9870         if (tg3_flag(tp, INIT_COMPLETE))
9871                 tg3_abort_hw(tp, true);
9872
9873         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9874             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9875                 tg3_phy_pull_config(tp);
9876                 tg3_eee_pull_config(tp, NULL);
9877                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9878         }
9879
9880         /* Enable MAC control of LPI */
9881         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9882                 tg3_setup_eee(tp);
9883
9884         if (reset_phy)
9885                 tg3_phy_reset(tp);
9886
9887         err = tg3_chip_reset(tp);
9888         if (err)
9889                 return err;
9890
9891         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9892
9893         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9894                 val = tr32(TG3_CPMU_CTRL);
9895                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9896                 tw32(TG3_CPMU_CTRL, val);
9897
9898                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9899                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9900                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9901                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9902
9903                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9904                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9905                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9906                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9907
9908                 val = tr32(TG3_CPMU_HST_ACC);
9909                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9910                 val |= CPMU_HST_ACC_MACCLK_6_25;
9911                 tw32(TG3_CPMU_HST_ACC, val);
9912         }
9913
9914         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9915                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9916                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9917                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9918                 tw32(PCIE_PWR_MGMT_THRESH, val);
9919
9920                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9921                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9922
9923                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9924
9925                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9926                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9927         }
9928
9929         if (tg3_flag(tp, L1PLLPD_EN)) {
9930                 u32 grc_mode = tr32(GRC_MODE);
9931
9932                 /* Access the lower 1K of PL PCIE block registers. */
9933                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9934                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9935
9936                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9937                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9938                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9939
9940                 tw32(GRC_MODE, grc_mode);
9941         }
9942
9943         if (tg3_flag(tp, 57765_CLASS)) {
9944                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9945                         u32 grc_mode = tr32(GRC_MODE);
9946
9947                         /* Access the lower 1K of PL PCIE block registers. */
9948                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9949                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9950
9951                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9952                                    TG3_PCIE_PL_LO_PHYCTL5);
9953                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9954                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9955
9956                         tw32(GRC_MODE, grc_mode);
9957                 }
9958
9959                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9960                         u32 grc_mode;
9961
9962                         /* Fix transmit hangs */
9963                         val = tr32(TG3_CPMU_PADRNG_CTL);
9964                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9965                         tw32(TG3_CPMU_PADRNG_CTL, val);
9966
9967                         grc_mode = tr32(GRC_MODE);
9968
9969                         /* Access the lower 1K of DL PCIE block registers. */
9970                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9971                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9972
9973                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9974                                    TG3_PCIE_DL_LO_FTSMAX);
9975                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9976                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9977                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9978
9979                         tw32(GRC_MODE, grc_mode);
9980                 }
9981
9982                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9983                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9984                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9985                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9986         }
9987
9988         /* This works around an issue with Athlon chipsets on
9989          * B3 tigon3 silicon.  This bit has no effect on any
9990          * other revision.  But do not set this bit on PCI Express
9991          * chips, and do not touch the clocks at all if the CPMU is present.
9992          */
9993         if (!tg3_flag(tp, CPMU_PRESENT)) {
9994                 if (!tg3_flag(tp, PCI_EXPRESS))
9995                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9996                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9997         }
9998
9999         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
10000             tg3_flag(tp, PCIX_MODE)) {
10001                 val = tr32(TG3PCI_PCISTATE);
10002                 val |= PCISTATE_RETRY_SAME_DMA;
10003                 tw32(TG3PCI_PCISTATE, val);
10004         }
10005
10006         if (tg3_flag(tp, ENABLE_APE)) {
10007                 /* Allow reads and writes to the
10008                  * APE register and memory space.
10009                  */
10010                 val = tr32(TG3PCI_PCISTATE);
10011                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10012                        PCISTATE_ALLOW_APE_SHMEM_WR |
10013                        PCISTATE_ALLOW_APE_PSPACE_WR;
10014                 tw32(TG3PCI_PCISTATE, val);
10015         }
10016
10017         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10018                 /* Enable some hw fixes.  */
10019                 val = tr32(TG3PCI_MSI_DATA);
10020                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10021                 tw32(TG3PCI_MSI_DATA, val);
10022         }
10023
10024         /* Descriptor ring init may access the NIC SRAM
10025          * area to set up the TX descriptors, so we can
10026          * only do this after the hardware has been
10027          * successfully reset.
10028          */
10029         err = tg3_init_rings(tp);
10030         if (err)
10031                 return err;
10032
10033         if (tg3_flag(tp, 57765_PLUS)) {
10034                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10035                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10036                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10037                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10038                 if (!tg3_flag(tp, 57765_CLASS) &&
10039                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10040                     tg3_asic_rev(tp) != ASIC_REV_5762)
10041                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10042                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10043         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10044                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10045                 /* This value is determined during the probe-time DMA
10046                  * engine test, tg3_test_dma().
10047                  */
10048                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10049         }
10050
10051         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10052                           GRC_MODE_4X_NIC_SEND_RINGS |
10053                           GRC_MODE_NO_TX_PHDR_CSUM |
10054                           GRC_MODE_NO_RX_PHDR_CSUM);
10055         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10056
10057         /* Pseudo-header checksum is done by hardware logic and not
10058          * the offload processors, so make the chip do the pseudo-
10059          * header checksums on receive.  For transmit it is more
10060          * convenient to do the pseudo-header checksum in software
10061          * as Linux does that on transmit for us in all cases.
10062          */
10063         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10064
10065         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10066         if (tp->rxptpctl)
10067                 tw32(TG3_RX_PTP_CTL,
10068                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10069
10070         if (tg3_flag(tp, PTP_CAPABLE))
10071                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10072
10073         tw32(GRC_MODE, tp->grc_mode | val);
10074
10075         /* On one AMD platform, the MRRS is restricted to 4000 because of a
10076          * south bridge limitation. As a workaround, the driver sets the MRRS
10077          * to 2048 instead of the default 4096.
10078          */
10079         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10080             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10081                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10082                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10083         }
10084
10085         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10086         val = tr32(GRC_MISC_CFG);
10087         val &= ~0xff;
10088         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10089         tw32(GRC_MISC_CFG, val);
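        /* A prescaler value of 65 presumably divides the 66 MHz clock
         * by 65 + 1, giving the timer a 1 MHz (1 us) tick.
         */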
10090
10091         /* Initialize MBUF/DESC pool. */
10092         if (tg3_flag(tp, 5750_PLUS)) {
10093                 /* Do nothing.  */
10094         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10095                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10096                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10097                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10098                 else
10099                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10100                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10101                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10102         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10103                 int fw_len;
10104
10105                 fw_len = tp->fw_len;
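                /* Round the firmware length up to the next 128-byte
                 * boundary: e.g. 0x1234 + 0x7f = 0x12b3, masked with
                 * ~0x7f to 0x1280.
                 */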
10106                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10107                 tw32(BUFMGR_MB_POOL_ADDR,
10108                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10109                 tw32(BUFMGR_MB_POOL_SIZE,
10110                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10111         }
10112
10113         if (tp->dev->mtu <= ETH_DATA_LEN) {
10114                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10115                      tp->bufmgr_config.mbuf_read_dma_low_water);
10116                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10117                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10118                 tw32(BUFMGR_MB_HIGH_WATER,
10119                      tp->bufmgr_config.mbuf_high_water);
10120         } else {
10121                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10122                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10123                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10124                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10125                 tw32(BUFMGR_MB_HIGH_WATER,
10126                      tp->bufmgr_config.mbuf_high_water_jumbo);
10127         }
10128         tw32(BUFMGR_DMA_LOW_WATER,
10129              tp->bufmgr_config.dma_low_water);
10130         tw32(BUFMGR_DMA_HIGH_WATER,
10131              tp->bufmgr_config.dma_high_water);
10132
10133         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10134         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10135                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10136         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10137             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10138             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10139             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10140                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10141         tw32(BUFMGR_MODE, val);
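        /* Give the buffer manager up to 2000 * 10 us, i.e. roughly
         * 20 ms, to report itself enabled.
         */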
10142         for (i = 0; i < 2000; i++) {
10143                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10144                         break;
10145                 udelay(10);
10146         }
10147         if (i >= 2000) {
10148                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10149                 return -ENODEV;
10150         }
10151
10152         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10153                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10154
10155         tg3_setup_rxbd_thresholds(tp);
10156
10157         /* Initialize TG3_BDINFO's at:
10158          *  RCVDBDI_STD_BD:     standard eth size rx ring
10159          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10160          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10161          *
10162          * like so:
10163          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10164          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10165          *                              ring attribute flags
10166          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10167          *
10168          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10169          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10170          *
10171          * The size of each ring is fixed in the firmware, but the location is
10172          * configurable.
10173          */
10174         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10175              ((u64) tpr->rx_std_mapping >> 32));
10176         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10177              ((u64) tpr->rx_std_mapping & 0xffffffff));
10178         if (!tg3_flag(tp, 5717_PLUS))
10179                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10180                      NIC_SRAM_RX_BUFFER_DESC);
10181
10182         /* Disable the mini ring */
10183         if (!tg3_flag(tp, 5705_PLUS))
10184                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10185                      BDINFO_FLAGS_DISABLED);
10186
10187         /* Program the jumbo buffer descriptor ring control
10188          * blocks on those devices that have them.
10189          */
10190         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10191             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10192
10193                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10194                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10195                              ((u64) tpr->rx_jmb_mapping >> 32));
10196                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10197                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10198                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10199                               BDINFO_FLAGS_MAXLEN_SHIFT;
10200                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10201                              val | BDINFO_FLAGS_USE_EXT_RECV);
10202                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10203                             tg3_flag(tp, 57765_CLASS) ||
10204                             tg3_asic_rev(tp) == ASIC_REV_5762)
10205                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10206                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10207                 } else {
10208                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10209                              BDINFO_FLAGS_DISABLED);
10210                 }
10211
10212                 if (tg3_flag(tp, 57765_PLUS)) {
10213                         val = TG3_RX_STD_RING_SIZE(tp);
10214                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10215                         val |= (TG3_RX_STD_DMA_SZ << 2);
10216                 } else
10217                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10218         } else
10219                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10220
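        /* Per the TG3_BDINFO layout described above: (rx max buffer
         * size << BDINFO_FLAGS_MAXLEN_SHIFT) | ring attribute flags.
         */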
10221         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10222
10223         tpr->rx_std_prod_idx = tp->rx_pending;
10224         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10225
10226         tpr->rx_jmb_prod_idx =
10227                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10228         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10229
10230         tg3_rings_reset(tp);
10231
10232         /* Initialize MAC address and backoff seed. */
10233         __tg3_set_mac_addr(tp, false);
10234
10235         /* MTU + ethernet header + FCS + optional VLAN tag */
10236         tw32(MAC_RX_MTU_SIZE,
10237              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
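        /* e.g. 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN)
         * = 1522 bytes for the default MTU.
         */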
10238
10239         /* The slot time is changed by tg3_setup_phy if we
10240          * run at gigabit with half duplex.
10241          */
10242         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10243               (6 << TX_LENGTHS_IPG_SHIFT) |
10244               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10245
10246         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10247             tg3_asic_rev(tp) == ASIC_REV_5762)
10248                 val |= tr32(MAC_TX_LENGTHS) &
10249                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10250                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10251
10252         tw32(MAC_TX_LENGTHS, val);
10253
10254         /* Receive rules. */
10255         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10256         tw32(RCVLPC_CONFIG, 0x0181);
10257
10258         /* Calculate RDMAC_MODE setting early, we need it to determine
10259          * the RCVLPC_STATE_ENABLE mask.
10260          */
10261         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10262                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10263                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10264                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10265                       RDMAC_MODE_LNGREAD_ENAB);
10266
10267         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10268                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10269
10270         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10271             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10272             tg3_asic_rev(tp) == ASIC_REV_57780)
10273                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10274                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10275                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10276
10277         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10278             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10279                 if (tg3_flag(tp, TSO_CAPABLE) &&
10280                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10281                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10282                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10283                            !tg3_flag(tp, IS_5788)) {
10284                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10285                 }
10286         }
10287
10288         if (tg3_flag(tp, PCI_EXPRESS))
10289                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10290
10291         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10292                 tp->dma_limit = 0;
10293                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10294                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10295                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10296                 }
10297         }
10298
10299         if (tg3_flag(tp, HW_TSO_1) ||
10300             tg3_flag(tp, HW_TSO_2) ||
10301             tg3_flag(tp, HW_TSO_3))
10302                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10303
10304         if (tg3_flag(tp, 57765_PLUS) ||
10305             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10306             tg3_asic_rev(tp) == ASIC_REV_57780)
10307                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10308
10309         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10310             tg3_asic_rev(tp) == ASIC_REV_5762)
10311                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10312
10313         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10314             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10315             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10316             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10317             tg3_flag(tp, 57765_PLUS)) {
10318                 u32 tgtreg;
10319
10320                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10321                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10322                 else
10323                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10324
10325                 val = tr32(tgtreg);
10326                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10327                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10328                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10329                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10330                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10331                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10332                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10333                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10334                 }
10335                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10336         }
10337
10338         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10339             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10340             tg3_asic_rev(tp) == ASIC_REV_5762) {
10341                 u32 tgtreg;
10342
10343                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10344                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10345                 else
10346                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10347
10348                 val = tr32(tgtreg);
10349                 tw32(tgtreg, val |
10350                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10351                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10352         }
10353
10354         /* Receive/send statistics. */
10355         if (tg3_flag(tp, 5750_PLUS)) {
10356                 val = tr32(RCVLPC_STATS_ENABLE);
10357                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10358                 tw32(RCVLPC_STATS_ENABLE, val);
10359         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10360                    tg3_flag(tp, TSO_CAPABLE)) {
10361                 val = tr32(RCVLPC_STATS_ENABLE);
10362                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10363                 tw32(RCVLPC_STATS_ENABLE, val);
10364         } else {
10365                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10366         }
10367         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10368         tw32(SNDDATAI_STATSENAB, 0xffffff);
10369         tw32(SNDDATAI_STATSCTRL,
10370              (SNDDATAI_SCTRL_ENABLE |
10371               SNDDATAI_SCTRL_FASTUPD));
10372
10373         /* Set up the host coalescing engine. */
10374         tw32(HOSTCC_MODE, 0);
10375         for (i = 0; i < 2000; i++) {
10376                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10377                         break;
10378                 udelay(10);
10379         }
10380
10381         __tg3_set_coalesce(tp, &tp->coal);
10382
10383         if (!tg3_flag(tp, 5705_PLUS)) {
10384                 /* Status/statistics block address.  See tg3_timer,
10385                  * the tg3_periodic_fetch_stats call there, and
10386                  * tg3_get_stats to see how this works for 5705/5750 chips.
10387                  */
10388                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10389                      ((u64) tp->stats_mapping >> 32));
10390                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10391                      ((u64) tp->stats_mapping & 0xffffffff));
10392                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10393
10394                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10395
10396                 /* Clear statistics and status block memory areas */
10397                 for (i = NIC_SRAM_STATS_BLK;
10398                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10399                      i += sizeof(u32)) {
10400                         tg3_write_mem(tp, i, 0);
10401                         udelay(40);
10402                 }
10403         }
10404
10405         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10406
10407         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10408         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10409         if (!tg3_flag(tp, 5705_PLUS))
10410                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10411
10412         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10413                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10414                 /* Reset to prevent intermittently losing the first rx packet */
10415                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10416                 udelay(10);
10417         }
10418
10419         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10420                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10421                         MAC_MODE_FHDE_ENABLE;
10422         if (tg3_flag(tp, ENABLE_APE))
10423                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10424         if (!tg3_flag(tp, 5705_PLUS) &&
10425             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10426             tg3_asic_rev(tp) != ASIC_REV_5700)
10427                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10428         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10429         udelay(40);
10430
10431         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10432          * If TG3_FLAG_IS_NIC is zero, we should read the
10433          * register to preserve the GPIO settings for LOMs. The GPIOs,
10434          * whether used as inputs or outputs, are set by boot code after
10435          * reset.
10436          */
10437         if (!tg3_flag(tp, IS_NIC)) {
10438                 u32 gpio_mask;
10439
10440                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10441                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10442                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10443
10444                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10445                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10446                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10447
10448                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10449                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10450
10451                 tp->grc_local_ctrl &= ~gpio_mask;
10452                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10453
10454                 /* GPIO1 must be driven high for eeprom write protect */
10455                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10456                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10457                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10458         }
10459         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10460         udelay(100);
10461
10462         if (tg3_flag(tp, USING_MSIX)) {
10463                 val = tr32(MSGINT_MODE);
10464                 val |= MSGINT_MODE_ENABLE;
10465                 if (tp->irq_cnt > 1)
10466                         val |= MSGINT_MODE_MULTIVEC_EN;
10467                 if (!tg3_flag(tp, 1SHOT_MSI))
10468                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10469                 tw32(MSGINT_MODE, val);
10470         }
10471
10472         if (!tg3_flag(tp, 5705_PLUS)) {
10473                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10474                 udelay(40);
10475         }
10476
10477         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10478                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10479                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10480                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10481                WDMAC_MODE_LNGREAD_ENAB);
10482
10483         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10484             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10485                 if (tg3_flag(tp, TSO_CAPABLE) &&
10486                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10487                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10488                         /* nothing */
10489                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10490                            !tg3_flag(tp, IS_5788)) {
10491                         val |= WDMAC_MODE_RX_ACCEL;
10492                 }
10493         }
10494
10495         /* Enable host coalescing bug fix */
10496         if (tg3_flag(tp, 5755_PLUS))
10497                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10498
10499         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10500                 val |= WDMAC_MODE_BURST_ALL_DATA;
10501
10502         tw32_f(WDMAC_MODE, val);
10503         udelay(40);
10504
10505         if (tg3_flag(tp, PCIX_MODE)) {
10506                 u16 pcix_cmd;
10507
10508                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10509                                      &pcix_cmd);
10510                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10511                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10512                         pcix_cmd |= PCI_X_CMD_READ_2K;
10513                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10514                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10515                         pcix_cmd |= PCI_X_CMD_READ_2K;
10516                 }
10517                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10518                                       pcix_cmd);
10519         }
10520
10521         tw32_f(RDMAC_MODE, rdmac_mode);
10522         udelay(40);
10523
10524         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10525             tg3_asic_rev(tp) == ASIC_REV_5720) {
10526                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10527                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10528                                 break;
10529                 }
10530                 if (i < TG3_NUM_RDMA_CHANNELS) {
10531                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10532                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10533                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10534                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10535                 }
10536         }
10537
10538         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10539         if (!tg3_flag(tp, 5705_PLUS))
10540                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10541
10542         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10543                 tw32(SNDDATAC_MODE,
10544                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10545         else
10546                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10547
10548         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10549         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10550         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10551         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10552                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10553         tw32(RCVDBDI_MODE, val);
10554         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10555         if (tg3_flag(tp, HW_TSO_1) ||
10556             tg3_flag(tp, HW_TSO_2) ||
10557             tg3_flag(tp, HW_TSO_3))
10558                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10559         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10560         if (tg3_flag(tp, ENABLE_TSS))
10561                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10562         tw32(SNDBDI_MODE, val);
10563         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10564
10565         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10566                 err = tg3_load_5701_a0_firmware_fix(tp);
10567                 if (err)
10568                         return err;
10569         }
10570
10571         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10572                 /* Ignore any errors for the firmware download. If the
10573                  * download fails, the device will operate with EEE disabled.
10574                  */
10575                 tg3_load_57766_firmware(tp);
10576         }
10577
10578         if (tg3_flag(tp, TSO_CAPABLE)) {
10579                 err = tg3_load_tso_firmware(tp);
10580                 if (err)
10581                         return err;
10582         }
10583
10584         tp->tx_mode = TX_MODE_ENABLE;
10585
10586         if (tg3_flag(tp, 5755_PLUS) ||
10587             tg3_asic_rev(tp) == ASIC_REV_5906)
10588                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10589
10590         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10591             tg3_asic_rev(tp) == ASIC_REV_5762) {
10592                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10593                 tp->tx_mode &= ~val;
10594                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10595         }
10596
10597         tw32_f(MAC_TX_MODE, tp->tx_mode);
10598         udelay(100);
10599
10600         if (tg3_flag(tp, ENABLE_RSS)) {
10601                 u32 rss_key[10];
10602
10603                 tg3_rss_write_indir_tbl(tp);
10604
10605                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10606
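                /* netdev_rss_key_fill() copies from the kernel's
                 * boot-time random RSS key, so each pass through here
                 * programs the same 40-byte hash key into the ten
                 * MAC_RSS_HASH_KEY registers.
                 */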
10607                 for (i = 0; i < 10; i++)
10608                         tw32(MAC_RSS_HASH_KEY_0 + i * 4, rss_key[i]);
10609         }
10610
10611         tp->rx_mode = RX_MODE_ENABLE;
10612         if (tg3_flag(tp, 5755_PLUS))
10613                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10614
10615         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10616                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10617
10618         if (tg3_flag(tp, ENABLE_RSS))
10619                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10620                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10621                                RX_MODE_RSS_IPV6_HASH_EN |
10622                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10623                                RX_MODE_RSS_IPV4_HASH_EN |
10624                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10625
10626         tw32_f(MAC_RX_MODE, tp->rx_mode);
10627         udelay(10);
10628
10629         tw32(MAC_LED_CTRL, tp->led_ctrl);
10630
10631         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10632         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10633                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10634                 udelay(10);
10635         }
10636         tw32_f(MAC_RX_MODE, tp->rx_mode);
10637         udelay(10);
10638
10639         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10640                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10641                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10642                         /* Set drive transmission level to 1.2V, but only
10643                          * if the signal pre-emphasis bit is not set. */
10644                         val = tr32(MAC_SERDES_CFG);
10645                         val &= 0xfffff000;
10646                         val |= 0x880;
10647                         tw32(MAC_SERDES_CFG, val);
10648                 }
10649                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10650                         tw32(MAC_SERDES_CFG, 0x616000);
10651         }
10652
10653         /* Prevent chip from dropping frames when flow control
10654          * is enabled.
10655          */
10656         if (tg3_flag(tp, 57765_CLASS))
10657                 val = 1;
10658         else
10659                 val = 2;
10660         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10661
10662         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10663             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10664                 /* Use hardware link auto-negotiation */
10665                 tg3_flag_set(tp, HW_AUTONEG);
10666         }
10667
10668         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10669             tg3_asic_rev(tp) == ASIC_REV_5714) {
10670                 u32 tmp;
10671
10672                 tmp = tr32(SERDES_RX_CTRL);
10673                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10674                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10675                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10676                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10677         }
10678
10679         if (!tg3_flag(tp, USE_PHYLIB)) {
10680                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10681                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10682
10683                 err = tg3_setup_phy(tp, false);
10684                 if (err)
10685                         return err;
10686
10687                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10688                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10689                         u32 tmp;
10690
10691                         /* Clear CRC stats. */
10692                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10693                                 tg3_writephy(tp, MII_TG3_TEST1,
10694                                              tmp | MII_TG3_TEST1_CRC_EN);
10695                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10696                         }
10697                 }
10698         }
10699
10700         __tg3_set_rx_mode(tp->dev);
10701
10702         /* Initialize receive rules. */
10703         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10704         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10705         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10706         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10707
10708         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10709                 limit = 8;
10710         else
10711                 limit = 16;
10712         if (tg3_flag(tp, ENABLE_ASF))
10713                 limit -= 4;
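        /* Entering the switch at 'limit' and falling through clears
         * every rule/value pair from limit - 1 down to 4; rules 0 and 1
         * were programmed above, and slots 2 and 3 are left untouched.
         */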
10714         switch (limit) {
10715         case 16:
10716                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10717                 fallthrough;
10718         case 15:
10719                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10720                 fallthrough;
10721         case 14:
10722                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10723                 fallthrough;
10724         case 13:
10725                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10726                 fallthrough;
10727         case 12:
10728                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10729                 fallthrough;
10730         case 11:
10731                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10732                 fallthrough;
10733         case 10:
10734                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10735                 fallthrough;
10736         case 9:
10737                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10738                 fallthrough;
10739         case 8:
10740                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10741                 fallthrough;
10742         case 7:
10743                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10744                 fallthrough;
10745         case 6:
10746                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10747                 fallthrough;
10748         case 5:
10749                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10750                 fallthrough;
10751         case 4:
10752                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10753         case 3:
10754                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10755         case 2:
10756         case 1:
10757
10758         default:
10759                 break;
10760         }
10761
10762         if (tg3_flag(tp, ENABLE_APE))
10763                 /* Write our heartbeat update interval to APE. */
10764                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10765                                 APE_HOST_HEARTBEAT_INT_5SEC);
10766
10767         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10768
10769         return 0;
10770 }
10771
10772 /* Called at device open time to get the chip ready for
10773  * packet processing.  Invoked with tp->lock held.
10774  */
10775 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10776 {
10777         /* Chip may have been just powered on. If so, the boot code may still
10778          * be running initialization. Wait for it to finish to avoid races in
10779          * accessing the hardware.
10780          */
10781         tg3_enable_register_access(tp);
10782         tg3_poll_fw(tp);
10783
10784         tg3_switch_clocks(tp);
10785
10786         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10787
10788         return tg3_reset_hw(tp, reset_phy);
10789 }
10790
10791 #ifdef CONFIG_TIGON3_HWMON
10792 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10793 {
10794         u32 off, len = TG3_OCIR_LEN;
10795         int i;
10796
10797         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10798                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10799
10800                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10801                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10802                         memset(ocir, 0, len);
10803         }
10804 }
10805
10806 /* sysfs attributes for hwmon */
10807 static ssize_t tg3_show_temp(struct device *dev,
10808                              struct device_attribute *devattr, char *buf)
10809 {
10810         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10811         struct tg3 *tp = dev_get_drvdata(dev);
10812         u32 temperature;
10813
10814         spin_lock_bh(&tp->lock);
10815         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10816                                 sizeof(temperature));
10817         spin_unlock_bh(&tp->lock);
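        /* hwmon expects temp*_input values in millidegrees Celsius; the
         * APE scratchpad value is presumably whole degrees, hence the
         * multiplication by 1000.
         */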
10818         return sprintf(buf, "%u\n", temperature * 1000);
10819 }
10820
10821
10822 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10823                           TG3_TEMP_SENSOR_OFFSET);
10824 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10825                           TG3_TEMP_CAUTION_OFFSET);
10826 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10827                           TG3_TEMP_MAX_OFFSET);
10828
10829 static struct attribute *tg3_attrs[] = {
10830         &sensor_dev_attr_temp1_input.dev_attr.attr,
10831         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10832         &sensor_dev_attr_temp1_max.dev_attr.attr,
10833         NULL
10834 };
10835 ATTRIBUTE_GROUPS(tg3);
10836
10837 static void tg3_hwmon_close(struct tg3 *tp)
10838 {
10839         if (tp->hwmon_dev) {
10840                 hwmon_device_unregister(tp->hwmon_dev);
10841                 tp->hwmon_dev = NULL;
10842         }
10843 }
10844
10845 static void tg3_hwmon_open(struct tg3 *tp)
10846 {
10847         int i;
10848         u32 size = 0;
10849         struct pci_dev *pdev = tp->pdev;
10850         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10851
10852         tg3_sd_scan_scratchpad(tp, ocirs);
10853
10854         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10855                 if (!ocirs[i].src_data_length)
10856                         continue;
10857
10858                 size += ocirs[i].src_hdr_length;
10859                 size += ocirs[i].src_data_length;
10860         }
10861
10862         if (!size)
10863                 return;
10864
10865         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10866                                                           tp, tg3_groups);
10867         if (IS_ERR(tp->hwmon_dev)) {
10868                 tp->hwmon_dev = NULL;
10869                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10870         }
10871 }
10872 #else
10873 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10874 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10875 #endif /* CONFIG_TIGON3_HWMON */
10876
10877
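/* Fold a 32-bit hardware counter into a 64-bit (high/low) accumulator.
 * If the 32-bit addition wraps, (PSTAT)->low ends up smaller than the
 * value just added, so the carry belongs in (PSTAT)->high: e.g.
 * low = 0xfffffff0 plus __val = 0x20 wraps to 0x10, which is < 0x20,
 * and high is incremented.
 */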
10878 #define TG3_STAT_ADD32(PSTAT, REG) \
10879 do {    u32 __val = tr32(REG); \
10880         (PSTAT)->low += __val; \
10881         if ((PSTAT)->low < __val) \
10882                 (PSTAT)->high += 1; \
10883 } while (0)
10884
10885 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10886 {
10887         struct tg3_hw_stats *sp = tp->hw_stats;
10888
10889         if (!tp->link_up)
10890                 return;
10891
10892         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10893         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10894         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10895         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10896         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10897         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10898         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10899         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10900         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10901         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10902         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10903         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10904         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10905         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10906                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10907                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10908                 u32 val;
10909
10910                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10911                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10912                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10913                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10914         }
10915
10916         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10917         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10918         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10919         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10920         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10921         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10922         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10923         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10924         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10925         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10926         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10927         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10928         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10929         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10930
10931         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10932         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10933             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10934             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10935             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10936                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10937         } else {
10938                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10939                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10940                 if (val) {
10941                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10942                         sp->rx_discards.low += val;
10943                         if (sp->rx_discards.low < val)
10944                                 sp->rx_discards.high += 1;
10945                 }
10946                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10947         }
10948         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10949 }
10950
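/* Detect a missed MSI: if a vector has work pending but its rx/tx
 * consumer indices have not moved since the previous timer tick, allow
 * one grace pass (chk_msi_cnt) and then call tg3_msi() directly to
 * recover the lost interrupt.
 */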
10951 static void tg3_chk_missed_msi(struct tg3 *tp)
10952 {
10953         u32 i;
10954
10955         for (i = 0; i < tp->irq_cnt; i++) {
10956                 struct tg3_napi *tnapi = &tp->napi[i];
10957
10958                 if (tg3_has_work(tnapi)) {
10959                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10960                             tnapi->last_tx_cons == tnapi->tx_cons) {
10961                                 if (tnapi->chk_msi_cnt < 1) {
10962                                         tnapi->chk_msi_cnt++;
10963                                         return;
10964                                 }
10965                                 tg3_msi(0, tnapi);
10966                         }
10967                 }
10968                 tnapi->chk_msi_cnt = 0;
10969                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10970                 tnapi->last_tx_cons = tnapi->tx_cons;
10971         }
10972 }
10973
10974 static void tg3_timer(struct timer_list *t)
10975 {
10976         struct tg3 *tp = from_timer(tp, t, timer);
10977
10978         spin_lock(&tp->lock);
10979
10980         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10981                 spin_unlock(&tp->lock);
10982                 goto restart_timer;
10983         }
10984
10985         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10986             tg3_flag(tp, 57765_CLASS))
10987                 tg3_chk_missed_msi(tp);
10988
10989         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10990                 /* BCM4785: Flush posted writes from GbE to host memory. */
10991                 tr32(HOSTCC_MODE);
10992         }
10993
10994         if (!tg3_flag(tp, TAGGED_STATUS)) {
10995                 /* All of this garbage is because, when using non-tagged
10996                  * IRQ status, the mailbox/status_block protocol the chip
10997                  * uses with the CPU is race prone.
10998                  */
10999                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
11000                         tw32(GRC_LOCAL_CTRL,
11001                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
11002                 } else {
11003                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11004                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11005                 }
11006
11007                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11008                         spin_unlock(&tp->lock);
11009                         tg3_reset_task_schedule(tp);
11010                         goto restart_timer;
11011                 }
11012         }
11013
11014         /* This part only runs once per second. */
11015         if (!--tp->timer_counter) {
11016                 if (tg3_flag(tp, 5705_PLUS))
11017                         tg3_periodic_fetch_stats(tp);
11018
11019                 if (tp->setlpicnt && !--tp->setlpicnt)
11020                         tg3_phy_eee_enable(tp);
11021
11022                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11023                         u32 mac_stat;
11024                         int phy_event;
11025
11026                         mac_stat = tr32(MAC_STATUS);
11027
11028                         phy_event = 0;
11029                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11030                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11031                                         phy_event = 1;
11032                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11033                                 phy_event = 1;
11034
11035                         if (phy_event)
11036                                 tg3_setup_phy(tp, false);
11037                 } else if (tg3_flag(tp, POLL_SERDES)) {
11038                         u32 mac_stat = tr32(MAC_STATUS);
11039                         int need_setup = 0;
11040
11041                         if (tp->link_up &&
11042                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11043                                 need_setup = 1;
11044                         }
11045                         if (!tp->link_up &&
11046                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11047                                          MAC_STATUS_SIGNAL_DET))) {
11048                                 need_setup = 1;
11049                         }
11050                         if (need_setup) {
11051                                 if (!tp->serdes_counter) {
11052                                         tw32_f(MAC_MODE,
11053                                              (tp->mac_mode &
11054                                               ~MAC_MODE_PORT_MODE_MASK));
11055                                         udelay(40);
11056                                         tw32_f(MAC_MODE, tp->mac_mode);
11057                                         udelay(40);
11058                                 }
11059                                 tg3_setup_phy(tp, false);
11060                         }
11061                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11062                            tg3_flag(tp, 5780_CLASS)) {
11063                         tg3_serdes_parallel_detect(tp);
11064                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11065                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11066                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11067                                          TG3_CPMU_STATUS_LINK_MASK);
11068
11069                         if (link_up != tp->link_up)
11070                                 tg3_setup_phy(tp, false);
11071                 }
11072
11073                 tp->timer_counter = tp->timer_multiplier;
11074         }
11075
11076         /* Heartbeat is only sent once every 2 seconds.
11077          *
11078          * The heartbeat is to tell the ASF firmware that the host
11079          * driver is still alive.  In the event that the OS crashes,
11080          * ASF needs to reset the hardware to free up the FIFO space
11081          * that may be filled with rx packets destined for the host.
11082          * If the FIFO is full, ASF will no longer function properly.
11083          *
11084          * Unintended resets have been reported on real-time kernels
11085          * where the timer doesn't run on time.  Netpoll will have the
11086          * same problem.
11087          *
11088          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11089          * to check the ring condition when the heartbeat is expiring
11090          * before doing the reset.  This will prevent most unintended
11091          * resets.
11092          */
11093         if (!--tp->asf_counter) {
11094                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11095                         tg3_wait_for_event_ack(tp);
11096
11097                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11098                                       FWCMD_NICDRV_ALIVE3);
11099                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11100                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11101                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11102
11103                         tg3_generate_fw_event(tp);
11104                 }
11105                 tp->asf_counter = tp->asf_multiplier;
11106         }
11107
11108         /* Update the APE heartbeat every 5 seconds. */
11109         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11110
11111         spin_unlock(&tp->lock);
11112
11113 restart_timer:
11114         tp->timer.expires = jiffies + tp->timer_offset;
11115         add_timer(&tp->timer);
11116 }
11117
11118 static void tg3_timer_init(struct tg3 *tp)
11119 {
11120         if (tg3_flag(tp, TAGGED_STATUS) &&
11121             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11122             !tg3_flag(tp, 57765_CLASS))
11123                 tp->timer_offset = HZ;
11124         else
11125                 tp->timer_offset = HZ / 10;
11126
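        /* Worked example: with timer_offset = HZ / 10 the timer fires
         * ten times a second, timer_multiplier below is 10, and the
         * once-per-second block in tg3_timer() runs on every tenth
         * tick; asf_multiplier stretches that to TG3_FW_UPDATE_FREQ_SEC
         * seconds.
         */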
11127         BUG_ON(tp->timer_offset > HZ);
11128
11129         tp->timer_multiplier = (HZ / tp->timer_offset);
11130         tp->asf_multiplier = (HZ / tp->timer_offset) *
11131                              TG3_FW_UPDATE_FREQ_SEC;
11132
11133         timer_setup(&tp->timer, tg3_timer, 0);
11134 }
11135
11136 static void tg3_timer_start(struct tg3 *tp)
11137 {
11138         tp->asf_counter   = tp->asf_multiplier;
11139         tp->timer_counter = tp->timer_multiplier;
11140
11141         tp->timer.expires = jiffies + tp->timer_offset;
11142         add_timer(&tp->timer);
11143 }
11144
11145 static void tg3_timer_stop(struct tg3 *tp)
11146 {
11147         del_timer_sync(&tp->timer);
11148 }
11149
11150 /* Restart hardware after configuration changes, self-test, etc.
11151  * Invoked with tp->lock held.
11152  */
11153 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11154         __releases(tp->lock)
11155         __acquires(tp->lock)
11156 {
11157         int err;
11158
11159         err = tg3_init_hw(tp, reset_phy);
11160         if (err) {
11161                 netdev_err(tp->dev,
11162                            "Failed to re-initialize device, aborting\n");
11163                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11164                 tg3_full_unlock(tp);
11165                 tg3_timer_stop(tp);
11166                 tp->irq_sync = 0;
11167                 tg3_napi_enable(tp);
11168                 dev_close(tp->dev);
11169                 tg3_full_lock(tp, 0);
11170         }
11171         return err;
11172 }
11173
11174 static void tg3_reset_task(struct work_struct *work)
11175 {
11176         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11177         int err;
11178
11179         rtnl_lock();
11180         tg3_full_lock(tp, 0);
11181
11182         if (!netif_running(tp->dev)) {
11183                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11184                 tg3_full_unlock(tp);
11185                 rtnl_unlock();
11186                 return;
11187         }
11188
11189         tg3_full_unlock(tp);
11190
11191         tg3_phy_stop(tp);
11192
11193         tg3_netif_stop(tp);
11194
11195         tg3_full_lock(tp, 1);
11196
11197         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11198                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11199                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11200                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11201                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11202         }
11203
11204         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11205         err = tg3_init_hw(tp, true);
11206         if (err) {
11207                 tg3_full_unlock(tp);
11208                 tp->irq_sync = 0;
11209                 tg3_napi_enable(tp);
11210                 /* Clear this flag so that tg3_reset_task_cancel() will not
11211                  * call cancel_work_sync() and wait forever.
11212                  */
11213                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11214                 dev_close(tp->dev);
11215                 goto out;
11216         }
11217
11218         tg3_netif_start(tp);
11219
11220         tg3_full_unlock(tp);
11221
11222         if (!err)
11223                 tg3_phy_start(tp);
11224
11225         tg3_flag_clear(tp, RESET_TASK_PENDING);
11226 out:
11227         rtnl_unlock();
11228 }
11229
11230 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11231 {
11232         irq_handler_t fn;
11233         unsigned long flags;
11234         char *name;
11235         struct tg3_napi *tnapi = &tp->napi[irq_num];
11236
11237         if (tp->irq_cnt == 1)
11238                 name = tp->dev->name;
11239         else {
11240                 name = &tnapi->irq_lbl[0];
11241                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11242                         snprintf(name, IFNAMSIZ,
11243                                  "%s-txrx-%d", tp->dev->name, irq_num);
11244                 else if (tnapi->tx_buffers)
11245                         snprintf(name, IFNAMSIZ,
11246                                  "%s-tx-%d", tp->dev->name, irq_num);
11247                 else if (tnapi->rx_rcb)
11248                         snprintf(name, IFNAMSIZ,
11249                                  "%s-rx-%d", tp->dev->name, irq_num);
11250                 else
11251                         snprintf(name, IFNAMSIZ,
11252                                  "%s-%d", tp->dev->name, irq_num);
11253                 name[IFNAMSIZ-1] = 0;
11254         }
11255
11256         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11257                 fn = tg3_msi;
11258                 if (tg3_flag(tp, 1SHOT_MSI))
11259                         fn = tg3_msi_1shot;
11260                 flags = 0;
11261         } else {
11262                 fn = tg3_interrupt;
11263                 if (tg3_flag(tp, TAGGED_STATUS))
11264                         fn = tg3_interrupt_tagged;
11265                 flags = IRQF_SHARED;
11266         }
11267
11268         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11269 }
11270
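/* Verify that the device can actually deliver an interrupt: swap in
 * tg3_test_isr for vector 0, force an interrupt through the host
 * coalescing engine (coal_now), and poll the interrupt mailbox for up
 * to ~50ms.  MSI one-shot mode is disabled first on 57765_PLUS parts,
 * since in one-shot mode the delivery would not be observable here.
 */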
11271 static int tg3_test_interrupt(struct tg3 *tp)
11272 {
11273         struct tg3_napi *tnapi = &tp->napi[0];
11274         struct net_device *dev = tp->dev;
11275         int err, i, intr_ok = 0;
11276         u32 val;
11277
11278         if (!netif_running(dev))
11279                 return -ENODEV;
11280
11281         tg3_disable_ints(tp);
11282
11283         free_irq(tnapi->irq_vec, tnapi);
11284
11285         /*
11286          * Turn off MSI one shot mode.  Otherwise this test has no way
11287          * to observe whether the interrupt was delivered.
11288          */
11289         if (tg3_flag(tp, 57765_PLUS)) {
11290                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11291                 tw32(MSGINT_MODE, val);
11292         }
11293
11294         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11295                           IRQF_SHARED, dev->name, tnapi);
11296         if (err)
11297                 return err;
11298
11299         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11300         tg3_enable_ints(tp);
11301
11302         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11303                tnapi->coal_now);
11304
11305         for (i = 0; i < 5; i++) {
11306                 u32 int_mbox, misc_host_ctrl;
11307
11308                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11309                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11310
11311                 if ((int_mbox != 0) ||
11312                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11313                         intr_ok = 1;
11314                         break;
11315                 }
11316
11317                 if (tg3_flag(tp, 57765_PLUS) &&
11318                     tnapi->hw_status->status_tag != tnapi->last_tag)
11319                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11320
11321                 msleep(10);
11322         }
11323
11324         tg3_disable_ints(tp);
11325
11326         free_irq(tnapi->irq_vec, tnapi);
11327
11328         err = tg3_request_irq(tp, 0);
11329
11330         if (err)
11331                 return err;
11332
11333         if (intr_ok) {
11334                 /* Reenable MSI one shot mode. */
11335                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11336                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11337                         tw32(MSGINT_MODE, val);
11338                 }
11339                 return 0;
11340         }
11341
11342         return -EIO;
11343 }
11344
11345 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11346  * INTx mode is successfully restored.
11347  */
11348 static int tg3_test_msi(struct tg3 *tp)
11349 {
11350         int err;
11351         u16 pci_cmd;
11352
11353         if (!tg3_flag(tp, USING_MSI))
11354                 return 0;
11355
11356         /* Turn off SERR reporting in case MSI terminates with Master
11357          * Abort.
11358          */
11359         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11360         pci_write_config_word(tp->pdev, PCI_COMMAND,
11361                               pci_cmd & ~PCI_COMMAND_SERR);
11362
11363         err = tg3_test_interrupt(tp);
11364
11365         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11366
11367         if (!err)
11368                 return 0;
11369
11370         /* other failures */
11371         if (err != -EIO)
11372                 return err;
11373
11374         /* MSI test failed, go back to INTx mode */
11375         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11376                     "to INTx mode. Please report this failure to the PCI "
11377                     "maintainer and include system chipset information\n");
11378
11379         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11380
11381         pci_disable_msi(tp->pdev);
11382
11383         tg3_flag_clear(tp, USING_MSI);
11384         tp->napi[0].irq_vec = tp->pdev->irq;
11385
11386         err = tg3_request_irq(tp, 0);
11387         if (err)
11388                 return err;
11389
11390         /* Need to reset the chip because the MSI cycle may have terminated
11391          * with Master Abort.
11392          */
11393         tg3_full_lock(tp, 1);
11394
11395         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11396         err = tg3_init_hw(tp, true);
11397
11398         tg3_full_unlock(tp);
11399
11400         if (err)
11401                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11402
11403         return err;
11404 }
11405
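/* Fetch the firmware image named by tp->fw_needed and sanity-check its
 * header (struct tg3_firmware_hdr): big-endian version, start address
 * and total length words.  Because the stated length includes BSS, it
 * must be at least as large as the data payload following the header.
 */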
11406 static int tg3_request_firmware(struct tg3 *tp)
11407 {
11408         const struct tg3_firmware_hdr *fw_hdr;
11409
11410         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11411                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11412                            tp->fw_needed);
11413                 return -ENOENT;
11414         }
11415
11416         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11417
11418         /* The firmware blob starts with version numbers, followed by
11419          * the start address and the _full_ length including BSS sections
11420          * (which must be longer than the actual data, of course).
11421          */
11422
11423         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11424         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11425                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11426                            tp->fw_len, tp->fw_needed);
11427                 release_firmware(tp->fw);
11428                 tp->fw = NULL;
11429                 return -EINVAL;
11430         }
11431
11432         /* The firmware is loaded now; clear fw_needed so it is not requested again. */
11433         tp->fw_needed = NULL;
11434         return 0;
11435 }
11436
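/* Work out how many MSI-X vectors to request.  With more than one ring
 * in use, vector 0 is dedicated to link and other slow-path events, so
 * one extra vector is requested: e.g. rxq_cnt = 4 and txq_cnt = 1 give
 * max(4, 1) + 1 = 5 vectors, capped at tp->irq_max.
 */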
11437 static u32 tg3_irq_count(struct tg3 *tp)
11438 {
11439         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11440
11441         if (irq_cnt > 1) {
11442                 /* We want as many rx rings enabled as there are cpus.
11443                  * In multiqueue MSI-X mode, the first MSI-X vector
11444          * only deals with link interrupts, etc., so we add
11445                  * one to the number of vectors we are requesting.
11446                  */
11447                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11448         }
11449
11450         return irq_cnt;
11451 }
11452
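/* Negotiate MSI-X vectors.  pci_enable_msix_range() may grant fewer
 * vectors than requested; in that case the RX queue count shrinks to
 * the granted count minus the slow-path vector, and the TX queue count
 * is clamped to match.
 */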
11453 static bool tg3_enable_msix(struct tg3 *tp)
11454 {
11455         int i, rc;
11456         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11457
11458         tp->txq_cnt = tp->txq_req;
11459         tp->rxq_cnt = tp->rxq_req;
11460         if (!tp->rxq_cnt)
11461                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11462         if (tp->rxq_cnt > tp->rxq_max)
11463                 tp->rxq_cnt = tp->rxq_max;
11464
11465         /* Disable multiple TX rings by default.  Simple round-robin hardware
11466          * scheduling of the TX rings can cause starvation of rings with
11467          * small packets when other rings have TSO or jumbo packets.
11468          */
11469         if (!tp->txq_req)
11470                 tp->txq_cnt = 1;
11471
11472         tp->irq_cnt = tg3_irq_count(tp);
11473
11474         for (i = 0; i < tp->irq_max; i++) {
11475                 msix_ent[i].entry  = i;
11476                 msix_ent[i].vector = 0;
11477         }
11478
11479         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11480         if (rc < 0) {
11481                 return false;
11482         } else if (rc < tp->irq_cnt) {
11483                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11484                               tp->irq_cnt, rc);
11485                 tp->irq_cnt = rc;
11486                 tp->rxq_cnt = max(rc - 1, 1);
11487                 if (tp->txq_cnt)
11488                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11489         }
11490
11491         for (i = 0; i < tp->irq_max; i++)
11492                 tp->napi[i].irq_vec = msix_ent[i].vector;
11493
11494         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11495                 pci_disable_msix(tp->pdev);
11496                 return false;
11497         }
11498
11499         if (tp->irq_cnt == 1)
11500                 return true;
11501
11502         tg3_flag_set(tp, ENABLE_RSS);
11503
11504         if (tp->txq_cnt > 1)
11505                 tg3_flag_set(tp, ENABLE_TSS);
11506
11507         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11508
11509         return true;
11510 }
11511
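/* Interrupt setup ladder: MSI and MSI-X are only considered on chips
 * that also support tagged status.  Try MSI-X first, then plain MSI,
 * and finally fall back to legacy INTx.  MSGINT_MODE is programmed to
 * match: multi-vector enable for multiqueue MSI-X, and one-shot mode
 * disabled unless the 1SHOT_MSI flag is set.
 */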
11512 static void tg3_ints_init(struct tg3 *tp)
11513 {
11514         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11515             !tg3_flag(tp, TAGGED_STATUS)) {
11516                 /* All MSI supporting chips should support tagged
11517                  * status.  Assert that this is the case.
11518                  */
11519                 netdev_warn(tp->dev,
11520                             "MSI without TAGGED_STATUS? Not using MSI\n");
11521                 goto defcfg;
11522         }
11523
11524         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11525                 tg3_flag_set(tp, USING_MSIX);
11526         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11527                 tg3_flag_set(tp, USING_MSI);
11528
11529         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11530                 u32 msi_mode = tr32(MSGINT_MODE);
11531                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11532                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11533                 if (!tg3_flag(tp, 1SHOT_MSI))
11534                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11535                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11536         }
11537 defcfg:
11538         if (!tg3_flag(tp, USING_MSIX)) {
11539                 tp->irq_cnt = 1;
11540                 tp->napi[0].irq_vec = tp->pdev->irq;
11541         }
11542
11543         if (tp->irq_cnt == 1) {
11544                 tp->txq_cnt = 1;
11545                 tp->rxq_cnt = 1;
11546                 netif_set_real_num_tx_queues(tp->dev, 1);
11547                 netif_set_real_num_rx_queues(tp->dev, 1);
11548         }
11549 }
11550
11551 static void tg3_ints_fini(struct tg3 *tp)
11552 {
11553         if (tg3_flag(tp, USING_MSIX))
11554                 pci_disable_msix(tp->pdev);
11555         else if (tg3_flag(tp, USING_MSI))
11556                 pci_disable_msi(tp->pdev);
11557         tg3_flag_clear(tp, USING_MSI);
11558         tg3_flag_clear(tp, USING_MSIX);
11559         tg3_flag_clear(tp, ENABLE_RSS);
11560         tg3_flag_clear(tp, ENABLE_TSS);
11561 }
11562
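/* Bring the device up.  Interrupts are configured first because the
 * NAPI and ring allocations depend on the final vector count; then
 * descriptor memory, NAPI contexts and per-vector IRQs are set up, and
 * the hardware itself is initialized under the full lock.  The error
 * labels below unwind in exactly the reverse order.
 */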
11563 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11564                      bool init)
11565 {
11566         struct net_device *dev = tp->dev;
11567         int i, err;
11568
11569         /*
11570          * Set up interrupts first so we know how
11571          * many NAPI resources to allocate.
11572          */
11573         tg3_ints_init(tp);
11574
11575         tg3_rss_check_indir_tbl(tp);
11576
11577         /* The placement of this call is tied
11578          * to the setup and use of Host TX descriptors.
11579          */
11580         err = tg3_alloc_consistent(tp);
11581         if (err)
11582                 goto out_ints_fini;
11583
11584         tg3_napi_init(tp);
11585
11586         tg3_napi_enable(tp);
11587
11588         for (i = 0; i < tp->irq_cnt; i++) {
11589                 err = tg3_request_irq(tp, i);
11590                 if (err) {
11591                         for (i--; i >= 0; i--) {
11592                                 struct tg3_napi *tnapi = &tp->napi[i];
11593
11594                                 free_irq(tnapi->irq_vec, tnapi);
11595                         }
11596                         goto out_napi_fini;
11597                 }
11598         }
11599
11600         tg3_full_lock(tp, 0);
11601
11602         if (init)
11603                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11604
11605         err = tg3_init_hw(tp, reset_phy);
11606         if (err) {
11607                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11608                 tg3_free_rings(tp);
11609         }
11610
11611         tg3_full_unlock(tp);
11612
11613         if (err)
11614                 goto out_free_irq;
11615
11616         if (test_irq && tg3_flag(tp, USING_MSI)) {
11617                 err = tg3_test_msi(tp);
11618
11619                 if (err) {
11620                         tg3_full_lock(tp, 0);
11621                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11622                         tg3_free_rings(tp);
11623                         tg3_full_unlock(tp);
11624
11625                         goto out_napi_fini;
11626                 }
11627
11628                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11629                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11630
11631                         tw32(PCIE_TRANSACTION_CFG,
11632                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11633                 }
11634         }
11635
11636         tg3_phy_start(tp);
11637
11638         tg3_hwmon_open(tp);
11639
11640         tg3_full_lock(tp, 0);
11641
11642         tg3_timer_start(tp);
11643         tg3_flag_set(tp, INIT_COMPLETE);
11644         tg3_enable_ints(tp);
11645
11646         tg3_ptp_resume(tp);
11647
11648         tg3_full_unlock(tp);
11649
11650         netif_tx_start_all_queues(dev);
11651
11652         /*
11653          * If the loopback feature was turned on while the device was down,
11654          * restore it now to make sure it is installed properly.
11655          */
11656         if (dev->features & NETIF_F_LOOPBACK)
11657                 tg3_set_loopback(dev, dev->features);
11658
11659         return 0;
11660
11661 out_free_irq:
11662         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11663                 struct tg3_napi *tnapi = &tp->napi[i];
11664                 free_irq(tnapi->irq_vec, tnapi);
11665         }
11666
11667 out_napi_fini:
11668         tg3_napi_disable(tp);
11669         tg3_napi_fini(tp);
11670         tg3_free_consistent(tp);
11671
11672 out_ints_fini:
11673         tg3_ints_fini(tp);
11674
11675         return err;
11676 }
11677
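/* Tear the device down, roughly the reverse of tg3_start(): cancel any
 * pending reset work, quiesce the timer, hwmon, PHY and NAPI, halt the
 * hardware under the full lock, then release the IRQs, interrupt
 * vectors, NAPI contexts and descriptor memory.
 */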
11678 static void tg3_stop(struct tg3 *tp)
11679 {
11680         int i;
11681
11682         tg3_reset_task_cancel(tp);
11683         tg3_netif_stop(tp);
11684
11685         tg3_timer_stop(tp);
11686
11687         tg3_hwmon_close(tp);
11688
11689         tg3_phy_stop(tp);
11690
11691         tg3_full_lock(tp, 1);
11692
11693         tg3_disable_ints(tp);
11694
11695         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11696         tg3_free_rings(tp);
11697         tg3_flag_clear(tp, INIT_COMPLETE);
11698
11699         tg3_full_unlock(tp);
11700
11701         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11702                 struct tg3_napi *tnapi = &tp->napi[i];
11703                 free_irq(tnapi->irq_vec, tnapi);
11704         }
11705
11706         tg3_ints_fini(tp);
11707
11708         tg3_napi_fini(tp);
11709
11710         tg3_free_consistent(tp);
11711 }
11712
11713 static int tg3_open(struct net_device *dev)
11714 {
11715         struct tg3 *tp = netdev_priv(dev);
11716         int err;
11717
11718         if (tp->pcierr_recovery) {
11719                 netdev_err(dev, "Failed to open device. PCI error recovery "
11720                            "in progress\n");
11721                 return -EAGAIN;
11722         }
11723
11724         if (tp->fw_needed) {
11725                 err = tg3_request_firmware(tp);
11726                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11727                         if (err) {
11728                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11729                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11730                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11731                                 netdev_warn(tp->dev, "EEE capability restored\n");
11732                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11733                         }
11734                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11735                         if (err)
11736                                 return err;
11737                 } else if (err) {
11738                         netdev_warn(tp->dev, "TSO capability disabled\n");
11739                         tg3_flag_clear(tp, TSO_CAPABLE);
11740                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11741                         netdev_notice(tp->dev, "TSO capability restored\n");
11742                         tg3_flag_set(tp, TSO_CAPABLE);
11743                 }
11744         }
11745
11746         tg3_carrier_off(tp);
11747
11748         err = tg3_power_up(tp);
11749         if (err)
11750                 return err;
11751
11752         tg3_full_lock(tp, 0);
11753
11754         tg3_disable_ints(tp);
11755         tg3_flag_clear(tp, INIT_COMPLETE);
11756
11757         tg3_full_unlock(tp);
11758
11759         err = tg3_start(tp,
11760                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11761                         true, true);
11762         if (err) {
11763                 tg3_frob_aux_power(tp, false);
11764                 pci_set_power_state(tp->pdev, PCI_D3hot);
11765         }
11766
11767         return err;
11768 }
11769
11770 static int tg3_close(struct net_device *dev)
11771 {
11772         struct tg3 *tp = netdev_priv(dev);
11773
11774         if (tp->pcierr_recovery) {
11775                 netdev_err(dev, "Failed to close device. PCI error recovery "
11776                            "in progress\n");
11777                 return -EAGAIN;
11778         }
11779
11780         tg3_stop(tp);
11781
11782         if (pci_device_is_present(tp->pdev)) {
11783                 tg3_power_down_prepare(tp);
11784
11785                 tg3_carrier_off(tp);
11786         }
11787         return 0;
11788 }
11789
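/* Hardware statistics counters are 64-bit values exposed as two 32-bit
 * halves (tg3_stat64_t); combine them into a native u64.
 */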
11790 static inline u64 get_stat64(tg3_stat64_t *val)
11791 {
11792         return ((u64)val->high << 32) | ((u64)val->low);
11793 }
11794
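/* On 5700/5701 with a copper PHY, CRC errors are read from the PHY's
 * receive error counter (enabled via MII_TG3_TEST1_CRC_EN) and
 * accumulated in tp->phy_crc_errors, presumably because the MAC's FCS
 * counter is not dependable on those chips.  All other configurations
 * report the MAC's rx_fcs_errors statistic directly.
 */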
11795 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11796 {
11797         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11798
11799         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11800             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11801              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11802                 u32 val;
11803
11804                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11805                         tg3_writephy(tp, MII_TG3_TEST1,
11806                                      val | MII_TG3_TEST1_CRC_EN);
11807                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11808                 } else
11809                         val = 0;
11810
11811                 tp->phy_crc_errors += val;
11812
11813                 return tp->phy_crc_errors;
11814         }
11815
11816         return get_stat64(&hw_stats->rx_fcs_errors);
11817 }
11818
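/* ethtool statistics stay monotonic across chip resets: each reported
 * value is the live hardware counter added to the snapshot kept in
 * tp->estats_prev, which the driver saves elsewhere before the
 * hardware counters are cleared.
 */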
11819 #define ESTAT_ADD(member) \
11820         estats->member =        old_estats->member + \
11821                                 get_stat64(&hw_stats->member)
11822
11823 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11824 {
11825         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11826         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11827
11828         ESTAT_ADD(rx_octets);
11829         ESTAT_ADD(rx_fragments);
11830         ESTAT_ADD(rx_ucast_packets);
11831         ESTAT_ADD(rx_mcast_packets);
11832         ESTAT_ADD(rx_bcast_packets);
11833         ESTAT_ADD(rx_fcs_errors);
11834         ESTAT_ADD(rx_align_errors);
11835         ESTAT_ADD(rx_xon_pause_rcvd);
11836         ESTAT_ADD(rx_xoff_pause_rcvd);
11837         ESTAT_ADD(rx_mac_ctrl_rcvd);
11838         ESTAT_ADD(rx_xoff_entered);
11839         ESTAT_ADD(rx_frame_too_long_errors);
11840         ESTAT_ADD(rx_jabbers);
11841         ESTAT_ADD(rx_undersize_packets);
11842         ESTAT_ADD(rx_in_length_errors);
11843         ESTAT_ADD(rx_out_length_errors);
11844         ESTAT_ADD(rx_64_or_less_octet_packets);
11845         ESTAT_ADD(rx_65_to_127_octet_packets);
11846         ESTAT_ADD(rx_128_to_255_octet_packets);
11847         ESTAT_ADD(rx_256_to_511_octet_packets);
11848         ESTAT_ADD(rx_512_to_1023_octet_packets);
11849         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11850         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11851         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11852         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11853         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11854
11855         ESTAT_ADD(tx_octets);
11856         ESTAT_ADD(tx_collisions);
11857         ESTAT_ADD(tx_xon_sent);
11858         ESTAT_ADD(tx_xoff_sent);
11859         ESTAT_ADD(tx_flow_control);
11860         ESTAT_ADD(tx_mac_errors);
11861         ESTAT_ADD(tx_single_collisions);
11862         ESTAT_ADD(tx_mult_collisions);
11863         ESTAT_ADD(tx_deferred);
11864         ESTAT_ADD(tx_excessive_collisions);
11865         ESTAT_ADD(tx_late_collisions);
11866         ESTAT_ADD(tx_collide_2times);
11867         ESTAT_ADD(tx_collide_3times);
11868         ESTAT_ADD(tx_collide_4times);
11869         ESTAT_ADD(tx_collide_5times);
11870         ESTAT_ADD(tx_collide_6times);
11871         ESTAT_ADD(tx_collide_7times);
11872         ESTAT_ADD(tx_collide_8times);
11873         ESTAT_ADD(tx_collide_9times);
11874         ESTAT_ADD(tx_collide_10times);
11875         ESTAT_ADD(tx_collide_11times);
11876         ESTAT_ADD(tx_collide_12times);
11877         ESTAT_ADD(tx_collide_13times);
11878         ESTAT_ADD(tx_collide_14times);
11879         ESTAT_ADD(tx_collide_15times);
11880         ESTAT_ADD(tx_ucast_packets);
11881         ESTAT_ADD(tx_mcast_packets);
11882         ESTAT_ADD(tx_bcast_packets);
11883         ESTAT_ADD(tx_carrier_sense_errors);
11884         ESTAT_ADD(tx_discards);
11885         ESTAT_ADD(tx_errors);
11886
11887         ESTAT_ADD(dma_writeq_full);
11888         ESTAT_ADD(dma_write_prioq_full);
11889         ESTAT_ADD(rxbds_empty);
11890         ESTAT_ADD(rx_discards);
11891         ESTAT_ADD(rx_errors);
11892         ESTAT_ADD(rx_threshold_hit);
11893
11894         ESTAT_ADD(dma_readq_full);
11895         ESTAT_ADD(dma_read_prioq_full);
11896         ESTAT_ADD(tx_comp_queue_full);
11897
11898         ESTAT_ADD(ring_set_send_prod_index);
11899         ESTAT_ADD(ring_status_update);
11900         ESTAT_ADD(nic_irqs);
11901         ESTAT_ADD(nic_avoided_irqs);
11902         ESTAT_ADD(nic_tx_threshold_hit);
11903
11904         ESTAT_ADD(mbuf_lwm_thresh_hit);
11905 }
11906
11907 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11908 {
11909         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11910         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11911
11912         stats->rx_packets = old_stats->rx_packets +
11913                 get_stat64(&hw_stats->rx_ucast_packets) +
11914                 get_stat64(&hw_stats->rx_mcast_packets) +
11915                 get_stat64(&hw_stats->rx_bcast_packets);
11916
11917         stats->tx_packets = old_stats->tx_packets +
11918                 get_stat64(&hw_stats->tx_ucast_packets) +
11919                 get_stat64(&hw_stats->tx_mcast_packets) +
11920                 get_stat64(&hw_stats->tx_bcast_packets);
11921
11922         stats->rx_bytes = old_stats->rx_bytes +
11923                 get_stat64(&hw_stats->rx_octets);
11924         stats->tx_bytes = old_stats->tx_bytes +
11925                 get_stat64(&hw_stats->tx_octets);
11926
11927         stats->rx_errors = old_stats->rx_errors +
11928                 get_stat64(&hw_stats->rx_errors);
11929         stats->tx_errors = old_stats->tx_errors +
11930                 get_stat64(&hw_stats->tx_errors) +
11931                 get_stat64(&hw_stats->tx_mac_errors) +
11932                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11933                 get_stat64(&hw_stats->tx_discards);
11934
11935         stats->multicast = old_stats->multicast +
11936                 get_stat64(&hw_stats->rx_mcast_packets);
11937         stats->collisions = old_stats->collisions +
11938                 get_stat64(&hw_stats->tx_collisions);
11939
11940         stats->rx_length_errors = old_stats->rx_length_errors +
11941                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11942                 get_stat64(&hw_stats->rx_undersize_packets);
11943
11944         stats->rx_frame_errors = old_stats->rx_frame_errors +
11945                 get_stat64(&hw_stats->rx_align_errors);
11946         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11947                 get_stat64(&hw_stats->tx_discards);
11948         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11949                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11950
11951         stats->rx_crc_errors = old_stats->rx_crc_errors +
11952                 tg3_calc_crc_errors(tp);
11953
11954         stats->rx_missed_errors = old_stats->rx_missed_errors +
11955                 get_stat64(&hw_stats->rx_discards);
11956
11957         stats->rx_dropped = tp->rx_dropped;
11958         stats->tx_dropped = tp->tx_dropped;
11959 }
11960
11961 static int tg3_get_regs_len(struct net_device *dev)
11962 {
11963         return TG3_REG_BLK_SIZE;
11964 }
11965
11966 static void tg3_get_regs(struct net_device *dev,
11967                 struct ethtool_regs *regs, void *_p)
11968 {
11969         struct tg3 *tp = netdev_priv(dev);
11970
11971         regs->version = 0;
11972
11973         memset(_p, 0, TG3_REG_BLK_SIZE);
11974
11975         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11976                 return;
11977
11978         tg3_full_lock(tp, 0);
11979
11980         tg3_dump_legacy_regs(tp, (u32 *)_p);
11981
11982         tg3_full_unlock(tp);
11983 }
11984
11985 static int tg3_get_eeprom_len(struct net_device *dev)
11986 {
11987         struct tg3 *tp = netdev_priv(dev);
11988
11989         return tp->nvram_size;
11990 }
11991
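/* NVRAM is read in 4-byte big-endian words, so an arbitrary
 * (offset, len) request is split into three phases: leading bytes up
 * to the first 4-byte boundary, whole aligned words, then trailing
 * bytes.  For example, offset = 1 and len = 10 reads 3 + 4 + 3 bytes.
 * CPMU link-aware/idle clock modes are overridden for the duration.
 */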
11992 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11993 {
11994         struct tg3 *tp = netdev_priv(dev);
11995         int ret, cpmu_restore = 0;
11996         u8  *pd;
11997         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11998         __be32 val;
11999
12000         if (tg3_flag(tp, NO_NVRAM))
12001                 return -EINVAL;
12002
12003         offset = eeprom->offset;
12004         len = eeprom->len;
12005         eeprom->len = 0;
12006
12007         eeprom->magic = TG3_EEPROM_MAGIC;
12008
12009         /* Override clock, link aware and link idle modes */
12010         if (tg3_flag(tp, CPMU_PRESENT)) {
12011                 cpmu_val = tr32(TG3_CPMU_CTRL);
12012                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12013                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12014                         tw32(TG3_CPMU_CTRL, cpmu_val &
12015                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12016                                              CPMU_CTRL_LINK_IDLE_MODE));
12017                         cpmu_restore = 1;
12018                 }
12019         }
12020         tg3_override_clk(tp);
12021
12022         if (offset & 3) {
12023                 /* adjustments to start on the required 4-byte boundary */
12024                 b_offset = offset & 3;
12025                 b_count = 4 - b_offset;
12026                 if (b_count > len) {
12027                         /* i.e. offset=1 len=2 */
12028                         b_count = len;
12029                 }
12030                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12031                 if (ret)
12032                         goto eeprom_done;
12033                 memcpy(data, ((char *)&val) + b_offset, b_count);
12034                 len -= b_count;
12035                 offset += b_count;
12036                 eeprom->len += b_count;
12037         }
12038
12039         /* read bytes up to the last 4-byte boundary */
12040         pd = &data[eeprom->len];
12041         for (i = 0; i < (len - (len & 3)); i += 4) {
12042                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12043                 if (ret) {
12044                         if (i)
12045                                 i -= 4;
12046                         eeprom->len += i;
12047                         goto eeprom_done;
12048                 }
12049                 memcpy(pd + i, &val, 4);
12050                 if (need_resched()) {
12051                         if (signal_pending(current)) {
12052                                 eeprom->len += i;
12053                                 ret = -EINTR;
12054                                 goto eeprom_done;
12055                         }
12056                         cond_resched();
12057                 }
12058         }
12059         eeprom->len += i;
12060
12061         if (len & 3) {
12062                 /* read the last bytes not ending on a 4-byte boundary */
12063                 pd = &data[eeprom->len];
12064                 b_count = len & 3;
12065                 b_offset = offset + len - b_count;
12066                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12067                 if (ret)
12068                         goto eeprom_done;
12069                 memcpy(pd, &val, b_count);
12070                 eeprom->len += b_count;
12071         }
12072         ret = 0;
12073
12074 eeprom_done:
12075         /* Restore clock, link aware and link idle modes */
12076         tg3_restore_clk(tp);
12077         if (cpmu_restore)
12078                 tw32(TG3_CPMU_CTRL, cpmu_val);
12079
12080         return ret;
12081 }
12082
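/* NVRAM writes must likewise be 4-byte aligned, so unaligned requests
 * are widened: the partial words at either end are read back first and
 * the user data is merged into a bounce buffer before the block write.
 */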
12083 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12084 {
12085         struct tg3 *tp = netdev_priv(dev);
12086         int ret;
12087         u32 offset, len, b_offset, odd_len;
12088         u8 *buf;
12089         __be32 start = 0, end;
12090
12091         if (tg3_flag(tp, NO_NVRAM) ||
12092             eeprom->magic != TG3_EEPROM_MAGIC)
12093                 return -EINVAL;
12094
12095         offset = eeprom->offset;
12096         len = eeprom->len;
12097
12098         if ((b_offset = (offset & 3))) {
12099                 /* adjustments to start on the required 4-byte boundary */
12100                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12101                 if (ret)
12102                         return ret;
12103                 len += b_offset;
12104                 offset &= ~3;
12105                 if (len < 4)
12106                         len = 4;
12107         }
12108
12109         odd_len = 0;
12110         if (len & 3) {
12111                 /* adjustments to end on the required 4-byte boundary */
12112                 odd_len = 1;
12113                 len = (len + 3) & ~3;
12114                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12115                 if (ret)
12116                         return ret;
12117         }
12118
12119         buf = data;
12120         if (b_offset || odd_len) {
12121                 buf = kmalloc(len, GFP_KERNEL);
12122                 if (!buf)
12123                         return -ENOMEM;
12124                 if (b_offset)
12125                         memcpy(buf, &start, 4);
12126                 if (odd_len)
12127                         memcpy(buf+len-4, &end, 4);
12128                 memcpy(buf + b_offset, data, eeprom->len);
12129         }
12130
12131         ret = tg3_nvram_write_block(tp, offset, len, buf);
12132
12133         if (buf != data)
12134                 kfree(buf);
12135
12136         return ret;
12137 }
12138
12139 static int tg3_get_link_ksettings(struct net_device *dev,
12140                                   struct ethtool_link_ksettings *cmd)
12141 {
12142         struct tg3 *tp = netdev_priv(dev);
12143         u32 supported, advertising;
12144
12145         if (tg3_flag(tp, USE_PHYLIB)) {
12146                 struct phy_device *phydev;
12147                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12148                         return -EAGAIN;
12149                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12150                 phy_ethtool_ksettings_get(phydev, cmd);
12151
12152                 return 0;
12153         }
12154
12155         supported = (SUPPORTED_Autoneg);
12156
12157         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12158                 supported |= (SUPPORTED_1000baseT_Half |
12159                               SUPPORTED_1000baseT_Full);
12160
12161         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12162                 supported |= (SUPPORTED_100baseT_Half |
12163                               SUPPORTED_100baseT_Full |
12164                               SUPPORTED_10baseT_Half |
12165                               SUPPORTED_10baseT_Full |
12166                               SUPPORTED_TP);
12167                 cmd->base.port = PORT_TP;
12168         } else {
12169                 supported |= SUPPORTED_FIBRE;
12170                 cmd->base.port = PORT_FIBRE;
12171         }
12172         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12173                                                 supported);
12174
12175         advertising = tp->link_config.advertising;
12176         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12177                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12178                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12179                                 advertising |= ADVERTISED_Pause;
12180                         } else {
12181                                 advertising |= ADVERTISED_Pause |
12182                                         ADVERTISED_Asym_Pause;
12183                         }
12184                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12185                         advertising |= ADVERTISED_Asym_Pause;
12186                 }
12187         }
12188         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12189                                                 advertising);
12190
12191         if (netif_running(dev) && tp->link_up) {
12192                 cmd->base.speed = tp->link_config.active_speed;
12193                 cmd->base.duplex = tp->link_config.active_duplex;
12194                 ethtool_convert_legacy_u32_to_link_mode(
12195                         cmd->link_modes.lp_advertising,
12196                         tp->link_config.rmt_adv);
12197
12198                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12199                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12200                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12201                         else
12202                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12203                 }
12204         } else {
12205                 cmd->base.speed = SPEED_UNKNOWN;
12206                 cmd->base.duplex = DUPLEX_UNKNOWN;
12207                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12208         }
12209         cmd->base.phy_address = tp->phy_addr;
12210         cmd->base.autoneg = tp->link_config.autoneg;
12211         return 0;
12212 }
12213
12214 static int tg3_set_link_ksettings(struct net_device *dev,
12215                                   const struct ethtool_link_ksettings *cmd)
12216 {
12217         struct tg3 *tp = netdev_priv(dev);
12218         u32 speed = cmd->base.speed;
12219         u32 advertising;
12220
12221         if (tg3_flag(tp, USE_PHYLIB)) {
12222                 struct phy_device *phydev;
12223                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12224                         return -EAGAIN;
12225                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12226                 return phy_ethtool_ksettings_set(phydev, cmd);
12227         }
12228
12229         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12230             cmd->base.autoneg != AUTONEG_DISABLE)
12231                 return -EINVAL;
12232
12233         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12234             cmd->base.duplex != DUPLEX_FULL &&
12235             cmd->base.duplex != DUPLEX_HALF)
12236                 return -EINVAL;
12237
12238         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12239                                                 cmd->link_modes.advertising);
12240
12241         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12242                 u32 mask = ADVERTISED_Autoneg |
12243                            ADVERTISED_Pause |
12244                            ADVERTISED_Asym_Pause;
12245
12246                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12247                         mask |= ADVERTISED_1000baseT_Half |
12248                                 ADVERTISED_1000baseT_Full;
12249
12250                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12251                         mask |= ADVERTISED_100baseT_Half |
12252                                 ADVERTISED_100baseT_Full |
12253                                 ADVERTISED_10baseT_Half |
12254                                 ADVERTISED_10baseT_Full |
12255                                 ADVERTISED_TP;
12256                 else
12257                         mask |= ADVERTISED_FIBRE;
12258
12259                 if (advertising & ~mask)
12260                         return -EINVAL;
12261
12262                 mask &= (ADVERTISED_1000baseT_Half |
12263                          ADVERTISED_1000baseT_Full |
12264                          ADVERTISED_100baseT_Half |
12265                          ADVERTISED_100baseT_Full |
12266                          ADVERTISED_10baseT_Half |
12267                          ADVERTISED_10baseT_Full);
12268
12269                 advertising &= mask;
12270         } else {
12271                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12272                         if (speed != SPEED_1000)
12273                                 return -EINVAL;
12274
12275                         if (cmd->base.duplex != DUPLEX_FULL)
12276                                 return -EINVAL;
12277                 } else {
12278                         if (speed != SPEED_100 &&
12279                             speed != SPEED_10)
12280                                 return -EINVAL;
12281                 }
12282         }
12283
12284         tg3_full_lock(tp, 0);
12285
12286         tp->link_config.autoneg = cmd->base.autoneg;
12287         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12288                 tp->link_config.advertising = (advertising |
12289                                               ADVERTISED_Autoneg);
12290                 tp->link_config.speed = SPEED_UNKNOWN;
12291                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12292         } else {
12293                 tp->link_config.advertising = 0;
12294                 tp->link_config.speed = speed;
12295                 tp->link_config.duplex = cmd->base.duplex;
12296         }
12297
12298         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12299
12300         tg3_warn_mgmt_link_flap(tp);
12301
12302         if (netif_running(dev))
12303                 tg3_setup_phy(tp, true);
12304
12305         tg3_full_unlock(tp);
12306
12307         return 0;
12308 }
12309
12310 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12311 {
12312         struct tg3 *tp = netdev_priv(dev);
12313
12314         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12315         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12316         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12317 }
12318
12319 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12320 {
12321         struct tg3 *tp = netdev_priv(dev);
12322
12323         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12324                 wol->supported = WAKE_MAGIC;
12325         else
12326                 wol->supported = 0;
12327         wol->wolopts = 0;
12328         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12329                 wol->wolopts = WAKE_MAGIC;
12330         memset(&wol->sopass, 0, sizeof(wol->sopass));
12331 }
12332
12333 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12334 {
12335         struct tg3 *tp = netdev_priv(dev);
12336         struct device *dp = &tp->pdev->dev;
12337
12338         if (wol->wolopts & ~WAKE_MAGIC)
12339                 return -EINVAL;
12340         if ((wol->wolopts & WAKE_MAGIC) &&
12341             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12342                 return -EINVAL;
12343
12344         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12345
12346         if (device_may_wakeup(dp))
12347                 tg3_flag_set(tp, WOL_ENABLE);
12348         else
12349                 tg3_flag_clear(tp, WOL_ENABLE);
12350
12351         return 0;
12352 }
12353
12354 static u32 tg3_get_msglevel(struct net_device *dev)
12355 {
12356         struct tg3 *tp = netdev_priv(dev);
12357         return tp->msg_enable;
12358 }
12359
12360 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12361 {
12362         struct tg3 *tp = netdev_priv(dev);
12363         tp->msg_enable = value;
12364 }
12365
12366 static int tg3_nway_reset(struct net_device *dev)
12367 {
12368         struct tg3 *tp = netdev_priv(dev);
12369         int r;
12370
12371         if (!netif_running(dev))
12372                 return -EAGAIN;
12373
12374         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12375                 return -EINVAL;
12376
12377         tg3_warn_mgmt_link_flap(tp);
12378
12379         if (tg3_flag(tp, USE_PHYLIB)) {
12380                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12381                         return -EAGAIN;
12382                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12383         } else {
12384                 u32 bmcr;
12385
12386                 spin_lock_bh(&tp->lock);
12387                 r = -EINVAL;
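                /* The first read is a dummy; only the second read's
                 * result is checked, apparently to flush a stale value
                 * out of the MDIO interface.
                 */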
12388                 tg3_readphy(tp, MII_BMCR, &bmcr);
12389                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12390                     ((bmcr & BMCR_ANENABLE) ||
12391                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12392                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12393                                                    BMCR_ANENABLE);
12394                         r = 0;
12395                 }
12396                 spin_unlock_bh(&tp->lock);
12397         }
12398
12399         return r;
12400 }
12401
12402 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12403 {
12404         struct tg3 *tp = netdev_priv(dev);
12405
12406         ering->rx_max_pending = tp->rx_std_ring_mask;
12407         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12408                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12409         else
12410                 ering->rx_jumbo_max_pending = 0;
12411
12412         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12413
12414         ering->rx_pending = tp->rx_pending;
12415         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12416                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12417         else
12418                 ering->rx_jumbo_pending = 0;
12419
12420         ering->tx_pending = tp->napi[0].tx_pending;
12421 }
12422
12423 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12424 {
12425         struct tg3 *tp = netdev_priv(dev);
12426         int i, irq_sync = 0, err = 0;
12427         bool reset_phy = false;
12428
12429         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12430             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12431             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12432             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12433             (tg3_flag(tp, TSO_BUG) &&
12434              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12435                 return -EINVAL;
12436
12437         if (netif_running(dev)) {
12438                 tg3_phy_stop(tp);
12439                 tg3_netif_stop(tp);
12440                 irq_sync = 1;
12441         }
12442
12443         tg3_full_lock(tp, irq_sync);
12444
12445         tp->rx_pending = ering->rx_pending;
12446
12447         if (tg3_flag(tp, MAX_RXPEND_64) &&
12448             tp->rx_pending > 63)
12449                 tp->rx_pending = 63;
12450
12451         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12452                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12453
12454         for (i = 0; i < tp->irq_max; i++)
12455                 tp->napi[i].tx_pending = ering->tx_pending;
12456
12457         if (netif_running(dev)) {
12458                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12459                 /* Reset the PHY to avoid a PHY lockup */
12460                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12461                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12462                     tg3_asic_rev(tp) == ASIC_REV_5720)
12463                         reset_phy = true;
12464
12465                 err = tg3_restart_hw(tp, reset_phy);
12466                 if (!err)
12467                         tg3_netif_start(tp);
12468         }
12469
12470         tg3_full_unlock(tp);
12471
12472         if (irq_sync && !err)
12473                 tg3_phy_start(tp);
12474
12475         return err;
12476 }
12477
12478 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12479 {
12480         struct tg3 *tp = netdev_priv(dev);
12481
12482         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12483
12484         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12485                 epause->rx_pause = 1;
12486         else
12487                 epause->rx_pause = 0;
12488
12489         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12490                 epause->tx_pause = 1;
12491         else
12492                 epause->tx_pause = 0;
12493 }
12494
12495 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12496 {
12497         struct tg3 *tp = netdev_priv(dev);
12498         int err = 0;
12499         bool reset_phy = false;
12500
12501         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12502                 tg3_warn_mgmt_link_flap(tp);
12503
12504         if (tg3_flag(tp, USE_PHYLIB)) {
12505                 struct phy_device *phydev;
12506
12507                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12508
12509                 if (!phy_validate_pause(phydev, epause))
12510                         return -EINVAL;
12511
12512                 tp->link_config.flowctrl = 0;
12513                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12514                 if (epause->rx_pause) {
12515                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12516
12517                         if (epause->tx_pause) {
12518                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12519                         }
12520                 } else if (epause->tx_pause) {
12521                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12522                 }
12523
12524                 if (epause->autoneg)
12525                         tg3_flag_set(tp, PAUSE_AUTONEG);
12526                 else
12527                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12528
12529                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12530                         if (phydev->autoneg) {
12531                                 /* phy_set_asym_pause() will
12532                                  * renegotiate the link to inform our
12533                                  * link partner of our flow control
12534                                  * settings, even if the flow control
12535                                  * is forced.  Let tg3_adjust_link()
12536                                  * do the final flow control setup.
12537                                  */
12538                                 return 0;
12539                         }
12540
12541                         if (!epause->autoneg)
12542                                 tg3_setup_flow_control(tp, 0, 0);
12543                 }
12544         } else {
12545                 int irq_sync = 0;
12546
12547                 if (netif_running(dev)) {
12548                         tg3_netif_stop(tp);
12549                         irq_sync = 1;
12550                 }
12551
12552                 tg3_full_lock(tp, irq_sync);
12553
12554                 if (epause->autoneg)
12555                         tg3_flag_set(tp, PAUSE_AUTONEG);
12556                 else
12557                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12558                 if (epause->rx_pause)
12559                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12560                 else
12561                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12562                 if (epause->tx_pause)
12563                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12564                 else
12565                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12566
12567                 if (netif_running(dev)) {
12568                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12569                         /* Reset the PHY to avoid a PHY lockup */
12570                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12571                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12572                             tg3_asic_rev(tp) == ASIC_REV_5720)
12573                                 reset_phy = true;
12574
12575                         err = tg3_restart_hw(tp, reset_phy);
12576                         if (!err)
12577                                 tg3_netif_start(tp);
12578                 }
12579
12580                 tg3_full_unlock(tp);
12581         }
12582
12583         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12584
12585         return err;
12586 }
12587
12588 static int tg3_get_sset_count(struct net_device *dev, int sset)
12589 {
12590         switch (sset) {
12591         case ETH_SS_TEST:
12592                 return TG3_NUM_TEST;
12593         case ETH_SS_STATS:
12594                 return TG3_NUM_STATS;
12595         default:
12596                 return -EOPNOTSUPP;
12597         }
12598 }
12599
12600 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12601                          u32 *rules __always_unused)
12602 {
12603         struct tg3 *tp = netdev_priv(dev);
12604
12605         if (!tg3_flag(tp, SUPPORT_MSIX))
12606                 return -EOPNOTSUPP;
12607
12608         switch (info->cmd) {
12609         case ETHTOOL_GRXRINGS:
12610                 if (netif_running(tp->dev))
12611                         info->data = tp->rxq_cnt;
12612                 else {
12613                         info->data = num_online_cpus();
12614                         if (info->data > TG3_RSS_MAX_NUM_QS)
12615                                 info->data = TG3_RSS_MAX_NUM_QS;
12616                 }
12617
12618                 return 0;
12619
12620         default:
12621                 return -EOPNOTSUPP;
12622         }
12623 }
12624
12625 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12626 {
12627         u32 size = 0;
12628         struct tg3 *tp = netdev_priv(dev);
12629
12630         if (tg3_flag(tp, SUPPORT_MSIX))
12631                 size = TG3_RSS_INDIR_TBL_SIZE;
12632
12633         return size;
12634 }
12635
12636 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12637 {
12638         struct tg3 *tp = netdev_priv(dev);
12639         int i;
12640
12641         if (hfunc)
12642                 *hfunc = ETH_RSS_HASH_TOP;
12643         if (!indir)
12644                 return 0;
12645
12646         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12647                 indir[i] = tp->rss_ind_tbl[i];
12648
12649         return 0;
12650 }
12651
12652 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12653                         const u8 hfunc)
12654 {
12655         struct tg3 *tp = netdev_priv(dev);
12656         size_t i;
12657
12658         /* We require at least one supported parameter to be changed, and no
12659          * change to any of the unsupported parameters.
12660          */
12661         if (key ||
12662             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12663                 return -EOPNOTSUPP;
12664
12665         if (!indir)
12666                 return 0;
12667
12668         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12669                 tp->rss_ind_tbl[i] = indir[i];
12670
12671         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12672                 return 0;
12673
12674         /* It is legal to write the indirection
12675          * table while the device is running.
12676          */
12677         tg3_full_lock(tp, 0);
12678         tg3_rss_write_indir_tbl(tp);
12679         tg3_full_unlock(tp);
12680
12681         return 0;
12682 }
12683
12684 static void tg3_get_channels(struct net_device *dev,
12685                              struct ethtool_channels *channel)
12686 {
12687         struct tg3 *tp = netdev_priv(dev);
12688         u32 deflt_qs = netif_get_num_default_rss_queues();
12689
12690         channel->max_rx = tp->rxq_max;
12691         channel->max_tx = tp->txq_max;
12692
12693         if (netif_running(dev)) {
12694                 channel->rx_count = tp->rxq_cnt;
12695                 channel->tx_count = tp->txq_cnt;
12696         } else {
12697                 if (tp->rxq_req)
12698                         channel->rx_count = tp->rxq_req;
12699                 else
12700                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12701
12702                 if (tp->txq_req)
12703                         channel->tx_count = tp->txq_req;
12704                 else
12705                         channel->tx_count = min(deflt_qs, tp->txq_max);
12706         }
12707 }
12708
12709 static int tg3_set_channels(struct net_device *dev,
12710                             struct ethtool_channels *channel)
12711 {
12712         struct tg3 *tp = netdev_priv(dev);
12713
12714         if (!tg3_flag(tp, SUPPORT_MSIX))
12715                 return -EOPNOTSUPP;
12716
12717         if (channel->rx_count > tp->rxq_max ||
12718             channel->tx_count > tp->txq_max)
12719                 return -EINVAL;
12720
12721         tp->rxq_req = channel->rx_count;
12722         tp->txq_req = channel->tx_count;
12723
12724         if (!netif_running(dev))
12725                 return 0;
12726
12727         tg3_stop(tp);
12728
12729         tg3_carrier_off(tp);
12730
12731         tg3_start(tp, true, false, false);
12732
12733         return 0;
12734 }
12735
12736 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12737 {
12738         switch (stringset) {
12739         case ETH_SS_STATS:
12740                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12741                 break;
12742         case ETH_SS_TEST:
12743                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12744                 break;
12745         default:
12746                 WARN_ON(1);     /* should not happen: unknown string set */
12747                 break;
12748         }
12749 }
12750
12751 static int tg3_set_phys_id(struct net_device *dev,
12752                             enum ethtool_phys_id_state state)
12753 {
12754         struct tg3 *tp = netdev_priv(dev);
12755
12756         switch (state) {
12757         case ETHTOOL_ID_ACTIVE:
12758                 return 1;       /* cycle on/off once per second */
12759
12760         case ETHTOOL_ID_ON:
12761                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12762                      LED_CTRL_1000MBPS_ON |
12763                      LED_CTRL_100MBPS_ON |
12764                      LED_CTRL_10MBPS_ON |
12765                      LED_CTRL_TRAFFIC_OVERRIDE |
12766                      LED_CTRL_TRAFFIC_BLINK |
12767                      LED_CTRL_TRAFFIC_LED);
12768                 break;
12769
12770         case ETHTOOL_ID_OFF:
12771                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12772                      LED_CTRL_TRAFFIC_OVERRIDE);
12773                 break;
12774
12775         case ETHTOOL_ID_INACTIVE:
12776                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12777                 break;
12778         }
12779
12780         return 0;
12781 }
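
/* Illustrative usage: "ethtool -p eth0 5" (hypothetical interface)
 * invokes this handler; returning 1 from ETHTOOL_ID_ACTIVE asks the
 * ethtool core to alternate ETHTOOL_ID_ON/ETHTOOL_ID_OFF so the LED
 * cycles once per second for the requested five seconds.
 */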
12782
12783 static void tg3_get_ethtool_stats(struct net_device *dev,
12784                                    struct ethtool_stats *estats, u64 *tmp_stats)
12785 {
12786         struct tg3 *tp = netdev_priv(dev);
12787
12788         if (tp->hw_stats)
12789                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12790         else
12791                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12792 }
12793
12794 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12795 {
12796         int i;
12797         __be32 *buf;
12798         u32 offset = 0, len = 0;
12799         u32 magic, val;
12800
12801         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12802                 return NULL;
12803
12804         if (magic == TG3_EEPROM_MAGIC) {
12805                 for (offset = TG3_NVM_DIR_START;
12806                      offset < TG3_NVM_DIR_END;
12807                      offset += TG3_NVM_DIRENT_SIZE) {
12808                         if (tg3_nvram_read(tp, offset, &val))
12809                                 return NULL;
12810
12811                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12812                             TG3_NVM_DIRTYPE_EXTVPD)
12813                                 break;
12814                 }
12815
12816                 if (offset != TG3_NVM_DIR_END) {
12817                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12818                         if (tg3_nvram_read(tp, offset + 4, &offset))
12819                                 return NULL;
12820
12821                         offset = tg3_nvram_logical_addr(tp, offset);
12822                 }
12823
12824                 if (!offset || !len) {
12825                         offset = TG3_NVM_VPD_OFF;
12826                         len = TG3_NVM_VPD_LEN;
12827                 }
12828         } else {
12829                 len = TG3_NVM_PCI_VPD_MAX_LEN;
12830         }
12831
12832         buf = kmalloc(len, GFP_KERNEL);
12833         if (buf == NULL)
12834                 return NULL;
12835
12836         if (magic == TG3_EEPROM_MAGIC) {
12837                 for (i = 0; i < len; i += 4) {
12838                         /* The data is in little-endian format in NVRAM.
12839                          * Use the big-endian read routines to preserve
12840                          * the byte order as it exists in NVRAM.
12841                          */
12842                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12843                                 goto error;
12844                 }
12845                 *vpdlen = len;
12846         } else {
12847                 ssize_t cnt;
12848
12849                 cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
12850                 if (cnt < 0)
12851                         goto error;
12852                 *vpdlen = cnt;
12853         }
12854
12855         return buf;
12856
12857 error:
12858         kfree(buf);
12859         return NULL;
12860 }
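
/* Note on the block above: when the part carries the standard EEPROM
 * magic, an extended-VPD image referenced from the NVRAM directory is
 * preferred, falling back to the fixed VPD window; parts without
 * tg3-format NVRAM are read through the PCI-config-space VPD
 * capability via pci_read_vpd() instead.
 */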
12861
12862 #define NVRAM_TEST_SIZE 0x100
12863 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12864 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12865 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12866 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12867 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12868 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12869 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12870 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12871
12872 static int tg3_test_nvram(struct tg3 *tp)
12873 {
12874         u32 csum, magic, len;
12875         __be32 *buf;
12876         int i, j, k, err = 0, size;
12877
12878         if (tg3_flag(tp, NO_NVRAM))
12879                 return 0;
12880
12881         if (tg3_nvram_read(tp, 0, &magic) != 0)
12882                 return -EIO;
12883
12884         if (magic == TG3_EEPROM_MAGIC)
12885                 size = NVRAM_TEST_SIZE;
12886         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12887                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12888                     TG3_EEPROM_SB_FORMAT_1) {
12889                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12890                         case TG3_EEPROM_SB_REVISION_0:
12891                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12892                                 break;
12893                         case TG3_EEPROM_SB_REVISION_2:
12894                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12895                                 break;
12896                         case TG3_EEPROM_SB_REVISION_3:
12897                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12898                                 break;
12899                         case TG3_EEPROM_SB_REVISION_4:
12900                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12901                                 break;
12902                         case TG3_EEPROM_SB_REVISION_5:
12903                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12904                                 break;
12905                         case TG3_EEPROM_SB_REVISION_6:
12906                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12907                                 break;
12908                         default:
12909                                 return -EIO;
12910                         }
12911                 } else
12912                         return 0;
12913         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12914                 size = NVRAM_SELFBOOT_HW_SIZE;
12915         else
12916                 return -EIO;
12917
12918         buf = kmalloc(size, GFP_KERNEL);
12919         if (buf == NULL)
12920                 return -ENOMEM;
12921
12922         err = -EIO;
12923         for (i = 0, j = 0; i < size; i += 4, j++) {
12924                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12925                 if (err)
12926                         break;
12927         }
12928         if (i < size)
12929                 goto out;
12930
12931         /* Selfboot format */
12932         magic = be32_to_cpu(buf[0]);
12933         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12934             TG3_EEPROM_MAGIC_FW) {
12935                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12936
12937                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12938                     TG3_EEPROM_SB_REVISION_2) {
12939                         /* For rev 2, the csum doesn't include the MBA. */
12940                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12941                                 csum8 += buf8[i];
12942                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12943                                 csum8 += buf8[i];
12944                 } else {
12945                         for (i = 0; i < size; i++)
12946                                 csum8 += buf8[i];
12947                 }
12948
12949                 if (csum8 == 0) {
12950                         err = 0;
12951                         goto out;
12952                 }
12953
12954                 err = -EIO;
12955                 goto out;
12956         }
12957
12958         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12959             TG3_EEPROM_MAGIC_HW) {
12960                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12961                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12962                 u8 *buf8 = (u8 *) buf;
12963
12964                 /* Separate the parity bits and the data bytes.  */
12965                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12966                         if ((i == 0) || (i == 8)) {
12967                                 int l;
12968                                 u8 msk;
12969
12970                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12971                                         parity[k++] = buf8[i] & msk;
12972                                 i++;
12973                         } else if (i == 16) {
12974                                 int l;
12975                                 u8 msk;
12976
12977                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12978                                         parity[k++] = buf8[i] & msk;
12979                                 i++;
12980
12981                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12982                                         parity[k++] = buf8[i] & msk;
12983                                 i++;
12984                         }
12985                         data[j++] = buf8[i];
12986                 }
12987
12988                 err = -EIO;
12989                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12990                         u8 hw8 = hweight8(data[i]);
12991
12992                         if ((hw8 & 0x1) && parity[i])
12993                                 goto out;
12994                         else if (!(hw8 & 0x1) && !parity[i])
12995                                 goto out;
12996                 }
12997                 err = 0;
12998                 goto out;
12999         }
13000
13001         err = -EIO;
13002
13003         /* Bootstrap checksum at offset 0x10 */
13004         csum = calc_crc((unsigned char *) buf, 0x10);
13005         if (csum != le32_to_cpu(buf[0x10/4]))
13006                 goto out;
13007
13008         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13009         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13010         if (csum != le32_to_cpu(buf[0xfc/4]))
13011                 goto out;
13012
13013         kfree(buf);
13014
13015         buf = tg3_vpd_readblock(tp, &len);
13016         if (!buf)
13017                 return -ENOMEM;
13018
13019         i = pci_vpd_find_tag((u8 *)buf, len, PCI_VPD_LRDT_RO_DATA);
13020         if (i > 0) {
13021                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
13022                 if (j < 0)
13023                         goto out;
13024
13025                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
13026                         goto out;
13027
13028                 i += PCI_VPD_LRDT_TAG_SIZE;
13029                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
13030                                               PCI_VPD_RO_KEYWORD_CHKSUM);
13031                 if (j > 0) {
13032                         u8 csum8 = 0;
13033
13034                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
13035
13036                         for (i = 0; i <= j; i++)
13037                                 csum8 += ((u8 *)buf)[i];
13038
13039                         if (csum8)
13040                                 goto out;
13041                 }
13042         }
13043
13044         err = 0;
13045
13046 out:
13047         kfree(buf);
13048         return err;
13049 }
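
/* Summary of the checks above: legacy images are verified with CRC32
 * over the bootstrap and manufacturing blocks plus an optional VPD
 * checksum keyword; self-boot firmware images use a simple byte sum,
 * and hardware self-boot images use a per-byte parity check instead.
 */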
13050
13051 #define TG3_SERDES_TIMEOUT_SEC  2
13052 #define TG3_COPPER_TIMEOUT_SEC  6
13053
13054 static int tg3_test_link(struct tg3 *tp)
13055 {
13056         int i, max;
13057
13058         if (!netif_running(tp->dev))
13059                 return -ENODEV;
13060
13061         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13062                 max = TG3_SERDES_TIMEOUT_SEC;
13063         else
13064                 max = TG3_COPPER_TIMEOUT_SEC;
13065
13066         for (i = 0; i < max; i++) {
13067                 if (tp->link_up)
13068                         return 0;
13069
13070                 if (msleep_interruptible(1000))
13071                         break;
13072         }
13073
13074         return -EIO;
13075 }
13076
13077 /* Only test the commonly used registers */
13078 static int tg3_test_registers(struct tg3 *tp)
13079 {
13080         int i, is_5705, is_5750;
13081         u32 offset, read_mask, write_mask, val, save_val, read_val;
13082         static struct {
13083                 u16 offset;
13084                 u16 flags;
13085 #define TG3_FL_5705     0x1
13086 #define TG3_FL_NOT_5705 0x2
13087 #define TG3_FL_NOT_5788 0x4
13088 #define TG3_FL_NOT_5750 0x8
13089                 u32 read_mask;
13090                 u32 write_mask;
13091         } reg_tbl[] = {
13092                 /* MAC Control Registers */
13093                 { MAC_MODE, TG3_FL_NOT_5705,
13094                         0x00000000, 0x00ef6f8c },
13095                 { MAC_MODE, TG3_FL_5705,
13096                         0x00000000, 0x01ef6b8c },
13097                 { MAC_STATUS, TG3_FL_NOT_5705,
13098                         0x03800107, 0x00000000 },
13099                 { MAC_STATUS, TG3_FL_5705,
13100                         0x03800100, 0x00000000 },
13101                 { MAC_ADDR_0_HIGH, 0x0000,
13102                         0x00000000, 0x0000ffff },
13103                 { MAC_ADDR_0_LOW, 0x0000,
13104                         0x00000000, 0xffffffff },
13105                 { MAC_RX_MTU_SIZE, 0x0000,
13106                         0x00000000, 0x0000ffff },
13107                 { MAC_TX_MODE, 0x0000,
13108                         0x00000000, 0x00000070 },
13109                 { MAC_TX_LENGTHS, 0x0000,
13110                         0x00000000, 0x00003fff },
13111                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13112                         0x00000000, 0x000007fc },
13113                 { MAC_RX_MODE, TG3_FL_5705,
13114                         0x00000000, 0x000007dc },
13115                 { MAC_HASH_REG_0, 0x0000,
13116                         0x00000000, 0xffffffff },
13117                 { MAC_HASH_REG_1, 0x0000,
13118                         0x00000000, 0xffffffff },
13119                 { MAC_HASH_REG_2, 0x0000,
13120                         0x00000000, 0xffffffff },
13121                 { MAC_HASH_REG_3, 0x0000,
13122                         0x00000000, 0xffffffff },
13123
13124                 /* Receive Data and Receive BD Initiator Control Registers. */
13125                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13126                         0x00000000, 0xffffffff },
13127                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13128                         0x00000000, 0xffffffff },
13129                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13130                         0x00000000, 0x00000003 },
13131                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13132                         0x00000000, 0xffffffff },
13133                 { RCVDBDI_STD_BD+0, 0x0000,
13134                         0x00000000, 0xffffffff },
13135                 { RCVDBDI_STD_BD+4, 0x0000,
13136                         0x00000000, 0xffffffff },
13137                 { RCVDBDI_STD_BD+8, 0x0000,
13138                         0x00000000, 0xffff0002 },
13139                 { RCVDBDI_STD_BD+0xc, 0x0000,
13140                         0x00000000, 0xffffffff },
13141
13142                 /* Receive BD Initiator Control Registers. */
13143                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13146                         0x00000000, 0x000003ff },
13147                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149
13150                 /* Host Coalescing Control Registers. */
13151                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13152                         0x00000000, 0x00000004 },
13153                 { HOSTCC_MODE, TG3_FL_5705,
13154                         0x00000000, 0x000000f6 },
13155                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13156                         0x00000000, 0xffffffff },
13157                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13158                         0x00000000, 0x000003ff },
13159                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13160                         0x00000000, 0xffffffff },
13161                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13162                         0x00000000, 0x000003ff },
13163                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13164                         0x00000000, 0xffffffff },
13165                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13166                         0x00000000, 0x000000ff },
13167                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13168                         0x00000000, 0xffffffff },
13169                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13170                         0x00000000, 0x000000ff },
13171                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13172                         0x00000000, 0xffffffff },
13173                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13174                         0x00000000, 0xffffffff },
13175                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13176                         0x00000000, 0xffffffff },
13177                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13178                         0x00000000, 0x000000ff },
13179                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13180                         0x00000000, 0xffffffff },
13181                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13182                         0x00000000, 0x000000ff },
13183                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13184                         0x00000000, 0xffffffff },
13185                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13186                         0x00000000, 0xffffffff },
13187                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13188                         0x00000000, 0xffffffff },
13189                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13190                         0x00000000, 0xffffffff },
13191                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13192                         0x00000000, 0xffffffff },
13193                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13194                         0xffffffff, 0x00000000 },
13195                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13196                         0xffffffff, 0x00000000 },
13197
13198                 /* Buffer Manager Control Registers. */
13199                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13200                         0x00000000, 0x007fff80 },
13201                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13202                         0x00000000, 0x007fffff },
13203                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13204                         0x00000000, 0x0000003f },
13205                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13206                         0x00000000, 0x000001ff },
13207                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13208                         0x00000000, 0x000001ff },
13209                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13210                         0xffffffff, 0x00000000 },
13211                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13212                         0xffffffff, 0x00000000 },
13213
13214                 /* Mailbox Registers */
13215                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13216                         0x00000000, 0x000001ff },
13217                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13218                         0x00000000, 0x000001ff },
13219                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13220                         0x00000000, 0x000007ff },
13221                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13222                         0x00000000, 0x000001ff },
13223
13224                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13225         };
13226
13227         is_5705 = is_5750 = 0;
13228         if (tg3_flag(tp, 5705_PLUS)) {
13229                 is_5705 = 1;
13230                 if (tg3_flag(tp, 5750_PLUS))
13231                         is_5750 = 1;
13232         }
13233
13234         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13235                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13236                         continue;
13237
13238                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13239                         continue;
13240
13241                 if (tg3_flag(tp, IS_5788) &&
13242                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13243                         continue;
13244
13245                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13246                         continue;
13247
13248                 offset = (u32) reg_tbl[i].offset;
13249                 read_mask = reg_tbl[i].read_mask;
13250                 write_mask = reg_tbl[i].write_mask;
13251
13252                 /* Save the original register content */
13253                 save_val = tr32(offset);
13254
13255                 /* Determine the read-only value. */
13256                 read_val = save_val & read_mask;
13257
13258                 /* Write zero to the register, then make sure the read-only bits
13259                  * are not changed and the read/write bits are all zeros.
13260                  */
13261                 tw32(offset, 0);
13262
13263                 val = tr32(offset);
13264
13265                 /* Test the read-only and read/write bits. */
13266                 if (((val & read_mask) != read_val) || (val & write_mask))
13267                         goto out;
13268
13269                 /* Write ones to all the bits defined by RdMask and WrMask, then
13270                  * make sure the read-only bits are not changed and the
13271                  * read/write bits are all ones.
13272                  */
13273                 tw32(offset, read_mask | write_mask);
13274
13275                 val = tr32(offset);
13276
13277                 /* Test the read-only bits. */
13278                 if ((val & read_mask) != read_val)
13279                         goto out;
13280
13281                 /* Test the read/write bits. */
13282                 if ((val & write_mask) != write_mask)
13283                         goto out;
13284
13285                 tw32(offset, save_val);
13286         }
13287
13288         return 0;
13289
13290 out:
13291         if (netif_msg_hw(tp))
13292                 netdev_err(tp->dev,
13293                            "Register test failed at offset %x\n", offset);
13294         tw32(offset, save_val);
13295         return -EIO;
13296 }
13297
13298 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13299 {
13300         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13301         int i;
13302         u32 j;
13303
13304         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13305                 for (j = 0; j < len; j += 4) {
13306                         u32 val;
13307
13308                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13309                         tg3_read_mem(tp, offset + j, &val);
13310                         if (val != test_pattern[i])
13311                                 return -EIO;
13312                 }
13313         }
13314         return 0;
13315 }
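
/* The walk above writes each of the three patterns (all zeros, all
 * ones, 0xaa55a55a) across the whole window and reads every word back
 * before moving to the next pattern, so stuck bits in the on-chip
 * memories show up as a mismatch.
 */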
13316
13317 static int tg3_test_memory(struct tg3 *tp)
13318 {
13319         static struct mem_entry {
13320                 u32 offset;
13321                 u32 len;
13322         } mem_tbl_570x[] = {
13323                 { 0x00000000, 0x00b50},
13324                 { 0x00002000, 0x1c000},
13325                 { 0xffffffff, 0x00000}
13326         }, mem_tbl_5705[] = {
13327                 { 0x00000100, 0x0000c},
13328                 { 0x00000200, 0x00008},
13329                 { 0x00004000, 0x00800},
13330                 { 0x00006000, 0x01000},
13331                 { 0x00008000, 0x02000},
13332                 { 0x00010000, 0x0e000},
13333                 { 0xffffffff, 0x00000}
13334         }, mem_tbl_5755[] = {
13335                 { 0x00000200, 0x00008},
13336                 { 0x00004000, 0x00800},
13337                 { 0x00006000, 0x00800},
13338                 { 0x00008000, 0x02000},
13339                 { 0x00010000, 0x0c000},
13340                 { 0xffffffff, 0x00000}
13341         }, mem_tbl_5906[] = {
13342                 { 0x00000200, 0x00008},
13343                 { 0x00004000, 0x00400},
13344                 { 0x00006000, 0x00400},
13345                 { 0x00008000, 0x01000},
13346                 { 0x00010000, 0x01000},
13347                 { 0xffffffff, 0x00000}
13348         }, mem_tbl_5717[] = {
13349                 { 0x00000200, 0x00008},
13350                 { 0x00010000, 0x0a000},
13351                 { 0x00020000, 0x13c00},
13352                 { 0xffffffff, 0x00000}
13353         }, mem_tbl_57765[] = {
13354                 { 0x00000200, 0x00008},
13355                 { 0x00004000, 0x00800},
13356                 { 0x00006000, 0x09800},
13357                 { 0x00010000, 0x0a000},
13358                 { 0xffffffff, 0x00000}
13359         };
13360         struct mem_entry *mem_tbl;
13361         int err = 0;
13362         int i;
13363
13364         if (tg3_flag(tp, 5717_PLUS))
13365                 mem_tbl = mem_tbl_5717;
13366         else if (tg3_flag(tp, 57765_CLASS) ||
13367                  tg3_asic_rev(tp) == ASIC_REV_5762)
13368                 mem_tbl = mem_tbl_57765;
13369         else if (tg3_flag(tp, 5755_PLUS))
13370                 mem_tbl = mem_tbl_5755;
13371         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13372                 mem_tbl = mem_tbl_5906;
13373         else if (tg3_flag(tp, 5705_PLUS))
13374                 mem_tbl = mem_tbl_5705;
13375         else
13376                 mem_tbl = mem_tbl_570x;
13377
13378         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13379                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13380                 if (err)
13381                         break;
13382         }
13383
13384         return err;
13385 }
13386
13387 #define TG3_TSO_MSS             500
13388
13389 #define TG3_TSO_IP_HDR_LEN      20
13390 #define TG3_TSO_TCP_HDR_LEN     20
13391 #define TG3_TSO_TCP_OPT_LEN     12
13392
13393 static const u8 tg3_tso_header[] = {
13394 0x08, 0x00,                             /* Ethertype: IPv4 */
13395 0x45, 0x00, 0x00, 0x00,                 /* IP: ver 4, ihl 5; tot_len patched at runtime */
13396 0x00, 0x00, 0x40, 0x00,                 /* IP: id 0, DF set */
13397 0x40, 0x06, 0x00, 0x00,                 /* IP: ttl 64, proto TCP, csum 0 */
13398 0x0a, 0x00, 0x00, 0x01,                 /* IP: saddr 10.0.0.1 */
13399 0x0a, 0x00, 0x00, 0x02,                 /* IP: daddr 10.0.0.2 */
13400 0x0d, 0x00, 0xe0, 0x00,                 /* TCP: source and dest ports */
13401 0x00, 0x00, 0x01, 0x00,                 /* TCP: seq */
13402 0x00, 0x00, 0x02, 0x00,                 /* TCP: ack_seq */
13403 0x80, 0x10, 0x10, 0x00,                 /* TCP: doff 8 (32 bytes), ACK, window */
13404 0x14, 0x09, 0x00, 0x00,                 /* TCP: csum placeholder, urg_ptr 0 */
13405 0x01, 0x01, 0x08, 0x0a,                 /* TCP opts: NOP, NOP, timestamp */
13406 0x11, 0x11, 0x11, 0x11,                 /* TCP opts: timestamp value */
13407 0x11, 0x11, 0x11, 0x11,                 /* TCP opts: timestamp echo reply */
13408 };
13409
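/* Sketch of the flow below: build a test frame (optionally a fake TSO
 * super-frame based on tg3_tso_header), post it as a single BD on the
 * transmit ring, force a coalescence interrupt, poll the status block
 * until the TX consumer and RX producer indices show the packet(s)
 * completed, then compare the received payload byte-for-byte against
 * what was sent.
 */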
13410 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13411 {
13412         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13413         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13414         u32 budget;
13415         struct sk_buff *skb;
13416         u8 *tx_data, *rx_data;
13417         dma_addr_t map;
13418         int num_pkts, tx_len, rx_len, i, err;
13419         struct tg3_rx_buffer_desc *desc;
13420         struct tg3_napi *tnapi, *rnapi;
13421         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13422
13423         tnapi = &tp->napi[0];
13424         rnapi = &tp->napi[0];
13425         if (tp->irq_cnt > 1) {
13426                 if (tg3_flag(tp, ENABLE_RSS))
13427                         rnapi = &tp->napi[1];
13428                 if (tg3_flag(tp, ENABLE_TSS))
13429                         tnapi = &tp->napi[1];
13430         }
13431         coal_now = tnapi->coal_now | rnapi->coal_now;
13432
13433         err = -EIO;
13434
13435         tx_len = pktsz;
13436         skb = netdev_alloc_skb(tp->dev, tx_len);
13437         if (!skb)
13438                 return -ENOMEM;
13439
13440         tx_data = skb_put(skb, tx_len);
13441         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13442         memset(tx_data + ETH_ALEN, 0x0, 8);
13443
13444         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13445
13446         if (tso_loopback) {
13447                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13448
13449                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13450                               TG3_TSO_TCP_OPT_LEN;
13451
13452                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13453                        sizeof(tg3_tso_header));
13454                 mss = TG3_TSO_MSS;
13455
13456                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13457                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13458
13459                 /* Set the total length field in the IP header */
13460                 iph->tot_len = htons((u16)(mss + hdr_len));
13461
13462                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13463                               TXD_FLAG_CPU_POST_DMA);
13464
13465                 if (tg3_flag(tp, HW_TSO_1) ||
13466                     tg3_flag(tp, HW_TSO_2) ||
13467                     tg3_flag(tp, HW_TSO_3)) {
13468                         struct tcphdr *th;
13469                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13470                         th = (struct tcphdr *)&tx_data[val];
13471                         th->check = 0;
13472                 } else
13473                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13474
13475                 if (tg3_flag(tp, HW_TSO_3)) {
13476                         mss |= (hdr_len & 0xc) << 12;
13477                         if (hdr_len & 0x10)
13478                                 base_flags |= 0x00000010;
13479                         base_flags |= (hdr_len & 0x3e0) << 5;
13480                 } else if (tg3_flag(tp, HW_TSO_2))
13481                         mss |= hdr_len << 9;
13482                 else if (tg3_flag(tp, HW_TSO_1) ||
13483                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13484                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13485                 } else {
13486                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13487                 }
13488
13489                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13490         } else {
13491                 num_pkts = 1;
13492                 data_off = ETH_HLEN;
13493
13494                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13495                     tx_len > VLAN_ETH_FRAME_LEN)
13496                         base_flags |= TXD_FLAG_JMB_PKT;
13497         }
13498
13499         for (i = data_off; i < tx_len; i++)
13500                 tx_data[i] = (u8) (i & 0xff);
13501
13502         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13503         if (pci_dma_mapping_error(tp->pdev, map)) {
13504                 dev_kfree_skb(skb);
13505                 return -EIO;
13506         }
13507
13508         val = tnapi->tx_prod;
13509         tnapi->tx_buffers[val].skb = skb;
13510         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13511
13512         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13513                rnapi->coal_now);
13514
13515         udelay(10);
13516
13517         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13518
13519         budget = tg3_tx_avail(tnapi);
13520         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13521                             base_flags | TXD_FLAG_END, mss, 0)) {
13522                 tnapi->tx_buffers[val].skb = NULL;
13523                 dev_kfree_skb(skb);
13524                 return -EIO;
13525         }
13526
13527         tnapi->tx_prod++;
13528
13529         /* Sync BD data before updating mailbox */
13530         wmb();
13531
13532         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13533         tr32_mailbox(tnapi->prodmbox);
13534
13535         udelay(10);
13536
13537         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13538         for (i = 0; i < 35; i++) {
13539                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13540                        coal_now);
13541
13542                 udelay(10);
13543
13544                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13545                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13546                 if ((tx_idx == tnapi->tx_prod) &&
13547                     (rx_idx == (rx_start_idx + num_pkts)))
13548                         break;
13549         }
13550
13551         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13552         dev_kfree_skb(skb);
13553
13554         if (tx_idx != tnapi->tx_prod)
13555                 goto out;
13556
13557         if (rx_idx != rx_start_idx + num_pkts)
13558                 goto out;
13559
13560         val = data_off;
13561         while (rx_idx != rx_start_idx) {
13562                 desc = &rnapi->rx_rcb[rx_start_idx++];
13563                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13564                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13565
13566                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13567                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13568                         goto out;
13569
13570                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13571                          - ETH_FCS_LEN;
13572
13573                 if (!tso_loopback) {
13574                         if (rx_len != tx_len)
13575                                 goto out;
13576
13577                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13578                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13579                                         goto out;
13580                         } else {
13581                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13582                                         goto out;
13583                         }
13584                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13585                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13586                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13587                         goto out;
13588                 }
13589
13590                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13591                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13592                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13593                                              mapping);
13594                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13595                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13596                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13597                                              mapping);
13598                 } else
13599                         goto out;
13600
13601                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13602                                             PCI_DMA_FROMDEVICE);
13603
13604                 rx_data += TG3_RX_OFFSET(tp);
13605                 for (i = data_off; i < rx_len; i++, val++) {
13606                         if (*(rx_data + i) != (u8) (val & 0xff))
13607                                 goto out;
13608                 }
13609         }
13610
13611         err = 0;
13612
13613         /* tg3_free_rings will unmap and free the rx_data */
13614 out:
13615         return err;
13616 }
13617
13618 #define TG3_STD_LOOPBACK_FAILED         1
13619 #define TG3_JMB_LOOPBACK_FAILED         2
13620 #define TG3_TSO_LOOPBACK_FAILED         4
13621 #define TG3_LOOPBACK_FAILED \
13622         (TG3_STD_LOOPBACK_FAILED | \
13623          TG3_JMB_LOOPBACK_FAILED | \
13624          TG3_TSO_LOOPBACK_FAILED)
13625
13626 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13627 {
13628         int err = -EIO;
13629         u32 eee_cap;
13630         u32 jmb_pkt_sz = 9000;
13631
13632         if (tp->dma_limit)
13633                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13634
13635         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13636         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13637
13638         if (!netif_running(tp->dev)) {
13639                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13640                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13641                 if (do_extlpbk)
13642                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13643                 goto done;
13644         }
13645
13646         err = tg3_reset_hw(tp, true);
13647         if (err) {
13648                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13649                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13650                 if (do_extlpbk)
13651                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13652                 goto done;
13653         }
13654
13655         if (tg3_flag(tp, ENABLE_RSS)) {
13656                 int i;
13657
13658                 /* Reroute all rx packets to the 1st queue */
13659                 for (i = MAC_RSS_INDIR_TBL_0;
13660                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13661                         tw32(i, 0x0);
13662         }
13663
13664         /* HW errata - mac loopback fails in some cases on 5780.
13665          * Normal traffic and PHY loopback are not affected by
13666          * errata.  Also, the MAC loopback test is deprecated for
13667          * all newer ASIC revisions.
13668          */
13669         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13670             !tg3_flag(tp, CPMU_PRESENT)) {
13671                 tg3_mac_loopback(tp, true);
13672
13673                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13674                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13675
13676                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13677                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13678                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13679
13680                 tg3_mac_loopback(tp, false);
13681         }
13682
13683         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13684             !tg3_flag(tp, USE_PHYLIB)) {
13685                 int i;
13686
13687                 tg3_phy_lpbk_set(tp, 0, false);
13688
13689                 /* Wait for link */
13690                 for (i = 0; i < 100; i++) {
13691                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13692                                 break;
13693                         mdelay(1);
13694                 }
13695
13696                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13697                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13698                 if (tg3_flag(tp, TSO_CAPABLE) &&
13699                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13700                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13701                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13702                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13703                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13704
13705                 if (do_extlpbk) {
13706                         tg3_phy_lpbk_set(tp, 0, true);
13707
13708                         /* All link indications report up, but the hardware
13709                          * isn't really ready for about 20 msec.  Double it
13710                          * to be sure.
13711                          */
13712                         mdelay(40);
13713
13714                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13715                                 data[TG3_EXT_LOOPB_TEST] |=
13716                                                         TG3_STD_LOOPBACK_FAILED;
13717                         if (tg3_flag(tp, TSO_CAPABLE) &&
13718                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13719                                 data[TG3_EXT_LOOPB_TEST] |=
13720                                                         TG3_TSO_LOOPBACK_FAILED;
13721                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13722                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13723                                 data[TG3_EXT_LOOPB_TEST] |=
13724                                                         TG3_JMB_LOOPBACK_FAILED;
13725                 }
13726
13727                 /* Re-enable gphy autopowerdown. */
13728                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13729                         tg3_phy_toggle_apd(tp, true);
13730         }
13731
13732         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13733                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13734
13735 done:
13736         tp->phy_flags |= eee_cap;
13737
13738         return err;
13739 }
13740
13741 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13742                           u64 *data)
13743 {
13744         struct tg3 *tp = netdev_priv(dev);
13745         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13746
13747         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13748                 if (tg3_power_up(tp)) {
13749                         etest->flags |= ETH_TEST_FL_FAILED;
13750                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13751                         return;
13752                 }
13753                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13754         }
13755
13756         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13757
13758         if (tg3_test_nvram(tp) != 0) {
13759                 etest->flags |= ETH_TEST_FL_FAILED;
13760                 data[TG3_NVRAM_TEST] = 1;
13761         }
13762         if (!doextlpbk && tg3_test_link(tp)) {
13763                 etest->flags |= ETH_TEST_FL_FAILED;
13764                 data[TG3_LINK_TEST] = 1;
13765         }
13766         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13767                 int err, err2 = 0, irq_sync = 0;
13768
13769                 if (netif_running(dev)) {
13770                         tg3_phy_stop(tp);
13771                         tg3_netif_stop(tp);
13772                         irq_sync = 1;
13773                 }
13774
13775                 tg3_full_lock(tp, irq_sync);
13776                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13777                 err = tg3_nvram_lock(tp);
13778                 tg3_halt_cpu(tp, RX_CPU_BASE);
13779                 if (!tg3_flag(tp, 5705_PLUS))
13780                         tg3_halt_cpu(tp, TX_CPU_BASE);
13781                 if (!err)
13782                         tg3_nvram_unlock(tp);
13783
13784                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13785                         tg3_phy_reset(tp);
13786
13787                 if (tg3_test_registers(tp) != 0) {
13788                         etest->flags |= ETH_TEST_FL_FAILED;
13789                         data[TG3_REGISTER_TEST] = 1;
13790                 }
13791
13792                 if (tg3_test_memory(tp) != 0) {
13793                         etest->flags |= ETH_TEST_FL_FAILED;
13794                         data[TG3_MEMORY_TEST] = 1;
13795                 }
13796
13797                 if (doextlpbk)
13798                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13799
13800                 if (tg3_test_loopback(tp, data, doextlpbk))
13801                         etest->flags |= ETH_TEST_FL_FAILED;
13802
13803                 tg3_full_unlock(tp);
13804
13805                 if (tg3_test_interrupt(tp) != 0) {
13806                         etest->flags |= ETH_TEST_FL_FAILED;
13807                         data[TG3_INTERRUPT_TEST] = 1;
13808                 }
13809
13810                 tg3_full_lock(tp, 0);
13811
13812                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13813                 if (netif_running(dev)) {
13814                         tg3_flag_set(tp, INIT_COMPLETE);
13815                         err2 = tg3_restart_hw(tp, true);
13816                         if (!err2)
13817                                 tg3_netif_start(tp);
13818                 }
13819
13820                 tg3_full_unlock(tp);
13821
13822                 if (irq_sync && !err2)
13823                         tg3_phy_start(tp);
13824         }
13825         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13826                 tg3_power_down_prepare(tp);
13827
13828 }
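
/* Illustrative usage: "ethtool -t eth0 online" runs only the NVRAM and
 * link checks above, while "ethtool -t eth0 offline" additionally halts
 * the device for the register, memory, loopback and interrupt tests;
 * "external_lb" sets ETH_TEST_FL_EXTERNAL_LB for the external loopback
 * pass.  "eth0" is a hypothetical interface name.
 */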
13829
13830 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13831 {
13832         struct tg3 *tp = netdev_priv(dev);
13833         struct hwtstamp_config stmpconf;
13834
13835         if (!tg3_flag(tp, PTP_CAPABLE))
13836                 return -EOPNOTSUPP;
13837
13838         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13839                 return -EFAULT;
13840
13841         if (stmpconf.flags)
13842                 return -EINVAL;
13843
13844         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13845             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13846                 return -ERANGE;
13847
13848         switch (stmpconf.rx_filter) {
13849         case HWTSTAMP_FILTER_NONE:
13850                 tp->rxptpctl = 0;
13851                 break;
13852         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13854                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13855                 break;
13856         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13858                                TG3_RX_PTP_CTL_SYNC_EVNT;
13859                 break;
13860         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13861                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13862                                TG3_RX_PTP_CTL_DELAY_REQ;
13863                 break;
13864         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13865                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13866                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13867                 break;
13868         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13869                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13870                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13871                 break;
13872         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13873                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13874                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13875                 break;
13876         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13877                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13878                                TG3_RX_PTP_CTL_SYNC_EVNT;
13879                 break;
13880         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13881                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13882                                TG3_RX_PTP_CTL_SYNC_EVNT;
13883                 break;
13884         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13885                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13886                                TG3_RX_PTP_CTL_SYNC_EVNT;
13887                 break;
13888         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13889                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13890                                TG3_RX_PTP_CTL_DELAY_REQ;
13891                 break;
13892         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13893                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13894                                TG3_RX_PTP_CTL_DELAY_REQ;
13895                 break;
13896         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13897                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13898                                TG3_RX_PTP_CTL_DELAY_REQ;
13899                 break;
13900         default:
13901                 return -ERANGE;
13902         }
13903
13904         if (netif_running(dev) && tp->rxptpctl)
13905                 tw32(TG3_RX_PTP_CTL,
13906                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13907
13908         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13909                 tg3_flag_set(tp, TX_TSTAMP_EN);
13910         else
13911                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13912
13913         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13914                 -EFAULT : 0;
13915 }
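
/* Minimal userspace sketch (illustrative; "eth0" and the socket fd are
 * assumptions, includes and error handling omitted) of how this handler
 * is normally reached via the SIOCSHWTSTAMP ioctl:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */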
13916
13917 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13918 {
13919         struct tg3 *tp = netdev_priv(dev);
13920         struct hwtstamp_config stmpconf;
13921
13922         if (!tg3_flag(tp, PTP_CAPABLE))
13923                 return -EOPNOTSUPP;
13924
13925         stmpconf.flags = 0;
13926         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13927                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13928
13929         switch (tp->rxptpctl) {
13930         case 0:
13931                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13932                 break;
13933         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13934                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13935                 break;
13936         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13937                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13938                 break;
13939         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13940                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13941                 break;
13942         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13943                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13944                 break;
13945         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13946                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13947                 break;
13948         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13949                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13950                 break;
13951         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13952                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13953                 break;
13954         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13955                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13956                 break;
13957         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13958                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13959                 break;
13960         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13961                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13962                 break;
13963         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13964                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13965                 break;
13966         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13967                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13968                 break;
13969         default:
13970                 WARN_ON_ONCE(1);
13971                 return -ERANGE;
13972         }
13973
13974         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13975                 -EFAULT : 0;
13976 }
13977
13978 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13979 {
13980         struct mii_ioctl_data *data = if_mii(ifr);
13981         struct tg3 *tp = netdev_priv(dev);
13982         int err;
13983
13984         if (tg3_flag(tp, USE_PHYLIB)) {
13985                 struct phy_device *phydev;
13986                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13987                         return -EAGAIN;
13988                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13989                 return phy_mii_ioctl(phydev, ifr, cmd);
13990         }
13991
13992         switch (cmd) {
13993         case SIOCGMIIPHY:
13994                 data->phy_id = tp->phy_addr;
13995
13996                 fallthrough;
13997         case SIOCGMIIREG: {
13998                 u32 mii_regval;
13999
14000                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14001                         break;                  /* We have no PHY */
14002
14003                 if (!netif_running(dev))
14004                         return -EAGAIN;
14005
14006                 spin_lock_bh(&tp->lock);
14007                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
14008                                     data->reg_num & 0x1f, &mii_regval);
14009                 spin_unlock_bh(&tp->lock);
14010
14011                 data->val_out = mii_regval;
14012
14013                 return err;
14014         }
14015
14016         case SIOCSMIIREG:
14017                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14018                         break;                  /* We have no PHY */
14019
14020                 if (!netif_running(dev))
14021                         return -EAGAIN;
14022
14023                 spin_lock_bh(&tp->lock);
14024                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
14025                                      data->reg_num & 0x1f, data->val_in);
14026                 spin_unlock_bh(&tp->lock);
14027
14028                 return err;
14029
14030         case SIOCSHWTSTAMP:
14031                 return tg3_hwtstamp_set(dev, ifr);
14032
14033         case SIOCGHWTSTAMP:
14034                 return tg3_hwtstamp_get(dev, ifr);
14035
14036         default:
14037                 /* do nothing */
14038                 break;
14039         }
14040         return -EOPNOTSUPP;
14041 }
14042
14043 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14044 {
14045         struct tg3 *tp = netdev_priv(dev);
14046
14047         memcpy(ec, &tp->coal, sizeof(*ec));
14048         return 0;
14049 }
14050
14051 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14052 {
14053         struct tg3 *tp = netdev_priv(dev);
14054         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14055         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14056
14057         if (!tg3_flag(tp, 5705_PLUS)) {
14058                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14059                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14060                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14061                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14062         }
14063
14064         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14065             (!ec->rx_coalesce_usecs) ||
14066             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14067             (!ec->tx_coalesce_usecs) ||
14068             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14069             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14070             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14071             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14072             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14073             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14074             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14075             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14076                 return -EINVAL;
14077
14078         /* Only copy relevant parameters, ignore all others. */
14079         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14080         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14081         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14082         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14083         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14084         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14085         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14086         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14087         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14088
14089         if (netif_running(dev)) {
14090                 tg3_full_lock(tp, 0);
14091                 __tg3_set_coalesce(tp, &tp->coal);
14092                 tg3_full_unlock(tp);
14093         }
14094         return 0;
14095 }
14096
14097 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14098 {
14099         struct tg3 *tp = netdev_priv(dev);
14100
14101         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14102                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14103                 return -EOPNOTSUPP;
14104         }
14105
14106         if (edata->advertised != tp->eee.advertised) {
14107                 netdev_warn(tp->dev,
14108                             "Direct manipulation of EEE advertisement is not supported\n");
14109                 return -EINVAL;
14110         }
14111
14112         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14113                 netdev_warn(tp->dev,
14114                             "Maximum supported Tx LPI timer is %#x\n",
14115                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14116                 return -EINVAL;
14117         }
14118
14119         tp->eee = *edata;
14120
14121         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14122         tg3_warn_mgmt_link_flap(tp);
14123
14124         if (netif_running(tp->dev)) {
14125                 tg3_full_lock(tp, 0);
14126                 tg3_setup_eee(tp);
14127                 tg3_phy_reset(tp);
14128                 tg3_full_unlock(tp);
14129         }
14130
14131         return 0;
14132 }
14133
14134 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14135 {
14136         struct tg3 *tp = netdev_priv(dev);
14137
14138         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14139                 netdev_warn(tp->dev,
14140                             "Board does not support EEE!\n");
14141                 return -EOPNOTSUPP;
14142         }
14143
14144         *edata = tp->eee;
14145         return 0;
14146 }
14147
14148 static const struct ethtool_ops tg3_ethtool_ops = {
14149         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14150                                      ETHTOOL_COALESCE_MAX_FRAMES |
14151                                      ETHTOOL_COALESCE_USECS_IRQ |
14152                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14153                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14154         .get_drvinfo            = tg3_get_drvinfo,
14155         .get_regs_len           = tg3_get_regs_len,
14156         .get_regs               = tg3_get_regs,
14157         .get_wol                = tg3_get_wol,
14158         .set_wol                = tg3_set_wol,
14159         .get_msglevel           = tg3_get_msglevel,
14160         .set_msglevel           = tg3_set_msglevel,
14161         .nway_reset             = tg3_nway_reset,
14162         .get_link               = ethtool_op_get_link,
14163         .get_eeprom_len         = tg3_get_eeprom_len,
14164         .get_eeprom             = tg3_get_eeprom,
14165         .set_eeprom             = tg3_set_eeprom,
14166         .get_ringparam          = tg3_get_ringparam,
14167         .set_ringparam          = tg3_set_ringparam,
14168         .get_pauseparam         = tg3_get_pauseparam,
14169         .set_pauseparam         = tg3_set_pauseparam,
14170         .self_test              = tg3_self_test,
14171         .get_strings            = tg3_get_strings,
14172         .set_phys_id            = tg3_set_phys_id,
14173         .get_ethtool_stats      = tg3_get_ethtool_stats,
14174         .get_coalesce           = tg3_get_coalesce,
14175         .set_coalesce           = tg3_set_coalesce,
14176         .get_sset_count         = tg3_get_sset_count,
14177         .get_rxnfc              = tg3_get_rxnfc,
14178         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14179         .get_rxfh               = tg3_get_rxfh,
14180         .set_rxfh               = tg3_set_rxfh,
14181         .get_channels           = tg3_get_channels,
14182         .set_channels           = tg3_set_channels,
14183         .get_ts_info            = tg3_get_ts_info,
14184         .get_eee                = tg3_get_eee,
14185         .set_eee                = tg3_set_eee,
14186         .get_link_ksettings     = tg3_get_link_ksettings,
14187         .set_link_ksettings     = tg3_set_link_ksettings,
14188 };
14189
14190 static void tg3_get_stats64(struct net_device *dev,
14191                             struct rtnl_link_stats64 *stats)
14192 {
14193         struct tg3 *tp = netdev_priv(dev);
14194
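        /* If the NIC is not fully initialized, hw_stats may be gone;
         * report the last snapshot taken before it went down.
         */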
14195         spin_lock_bh(&tp->lock);
14196         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14197                 *stats = tp->net_stats_prev;
14198                 spin_unlock_bh(&tp->lock);
14199                 return;
14200         }
14201
14202         tg3_get_nstats(tp, stats);
14203         spin_unlock_bh(&tp->lock);
14204 }
14205
14206 static void tg3_set_rx_mode(struct net_device *dev)
14207 {
14208         struct tg3 *tp = netdev_priv(dev);
14209
14210         if (!netif_running(dev))
14211                 return;
14212
14213         tg3_full_lock(tp, 0);
14214         __tg3_set_rx_mode(dev);
14215         tg3_full_unlock(tp);
14216 }
14217
14218 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14219                                int new_mtu)
14220 {
14221         dev->mtu = new_mtu;
14222
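        /* On 5780-class parts TSO and jumbo frames are mutually exclusive,
         * as the flag juggling below reflects: TSO_CAPABLE is dropped for
         * jumbo MTUs and restored otherwise.
         */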
14223         if (new_mtu > ETH_DATA_LEN) {
14224                 if (tg3_flag(tp, 5780_CLASS)) {
14225                         netdev_update_features(dev);
14226                         tg3_flag_clear(tp, TSO_CAPABLE);
14227                 } else {
14228                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14229                 }
14230         } else {
14231                 if (tg3_flag(tp, 5780_CLASS)) {
14232                         tg3_flag_set(tp, TSO_CAPABLE);
14233                         netdev_update_features(dev);
14234                 }
14235                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14236         }
14237 }
14238
14239 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14240 {
14241         struct tg3 *tp = netdev_priv(dev);
14242         int err;
14243         bool reset_phy = false;
14244
14245         if (!netif_running(dev)) {
14246                 /* The new MTU will simply take effect the next
14247                  * time the device is brought up.
14248                  */
14249                 tg3_set_mtu(dev, tp, new_mtu);
14250                 return 0;
14251         }
14252
14253         tg3_phy_stop(tp);
14254
14255         tg3_netif_stop(tp);
14256
14257         tg3_set_mtu(dev, tp, new_mtu);
14258
14259         tg3_full_lock(tp, 1);
14260
14261         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14262
14263         /* Reset the PHY, otherwise the read DMA engine will be left in a
14264          * mode that breaks all DMA requests down to 256 bytes.
14265          */
14266         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14267             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14268             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14269             tg3_asic_rev(tp) == ASIC_REV_5720)
14270                 reset_phy = true;
14271
14272         err = tg3_restart_hw(tp, reset_phy);
14273
14274         if (!err)
14275                 tg3_netif_start(tp);
14276
14277         tg3_full_unlock(tp);
14278
14279         if (!err)
14280                 tg3_phy_start(tp);
14281
14282         return err;
14283 }
14284
14285 static const struct net_device_ops tg3_netdev_ops = {
14286         .ndo_open               = tg3_open,
14287         .ndo_stop               = tg3_close,
14288         .ndo_start_xmit         = tg3_start_xmit,
14289         .ndo_get_stats64        = tg3_get_stats64,
14290         .ndo_validate_addr      = eth_validate_addr,
14291         .ndo_set_rx_mode        = tg3_set_rx_mode,
14292         .ndo_set_mac_address    = tg3_set_mac_addr,
14293         .ndo_do_ioctl           = tg3_ioctl,
14294         .ndo_tx_timeout         = tg3_tx_timeout,
14295         .ndo_change_mtu         = tg3_change_mtu,
14296         .ndo_fix_features       = tg3_fix_features,
14297         .ndo_set_features       = tg3_set_features,
14298 #ifdef CONFIG_NET_POLL_CONTROLLER
14299         .ndo_poll_controller    = tg3_poll_controller,
14300 #endif
14301 };
14302
14303 static void tg3_get_eeprom_size(struct tg3 *tp)
14304 {
14305         u32 cursize, val, magic;
14306
14307         tp->nvram_size = EEPROM_CHIP_SIZE;
14308
14309         if (tg3_nvram_read(tp, 0, &magic) != 0)
14310                 return;
14311
14312         if ((magic != TG3_EEPROM_MAGIC) &&
14313             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14314             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14315                 return;
14316
14317         /*
14318          * Size the chip by reading offsets at increasing powers of two.
14319          * When we encounter our validation signature, we know the addressing
14320          * has wrapped around, and thus have our chip size.
14321          */
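        /* Worked example: on a hypothetical 512-byte part, the reads at
         * 0x10 ... 0x100 return ordinary data; the read at 0x200 wraps
         * back to offset 0 and returns the magic, so cursize is 0x200.
         */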
14322         cursize = 0x10;
14323
14324         while (cursize < tp->nvram_size) {
14325                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14326                         return;
14327
14328                 if (val == magic)
14329                         break;
14330
14331                 cursize <<= 1;
14332         }
14333
14334         tp->nvram_size = cursize;
14335 }
14336
14337 static void tg3_get_nvram_size(struct tg3 *tp)
14338 {
14339         u32 val;
14340
14341         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14342                 return;
14343
14344         /* Selfboot format */
14345         if (val != TG3_EEPROM_MAGIC) {
14346                 tg3_get_eeprom_size(tp);
14347                 return;
14348         }
14349
14350         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14351                 if (val != 0) {
14352                         /* We want to operate on the 16-bit value at
14353                          * offset 0xf2.  The tg3_nvram_read() call reads
14354                          * from NVRAM and byteswaps the data according to
14355                          * the byteswapping settings used for all other
14356                          * register accesses, which guarantees that the
14357                          * value we want always lands in the lower 16 bits.
14358                          * However, NVRAM stores data in LE format, so the
14359                          * value read back is always opposite the CPU's
14360                          * endianness.  The 16-bit byteswap below then
14361                          * brings the data to CPU endianness.
14362                          */
14363                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14364                         return;
14365                 }
14366         }
14367         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14368 }
14369
14370 static void tg3_get_nvram_info(struct tg3 *tp)
14371 {
14372         u32 nvcfg1;
14373
14374         nvcfg1 = tr32(NVRAM_CFG1);
14375         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14376                 tg3_flag_set(tp, FLASH);
14377         } else {
14378                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14379                 tw32(NVRAM_CFG1, nvcfg1);
14380         }
14381
14382         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14383             tg3_flag(tp, 5780_CLASS)) {
14384                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14385                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14386                         tp->nvram_jedecnum = JEDEC_ATMEL;
14387                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14388                         tg3_flag_set(tp, NVRAM_BUFFERED);
14389                         break;
14390                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14391                         tp->nvram_jedecnum = JEDEC_ATMEL;
14392                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14393                         break;
14394                 case FLASH_VENDOR_ATMEL_EEPROM:
14395                         tp->nvram_jedecnum = JEDEC_ATMEL;
14396                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14397                         tg3_flag_set(tp, NVRAM_BUFFERED);
14398                         break;
14399                 case FLASH_VENDOR_ST:
14400                         tp->nvram_jedecnum = JEDEC_ST;
14401                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14402                         tg3_flag_set(tp, NVRAM_BUFFERED);
14403                         break;
14404                 case FLASH_VENDOR_SAIFUN:
14405                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14406                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14407                         break;
14408                 case FLASH_VENDOR_SST_SMALL:
14409                 case FLASH_VENDOR_SST_LARGE:
14410                         tp->nvram_jedecnum = JEDEC_SST;
14411                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14412                         break;
14413                 }
14414         } else {
14415                 tp->nvram_jedecnum = JEDEC_ATMEL;
14416                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14417                 tg3_flag_set(tp, NVRAM_BUFFERED);
14418         }
14419 }
14420
14421 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14422 {
14423         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14424         case FLASH_5752PAGE_SIZE_256:
14425                 tp->nvram_pagesize = 256;
14426                 break;
14427         case FLASH_5752PAGE_SIZE_512:
14428                 tp->nvram_pagesize = 512;
14429                 break;
14430         case FLASH_5752PAGE_SIZE_1K:
14431                 tp->nvram_pagesize = 1024;
14432                 break;
14433         case FLASH_5752PAGE_SIZE_2K:
14434                 tp->nvram_pagesize = 2048;
14435                 break;
14436         case FLASH_5752PAGE_SIZE_4K:
14437                 tp->nvram_pagesize = 4096;
14438                 break;
14439         case FLASH_5752PAGE_SIZE_264:
14440                 tp->nvram_pagesize = 264;
14441                 break;
14442         case FLASH_5752PAGE_SIZE_528:
14443                 tp->nvram_pagesize = 528;
14444                 break;
14445         }
14446 }
14447
14448 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14449 {
14450         u32 nvcfg1;
14451
14452         nvcfg1 = tr32(NVRAM_CFG1);
14453
14454         /* NVRAM protection for TPM */
14455         if (nvcfg1 & (1 << 27))
14456                 tg3_flag_set(tp, PROTECTED_NVRAM);
14457
14458         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14459         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14460         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14461                 tp->nvram_jedecnum = JEDEC_ATMEL;
14462                 tg3_flag_set(tp, NVRAM_BUFFERED);
14463                 break;
14464         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14465                 tp->nvram_jedecnum = JEDEC_ATMEL;
14466                 tg3_flag_set(tp, NVRAM_BUFFERED);
14467                 tg3_flag_set(tp, FLASH);
14468                 break;
14469         case FLASH_5752VENDOR_ST_M45PE10:
14470         case FLASH_5752VENDOR_ST_M45PE20:
14471         case FLASH_5752VENDOR_ST_M45PE40:
14472                 tp->nvram_jedecnum = JEDEC_ST;
14473                 tg3_flag_set(tp, NVRAM_BUFFERED);
14474                 tg3_flag_set(tp, FLASH);
14475                 break;
14476         }
14477
14478         if (tg3_flag(tp, FLASH)) {
14479                 tg3_nvram_get_pagesize(tp, nvcfg1);
14480         } else {
14481                 /* For eeprom, set pagesize to maximum eeprom size */
14482                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14483
14484                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14485                 tw32(NVRAM_CFG1, nvcfg1);
14486         }
14487 }
14488
14489 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14490 {
14491         u32 nvcfg1, protect = 0;
14492
14493         nvcfg1 = tr32(NVRAM_CFG1);
14494
14495         /* NVRAM protection for TPM */
14496         if (nvcfg1 & (1 << 27)) {
14497                 tg3_flag_set(tp, PROTECTED_NVRAM);
14498                 protect = 1;
14499         }
14500
14501         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14502         switch (nvcfg1) {
14503         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14504         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14505         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14506         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14507                 tp->nvram_jedecnum = JEDEC_ATMEL;
14508                 tg3_flag_set(tp, NVRAM_BUFFERED);
14509                 tg3_flag_set(tp, FLASH);
14510                 tp->nvram_pagesize = 264;
14511                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14512                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14513                         tp->nvram_size = (protect ? 0x3e200 :
14514                                           TG3_NVRAM_SIZE_512KB);
14515                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14516                         tp->nvram_size = (protect ? 0x1f200 :
14517                                           TG3_NVRAM_SIZE_256KB);
14518                 else
14519                         tp->nvram_size = (protect ? 0x1f200 :
14520                                           TG3_NVRAM_SIZE_128KB);
14521                 break;
14522         case FLASH_5752VENDOR_ST_M45PE10:
14523         case FLASH_5752VENDOR_ST_M45PE20:
14524         case FLASH_5752VENDOR_ST_M45PE40:
14525                 tp->nvram_jedecnum = JEDEC_ST;
14526                 tg3_flag_set(tp, NVRAM_BUFFERED);
14527                 tg3_flag_set(tp, FLASH);
14528                 tp->nvram_pagesize = 256;
14529                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14530                         tp->nvram_size = (protect ?
14531                                           TG3_NVRAM_SIZE_64KB :
14532                                           TG3_NVRAM_SIZE_128KB);
14533                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14534                         tp->nvram_size = (protect ?
14535                                           TG3_NVRAM_SIZE_64KB :
14536                                           TG3_NVRAM_SIZE_256KB);
14537                 else
14538                         tp->nvram_size = (protect ?
14539                                           TG3_NVRAM_SIZE_128KB :
14540                                           TG3_NVRAM_SIZE_512KB);
14541                 break;
14542         }
14543 }
14544
14545 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14546 {
14547         u32 nvcfg1;
14548
14549         nvcfg1 = tr32(NVRAM_CFG1);
14550
14551         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14552         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14553         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14554         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14555         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14556                 tp->nvram_jedecnum = JEDEC_ATMEL;
14557                 tg3_flag_set(tp, NVRAM_BUFFERED);
14558                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14559
14560                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14561                 tw32(NVRAM_CFG1, nvcfg1);
14562                 break;
14563         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14564         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14565         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14566         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14567                 tp->nvram_jedecnum = JEDEC_ATMEL;
14568                 tg3_flag_set(tp, NVRAM_BUFFERED);
14569                 tg3_flag_set(tp, FLASH);
14570                 tp->nvram_pagesize = 264;
14571                 break;
14572         case FLASH_5752VENDOR_ST_M45PE10:
14573         case FLASH_5752VENDOR_ST_M45PE20:
14574         case FLASH_5752VENDOR_ST_M45PE40:
14575                 tp->nvram_jedecnum = JEDEC_ST;
14576                 tg3_flag_set(tp, NVRAM_BUFFERED);
14577                 tg3_flag_set(tp, FLASH);
14578                 tp->nvram_pagesize = 256;
14579                 break;
14580         }
14581 }
14582
14583 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14584 {
14585         u32 nvcfg1, protect = 0;
14586
14587         nvcfg1 = tr32(NVRAM_CFG1);
14588
14589         /* NVRAM protection for TPM */
14590         if (nvcfg1 & (1 << 27)) {
14591                 tg3_flag_set(tp, PROTECTED_NVRAM);
14592                 protect = 1;
14593         }
14594
14595         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14596         switch (nvcfg1) {
14597         case FLASH_5761VENDOR_ATMEL_ADB021D:
14598         case FLASH_5761VENDOR_ATMEL_ADB041D:
14599         case FLASH_5761VENDOR_ATMEL_ADB081D:
14600         case FLASH_5761VENDOR_ATMEL_ADB161D:
14601         case FLASH_5761VENDOR_ATMEL_MDB021D:
14602         case FLASH_5761VENDOR_ATMEL_MDB041D:
14603         case FLASH_5761VENDOR_ATMEL_MDB081D:
14604         case FLASH_5761VENDOR_ATMEL_MDB161D:
14605                 tp->nvram_jedecnum = JEDEC_ATMEL;
14606                 tg3_flag_set(tp, NVRAM_BUFFERED);
14607                 tg3_flag_set(tp, FLASH);
14608                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14609                 tp->nvram_pagesize = 256;
14610                 break;
14611         case FLASH_5761VENDOR_ST_A_M45PE20:
14612         case FLASH_5761VENDOR_ST_A_M45PE40:
14613         case FLASH_5761VENDOR_ST_A_M45PE80:
14614         case FLASH_5761VENDOR_ST_A_M45PE16:
14615         case FLASH_5761VENDOR_ST_M_M45PE20:
14616         case FLASH_5761VENDOR_ST_M_M45PE40:
14617         case FLASH_5761VENDOR_ST_M_M45PE80:
14618         case FLASH_5761VENDOR_ST_M_M45PE16:
14619                 tp->nvram_jedecnum = JEDEC_ST;
14620                 tg3_flag_set(tp, NVRAM_BUFFERED);
14621                 tg3_flag_set(tp, FLASH);
14622                 tp->nvram_pagesize = 256;
14623                 break;
14624         }
14625
14626         if (protect) {
14627                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14628         } else {
14629                 switch (nvcfg1) {
14630                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14631                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14632                 case FLASH_5761VENDOR_ST_A_M45PE16:
14633                 case FLASH_5761VENDOR_ST_M_M45PE16:
14634                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14635                         break;
14636                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14637                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14638                 case FLASH_5761VENDOR_ST_A_M45PE80:
14639                 case FLASH_5761VENDOR_ST_M_M45PE80:
14640                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14641                         break;
14642                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14643                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14644                 case FLASH_5761VENDOR_ST_A_M45PE40:
14645                 case FLASH_5761VENDOR_ST_M_M45PE40:
14646                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14647                         break;
14648                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14649                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14650                 case FLASH_5761VENDOR_ST_A_M45PE20:
14651                 case FLASH_5761VENDOR_ST_M_M45PE20:
14652                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14653                         break;
14654                 }
14655         }
14656 }
14657
14658 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14659 {
14660         tp->nvram_jedecnum = JEDEC_ATMEL;
14661         tg3_flag_set(tp, NVRAM_BUFFERED);
14662         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14663 }
14664
14665 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14666 {
14667         u32 nvcfg1;
14668
14669         nvcfg1 = tr32(NVRAM_CFG1);
14670
14671         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14672         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14673         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14674                 tp->nvram_jedecnum = JEDEC_ATMEL;
14675                 tg3_flag_set(tp, NVRAM_BUFFERED);
14676                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14677
14678                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14679                 tw32(NVRAM_CFG1, nvcfg1);
14680                 return;
14681         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14682         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14683         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14684         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14685         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14686         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14687         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14688                 tp->nvram_jedecnum = JEDEC_ATMEL;
14689                 tg3_flag_set(tp, NVRAM_BUFFERED);
14690                 tg3_flag_set(tp, FLASH);
14691
14692                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14693                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14694                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14695                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14696                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14697                         break;
14698                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14699                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14700                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14701                         break;
14702                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14703                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14704                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14705                         break;
14706                 }
14707                 break;
14708         case FLASH_5752VENDOR_ST_M45PE10:
14709         case FLASH_5752VENDOR_ST_M45PE20:
14710         case FLASH_5752VENDOR_ST_M45PE40:
14711                 tp->nvram_jedecnum = JEDEC_ST;
14712                 tg3_flag_set(tp, NVRAM_BUFFERED);
14713                 tg3_flag_set(tp, FLASH);
14714
14715                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14716                 case FLASH_5752VENDOR_ST_M45PE10:
14717                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14718                         break;
14719                 case FLASH_5752VENDOR_ST_M45PE20:
14720                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14721                         break;
14722                 case FLASH_5752VENDOR_ST_M45PE40:
14723                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14724                         break;
14725                 }
14726                 break;
14727         default:
14728                 tg3_flag_set(tp, NO_NVRAM);
14729                 return;
14730         }
14731
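        /* Only the 264/528-byte (Atmel DataFlash style) page sizes need
         * the NVRAM linear-to-page address translation; power-of-two
         * pages are addressed flat.
         */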
14732         tg3_nvram_get_pagesize(tp, nvcfg1);
14733         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14734                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14735 }
14736
14738 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14739 {
14740         u32 nvcfg1;
14741
14742         nvcfg1 = tr32(NVRAM_CFG1);
14743
14744         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14745         case FLASH_5717VENDOR_ATMEL_EEPROM:
14746         case FLASH_5717VENDOR_MICRO_EEPROM:
14747                 tp->nvram_jedecnum = JEDEC_ATMEL;
14748                 tg3_flag_set(tp, NVRAM_BUFFERED);
14749                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14750
14751                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14752                 tw32(NVRAM_CFG1, nvcfg1);
14753                 return;
14754         case FLASH_5717VENDOR_ATMEL_MDB011D:
14755         case FLASH_5717VENDOR_ATMEL_ADB011B:
14756         case FLASH_5717VENDOR_ATMEL_ADB011D:
14757         case FLASH_5717VENDOR_ATMEL_MDB021D:
14758         case FLASH_5717VENDOR_ATMEL_ADB021B:
14759         case FLASH_5717VENDOR_ATMEL_ADB021D:
14760         case FLASH_5717VENDOR_ATMEL_45USPT:
14761                 tp->nvram_jedecnum = JEDEC_ATMEL;
14762                 tg3_flag_set(tp, NVRAM_BUFFERED);
14763                 tg3_flag_set(tp, FLASH);
14764
14765                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14766                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14767                         /* Size detected later via tg3_get_nvram_size() */
14768                         break;
14769                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14770                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14771                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14772                         break;
14773                 default:
14774                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14775                         break;
14776                 }
14777                 break;
14778         case FLASH_5717VENDOR_ST_M_M25PE10:
14779         case FLASH_5717VENDOR_ST_A_M25PE10:
14780         case FLASH_5717VENDOR_ST_M_M45PE10:
14781         case FLASH_5717VENDOR_ST_A_M45PE10:
14782         case FLASH_5717VENDOR_ST_M_M25PE20:
14783         case FLASH_5717VENDOR_ST_A_M25PE20:
14784         case FLASH_5717VENDOR_ST_M_M45PE20:
14785         case FLASH_5717VENDOR_ST_A_M45PE20:
14786         case FLASH_5717VENDOR_ST_25USPT:
14787         case FLASH_5717VENDOR_ST_45USPT:
14788                 tp->nvram_jedecnum = JEDEC_ST;
14789                 tg3_flag_set(tp, NVRAM_BUFFERED);
14790                 tg3_flag_set(tp, FLASH);
14791
14792                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14793                 case FLASH_5717VENDOR_ST_M_M25PE20:
14794                 case FLASH_5717VENDOR_ST_M_M45PE20:
14795                         /* Size detected later via tg3_get_nvram_size() */
14796                         break;
14797                 case FLASH_5717VENDOR_ST_A_M25PE20:
14798                 case FLASH_5717VENDOR_ST_A_M45PE20:
14799                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14800                         break;
14801                 default:
14802                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14803                         break;
14804                 }
14805                 break;
14806         default:
14807                 tg3_flag_set(tp, NO_NVRAM);
14808                 return;
14809         }
14810
14811         tg3_nvram_get_pagesize(tp, nvcfg1);
14812         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14813                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14814 }
14815
14816 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14817 {
14818         u32 nvcfg1, nvmpinstrp, nv_status;
14819
14820         nvcfg1 = tr32(NVRAM_CFG1);
14821         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14822
14823         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14824                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14825                         tg3_flag_set(tp, NO_NVRAM);
14826                         return;
14827                 }
14828
14829                 switch (nvmpinstrp) {
14830                 case FLASH_5762_MX25L_100:
14831                 case FLASH_5762_MX25L_200:
14832                 case FLASH_5762_MX25L_400:
14833                 case FLASH_5762_MX25L_800:
14834                 case FLASH_5762_MX25L_160_320:
14835                         tp->nvram_pagesize = 4096;
14836                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14837                         tg3_flag_set(tp, NVRAM_BUFFERED);
14838                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14839                         tg3_flag_set(tp, FLASH);
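                        /* The autosense device-ID field appears to encode
                         * the flash size as a power of two in megabytes;
                         * shift it up to a byte count.
                         */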
14840                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14841                         tp->nvram_size =
14842                                 (1 << ((nv_status >> AUTOSENSE_DEVID) &
14843                                         AUTOSENSE_DEVID_MASK)
14844                                         << AUTOSENSE_SIZE_IN_MB);
14845                         return;
14846
14847                 case FLASH_5762_EEPROM_HD:
14848                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14849                         break;
14850                 case FLASH_5762_EEPROM_LD:
14851                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14852                         break;
14853                 case FLASH_5720VENDOR_M_ST_M45PE20:
14854                         /* This pinstrap supports multiple sizes, so force it
14855                          * to read the actual size from location 0xf0.
14856                          */
14857                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14858                         break;
14859                 }
14860         }
14861
14862         switch (nvmpinstrp) {
14863         case FLASH_5720_EEPROM_HD:
14864         case FLASH_5720_EEPROM_LD:
14865                 tp->nvram_jedecnum = JEDEC_ATMEL;
14866                 tg3_flag_set(tp, NVRAM_BUFFERED);
14867
14868                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14869                 tw32(NVRAM_CFG1, nvcfg1);
14870                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14871                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14872                 else
14873                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14874                 return;
14875         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14876         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14877         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14878         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14879         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14880         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14881         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14882         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14883         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14884         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14885         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14886         case FLASH_5720VENDOR_ATMEL_45USPT:
14887                 tp->nvram_jedecnum = JEDEC_ATMEL;
14888                 tg3_flag_set(tp, NVRAM_BUFFERED);
14889                 tg3_flag_set(tp, FLASH);
14890
14891                 switch (nvmpinstrp) {
14892                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14893                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14894                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14895                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14896                         break;
14897                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14898                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14899                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14900                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14901                         break;
14902                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14903                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14904                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14905                         break;
14906                 default:
14907                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14908                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14909                         break;
14910                 }
14911                 break;
14912         case FLASH_5720VENDOR_M_ST_M25PE10:
14913         case FLASH_5720VENDOR_M_ST_M45PE10:
14914         case FLASH_5720VENDOR_A_ST_M25PE10:
14915         case FLASH_5720VENDOR_A_ST_M45PE10:
14916         case FLASH_5720VENDOR_M_ST_M25PE20:
14917         case FLASH_5720VENDOR_M_ST_M45PE20:
14918         case FLASH_5720VENDOR_A_ST_M25PE20:
14919         case FLASH_5720VENDOR_A_ST_M45PE20:
14920         case FLASH_5720VENDOR_M_ST_M25PE40:
14921         case FLASH_5720VENDOR_M_ST_M45PE40:
14922         case FLASH_5720VENDOR_A_ST_M25PE40:
14923         case FLASH_5720VENDOR_A_ST_M45PE40:
14924         case FLASH_5720VENDOR_M_ST_M25PE80:
14925         case FLASH_5720VENDOR_M_ST_M45PE80:
14926         case FLASH_5720VENDOR_A_ST_M25PE80:
14927         case FLASH_5720VENDOR_A_ST_M45PE80:
14928         case FLASH_5720VENDOR_ST_25USPT:
14929         case FLASH_5720VENDOR_ST_45USPT:
14930                 tp->nvram_jedecnum = JEDEC_ST;
14931                 tg3_flag_set(tp, NVRAM_BUFFERED);
14932                 tg3_flag_set(tp, FLASH);
14933
14934                 switch (nvmpinstrp) {
14935                 case FLASH_5720VENDOR_M_ST_M25PE20:
14936                 case FLASH_5720VENDOR_M_ST_M45PE20:
14937                 case FLASH_5720VENDOR_A_ST_M25PE20:
14938                 case FLASH_5720VENDOR_A_ST_M45PE20:
14939                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14940                         break;
14941                 case FLASH_5720VENDOR_M_ST_M25PE40:
14942                 case FLASH_5720VENDOR_M_ST_M45PE40:
14943                 case FLASH_5720VENDOR_A_ST_M25PE40:
14944                 case FLASH_5720VENDOR_A_ST_M45PE40:
14945                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14946                         break;
14947                 case FLASH_5720VENDOR_M_ST_M25PE80:
14948                 case FLASH_5720VENDOR_M_ST_M45PE80:
14949                 case FLASH_5720VENDOR_A_ST_M25PE80:
14950                 case FLASH_5720VENDOR_A_ST_M45PE80:
14951                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14952                         break;
14953                 default:
14954                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14955                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14956                         break;
14957                 }
14958                 break;
14959         default:
14960                 tg3_flag_set(tp, NO_NVRAM);
14961                 return;
14962         }
14963
14964         tg3_nvram_get_pagesize(tp, nvcfg1);
14965         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14966                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14967
14968         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14969                 u32 val;
14970
14971                 if (tg3_nvram_read(tp, 0, &val))
14972                         return;
14973
14974                 if (val != TG3_EEPROM_MAGIC &&
14975                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14976                         tg3_flag_set(tp, NO_NVRAM);
14977         }
14978 }
14979
14980 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14981 static void tg3_nvram_init(struct tg3 *tp)
14982 {
14983         if (tg3_flag(tp, IS_SSB_CORE)) {
14984                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14985                 tg3_flag_clear(tp, NVRAM);
14986                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14987                 tg3_flag_set(tp, NO_NVRAM);
14988                 return;
14989         }
14990
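        /* Reset the EEPROM access state machine and program the default
         * clock period before probing.
         */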
14991         tw32_f(GRC_EEPROM_ADDR,
14992              (EEPROM_ADDR_FSM_RESET |
14993               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14994                EEPROM_ADDR_CLKPERD_SHIFT)));
14995
14996         msleep(1);
14997
14998         /* Enable serial EEPROM (seeprom) accesses. */
14999         tw32_f(GRC_LOCAL_CTRL,
15000              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
15001         udelay(100);
15002
15003         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15004             tg3_asic_rev(tp) != ASIC_REV_5701) {
15005                 tg3_flag_set(tp, NVRAM);
15006
15007                 if (tg3_nvram_lock(tp)) {
15008                         netdev_warn(tp->dev,
15009                                     "Cannot get nvram lock, %s failed\n",
15010                                     __func__);
15011                         return;
15012                 }
15013                 tg3_enable_nvram_access(tp);
15014
15015                 tp->nvram_size = 0;
15016
15017                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
15018                         tg3_get_5752_nvram_info(tp);
15019                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
15020                         tg3_get_5755_nvram_info(tp);
15021                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
15022                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
15023                          tg3_asic_rev(tp) == ASIC_REV_5785)
15024                         tg3_get_5787_nvram_info(tp);
15025                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15026                         tg3_get_5761_nvram_info(tp);
15027                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15028                         tg3_get_5906_nvram_info(tp);
15029                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15030                          tg3_flag(tp, 57765_CLASS))
15031                         tg3_get_57780_nvram_info(tp);
15032                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15033                          tg3_asic_rev(tp) == ASIC_REV_5719)
15034                         tg3_get_5717_nvram_info(tp);
15035                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15036                          tg3_asic_rev(tp) == ASIC_REV_5762)
15037                         tg3_get_5720_nvram_info(tp);
15038                 else
15039                         tg3_get_nvram_info(tp);
15040
15041                 if (tp->nvram_size == 0)
15042                         tg3_get_nvram_size(tp);
15043
15044                 tg3_disable_nvram_access(tp);
15045                 tg3_nvram_unlock(tp);
15046
15047         } else {
15048                 tg3_flag_clear(tp, NVRAM);
15049                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15050
15051                 tg3_get_eeprom_size(tp);
15052         }
15053 }
15054
15055 struct subsys_tbl_ent {
15056         u16 subsys_vendor, subsys_devid;
15057         u32 phy_id;
15058 };
15059
15060 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15061         /* Broadcom boards. */
15062         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15063           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15064         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15065           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15066         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15067           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15068         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15069           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15070         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15071           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15072         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15073           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15074         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15075           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15076         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15077           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15078         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15079           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15080         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15081           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15082         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15083           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15084
15085         /* 3com boards. */
15086         { TG3PCI_SUBVENDOR_ID_3COM,
15087           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15088         { TG3PCI_SUBVENDOR_ID_3COM,
15089           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15090         { TG3PCI_SUBVENDOR_ID_3COM,
15091           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15092         { TG3PCI_SUBVENDOR_ID_3COM,
15093           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15094         { TG3PCI_SUBVENDOR_ID_3COM,
15095           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15096
15097         /* DELL boards. */
15098         { TG3PCI_SUBVENDOR_ID_DELL,
15099           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15100         { TG3PCI_SUBVENDOR_ID_DELL,
15101           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15102         { TG3PCI_SUBVENDOR_ID_DELL,
15103           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15104         { TG3PCI_SUBVENDOR_ID_DELL,
15105           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15106
15107         /* Compaq boards. */
15108         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15109           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15110         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15111           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15112         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15113           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15114         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15115           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15116         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15117           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15118
15119         /* IBM boards. */
15120         { TG3PCI_SUBVENDOR_ID_IBM,
15121           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15122 };
15123
15124 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15125 {
15126         int i;
15127
15128         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15129                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15130                      tp->pdev->subsystem_vendor) &&
15131                     (subsys_id_to_phy_id[i].subsys_devid ==
15132                      tp->pdev->subsystem_device))
15133                         return &subsys_id_to_phy_id[i];
15134         }
15135         return NULL;
15136 }
15137
15138 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15139 {
15140         u32 val;
15141
15142         tp->phy_id = TG3_PHY_ID_INVALID;
15143         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15144
15145         /* Assume an onboard, WOL-capable device by default. */
15146         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15147         tg3_flag_set(tp, WOL_CAP);
15148
15149         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15150                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15151                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15152                         tg3_flag_set(tp, IS_NIC);
15153                 }
15154                 val = tr32(VCPU_CFGSHDW);
15155                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15156                         tg3_flag_set(tp, ASPM_WORKAROUND);
15157                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15158                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15159                         tg3_flag_set(tp, WOL_ENABLE);
15160                         device_set_wakeup_enable(&tp->pdev->dev, true);
15161                 }
15162                 goto done;
15163         }
15164
15165         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15166         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15167                 u32 nic_cfg, led_cfg;
15168                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15169                 u32 nic_phy_id, ver, eeprom_phy_id;
15170                 int eeprom_phy_serdes = 0;
15171
15172                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15173                 tp->nic_sram_data_cfg = nic_cfg;
15174
15175                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15176                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15177                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15178                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15179                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15180                     (ver > 0) && (ver < 0x100))
15181                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15182
15183                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15184                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15185
15186                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15187                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15188                     tg3_asic_rev(tp) == ASIC_REV_5720)
15189                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15190
15191                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15192                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15193                         eeprom_phy_serdes = 1;
15194
15195                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15196                 if (nic_phy_id != 0) {
15197                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15198                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15199
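                        /* Repack the two halves stored in SRAM into the
                         * 32-bit PHY ID format the driver compares against
                         * (TG3_PHY_ID_*).
                         */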
15200                         eeprom_phy_id  = (id1 >> 16) << 10;
15201                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15202                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15203                 } else
15204                         eeprom_phy_id = 0;
15205
15206                 tp->phy_id = eeprom_phy_id;
15207                 if (eeprom_phy_serdes) {
15208                         if (!tg3_flag(tp, 5705_PLUS))
15209                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15210                         else
15211                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15212                 }
15213
15214                 if (tg3_flag(tp, 5750_PLUS))
15215                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15216                                     SHASTA_EXT_LED_MODE_MASK);
15217                 else
15218                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15219
15220                 switch (led_cfg) {
15221                 default:
15222                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15223                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15224                         break;
15225
15226                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15227                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15228                         break;
15229
15230                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15231                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15232
15233                         /* Some older 5700/5701 bootcode leaves 0
15234                          * (MAC_MODE) here; default to PHY_1_MODE.
15235                          */
15236                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15237                             tg3_asic_rev(tp) == ASIC_REV_5701)
15238                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15239
15240                         break;
15241
15242                 case SHASTA_EXT_LED_SHARED:
15243                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15244                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15245                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15246                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15247                                                  LED_CTRL_MODE_PHY_2);
15248
15249                         if (tg3_flag(tp, 5717_PLUS) ||
15250                             tg3_asic_rev(tp) == ASIC_REV_5762)
15251                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15252                                                 LED_CTRL_BLINK_RATE_MASK;
15253
15254                         break;
15255
15256                 case SHASTA_EXT_LED_MAC:
15257                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15258                         break;
15259
15260                 case SHASTA_EXT_LED_COMBO:
15261                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15262                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15263                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15264                                                  LED_CTRL_MODE_PHY_2);
15265                         break;
15266
15267                 }
15268
15269                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15270                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15271                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15272                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15273
15274                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15275                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15276
15277                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15278                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15279                         if ((tp->pdev->subsystem_vendor ==
15280                              PCI_VENDOR_ID_ARIMA) &&
15281                             (tp->pdev->subsystem_device == 0x205a ||
15282                              tp->pdev->subsystem_device == 0x2063))
15283                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15284                 } else {
15285                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15286                         tg3_flag_set(tp, IS_NIC);
15287                 }
15288
15289                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15290                         tg3_flag_set(tp, ENABLE_ASF);
15291                         if (tg3_flag(tp, 5750_PLUS))
15292                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15293                 }
15294
15295                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15296                     tg3_flag(tp, 5750_PLUS))
15297                         tg3_flag_set(tp, ENABLE_APE);
15298
15299                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15300                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15301                         tg3_flag_clear(tp, WOL_CAP);
15302
15303                 if (tg3_flag(tp, WOL_CAP) &&
15304                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15305                         tg3_flag_set(tp, WOL_ENABLE);
15306                         device_set_wakeup_enable(&tp->pdev->dev, true);
15307                 }
15308
15309                 if (cfg2 & (1 << 17))
15310                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15311
15312                 /* SerDes signal pre-emphasis in register 0x590 is set by
15313                  * the bootcode if bit 18 is set. */
15314                 if (cfg2 & (1 << 18))
15315                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15316
15317                 if ((tg3_flag(tp, 57765_PLUS) ||
15318                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15319                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15320                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15321                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15322
15323                 if (tg3_flag(tp, PCI_EXPRESS)) {
15324                         u32 cfg3;
15325
15326                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15327                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15328                             !tg3_flag(tp, 57765_PLUS) &&
15329                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15330                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15331                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15332                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15333                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15334                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15335                 }
15336
15337                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15338                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15339                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15340                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15341                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15342                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15343
15344                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15345                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15346         }
15347 done:
15348         if (tg3_flag(tp, WOL_CAP))
15349                 device_set_wakeup_enable(&tp->pdev->dev,
15350                                          tg3_flag(tp, WOL_ENABLE));
15351         else
15352                 device_set_wakeup_capable(&tp->pdev->dev, false);
15353 }
15354
15355 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15356 {
15357         int i, err;
15358         u32 val2, off = offset * 8;
15359
15360         err = tg3_nvram_lock(tp);
15361         if (err)
15362                 return err;
15363
15364         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15365         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15366                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15367         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15368         udelay(10);
15369
15370         for (i = 0; i < 100; i++) {
15371                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15372                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15373                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15374                         break;
15375                 }
15376                 udelay(10);
15377         }
15378
15379         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15380
15381         tg3_nvram_unlock(tp);
15382         if (val2 & APE_OTP_STATUS_CMD_DONE)
15383                 return 0;
15384
15385         return -EBUSY;
15386 }
15387
15388 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15389 {
15390         int i;
15391         u32 val;
15392
15393         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15394         tw32(OTP_CTRL, cmd);
15395
15396         /* Wait for up to 1 ms for command to execute. */
15397         for (i = 0; i < 100; i++) {
15398                 val = tr32(OTP_STATUS);
15399                 if (val & OTP_STATUS_CMD_DONE)
15400                         break;
15401                 udelay(10);
15402         }
15403
15404         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15405 }
15406
15407 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15408  * configuration is a 32-bit value that straddles the alignment boundary.
15409  * We do two 32-bit reads and then shift and merge the results.
15410  */
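/* Worked example (hypothetical values): if the MAGIC1 read returns
 * thalf_otp = 0x1234abcd and the MAGIC2 read returns bhalf_otp =
 * 0x5678ef01, the merged gphy configuration is
 *
 *	((0x1234abcd & 0x0000ffff) << 16) | (0x5678ef01 >> 16)
 *		= 0xabcd0000 | 0x00005678 = 0xabcd5678
 */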
15411 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15412 {
15413         u32 bhalf_otp, thalf_otp;
15414
15415         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15416
15417         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15418                 return 0;
15419
15420         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15421
15422         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15423                 return 0;
15424
15425         thalf_otp = tr32(OTP_READ_DATA);
15426
15427         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15428
15429         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15430                 return 0;
15431
15432         bhalf_otp = tr32(OTP_READ_DATA);
15433
15434         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15435 }
15436
15437 static void tg3_phy_init_link_config(struct tg3 *tp)
15438 {
15439         u32 adv = ADVERTISED_Autoneg;
15440
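        /* Build the default advertising mask: gigabit modes unless the PHY
         * is 10/100-only (with 1000BASE-T half duplex optionally
         * suppressed), plus the 10/100 copper modes for non-SerDes PHYs.
         */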
15441         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15442                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15443                         adv |= ADVERTISED_1000baseT_Half;
15444                 adv |= ADVERTISED_1000baseT_Full;
15445         }
15446
15447         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15448                 adv |= ADVERTISED_100baseT_Half |
15449                        ADVERTISED_100baseT_Full |
15450                        ADVERTISED_10baseT_Half |
15451                        ADVERTISED_10baseT_Full |
15452                        ADVERTISED_TP;
15453         else
15454                 adv |= ADVERTISED_FIBRE;
15455
15456         tp->link_config.advertising = adv;
15457         tp->link_config.speed = SPEED_UNKNOWN;
15458         tp->link_config.duplex = DUPLEX_UNKNOWN;
15459         tp->link_config.autoneg = AUTONEG_ENABLE;
15460         tp->link_config.active_speed = SPEED_UNKNOWN;
15461         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15462
15463         tp->old_link = -1;
15464 }
15465
15466 static int tg3_phy_probe(struct tg3 *tp)
15467 {
15468         u32 hw_phy_id_1, hw_phy_id_2;
15469         u32 hw_phy_id, hw_phy_id_masked;
15470         int err;
15471
15472         /* flow control autonegotiation is default behavior */
15473         tg3_flag_set(tp, PAUSE_AUTONEG);
15474         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15475
15476         if (tg3_flag(tp, ENABLE_APE)) {
15477                 switch (tp->pci_fn) {
15478                 case 0:
15479                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15480                         break;
15481                 case 1:
15482                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15483                         break;
15484                 case 2:
15485                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15486                         break;
15487                 case 3:
15488                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15489                         break;
15490                 }
15491         }
15492
15493         if (!tg3_flag(tp, ENABLE_ASF) &&
15494             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15495             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15496                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15497                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15498
15499         if (tg3_flag(tp, USE_PHYLIB))
15500                 return tg3_phy_init(tp);
15501
15502         /* Reading the PHY ID register can conflict with ASF
15503          * firmware access to the PHY hardware.
15504          */
15505         err = 0;
15506         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15507                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15508         } else {
15509                 /* Now read the physical PHY_ID from the chip and verify
15510                  * that it is sane.  If it doesn't look good, we fall back
15511                  * to the hard-coded, table-based PHY_ID and, failing
15512                  * that, to the value found in the eeprom area.
15513                  */
15514                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15515                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15516
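                /* The 32-bit tg3 PHY ID packs the two MII ID registers as:
                 * bits 31:26 = PHYSID2[15:10], bits 25:10 = PHYSID1[15:0],
                 * and bits 9:0 = PHYSID2[9:0], matching the shifts below.
                 */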
15517                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15518                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15519                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15520
15521                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15522         }
15523
15524         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15525                 tp->phy_id = hw_phy_id;
15526                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15527                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15528                 else
15529                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15530         } else {
15531                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15532                         /* Do nothing; the phy ID was already set up in
15533                          * tg3_get_eeprom_hw_cfg().
15534                          */
15535                 } else {
15536                         struct subsys_tbl_ent *p;
15537
15538                         /* No eeprom signature?  Try the hardcoded
15539                          * subsys device table.
15540                          */
15541                         p = tg3_lookup_by_subsys(tp);
15542                         if (p) {
15543                                 tp->phy_id = p->phy_id;
15544                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15545                                 /* So far we have seen the IDs 0xbc050cd0,
15546                                  * 0xbc050f80 and 0xbc050c30 on devices
15547                                  * connected to a BCM4785, and there are
15548                                  * probably more.  For now, just assume
15549                                  * that the phy is supported when it is
15550                                  * connected to an SSB core.
15551                                  */
15552                                 return -ENODEV;
15553                         }
15554
15555                         if (!tp->phy_id ||
15556                             tp->phy_id == TG3_PHY_ID_BCM8002)
15557                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15558                 }
15559         }
15560
15561         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15562             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15563              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15564              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15565              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15566              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15567               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15568              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15569               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15570                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15571
15572                 tp->eee.supported = SUPPORTED_100baseT_Full |
15573                                     SUPPORTED_1000baseT_Full;
15574                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15575                                      ADVERTISED_1000baseT_Full;
15576                 tp->eee.eee_enabled = 1;
15577                 tp->eee.tx_lpi_enabled = 1;
15578                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15579         }
15580
15581         tg3_phy_init_link_config(tp);
15582
15583         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15584             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15585             !tg3_flag(tp, ENABLE_APE) &&
15586             !tg3_flag(tp, ENABLE_ASF)) {
15587                 u32 bmsr, dummy;
15588
15589                 tg3_readphy(tp, MII_BMSR, &bmsr);
15590                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15591                     (bmsr & BMSR_LSTATUS))
15592                         goto skip_phy_reset;
15593
15594                 err = tg3_phy_reset(tp);
15595                 if (err)
15596                         return err;
15597
15598                 tg3_phy_set_wirespeed(tp);
15599
15600                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15601                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15602                                             tp->link_config.flowctrl);
15603
15604                         tg3_writephy(tp, MII_BMCR,
15605                                      BMCR_ANENABLE | BMCR_ANRESTART);
15606                 }
15607         }
15608
15609 skip_phy_reset:
15610         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15611                 err = tg3_init_5401phy_dsp(tp);
15612                 if (err)
15613                         return err;
15614
15615                 err = tg3_init_5401phy_dsp(tp); /* second pass */
15616         }
15617
15618         return err;
15619 }
15620
15621 static void tg3_read_vpd(struct tg3 *tp)
15622 {
15623         u8 *vpd_data;
15624         unsigned int block_end, rosize, len;
15625         u32 vpdlen;
15626         int j, i = 0;
15627
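        /* Parse the VPD image in place: locate the read-only LRDT section,
         * bound it by its length header, then look up the MFR_ID, VENDOR0
         * and PARTNO keywords inside it.  The MFR_ID match against "1028"
         * (Dell) gates use of the VENDOR0 field as a firmware version.
         */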
15628         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15629         if (!vpd_data)
15630                 goto out_no_vpd;
15631
15632         i = pci_vpd_find_tag(vpd_data, vpdlen, PCI_VPD_LRDT_RO_DATA);
15633         if (i < 0)
15634                 goto out_not_found;
15635
15636         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15637         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15638         i += PCI_VPD_LRDT_TAG_SIZE;
15639
15640         if (block_end > vpdlen)
15641                 goto out_not_found;
15642
15643         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15644                                       PCI_VPD_RO_KEYWORD_MFR_ID);
15645         if (j > 0) {
15646                 len = pci_vpd_info_field_size(&vpd_data[j]);
15647
15648                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15649                 if (j + len > block_end || len != 4 ||
15650                     memcmp(&vpd_data[j], "1028", 4))
15651                         goto partno;
15652
15653                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15654                                               PCI_VPD_RO_KEYWORD_VENDOR0);
15655                 if (j < 0)
15656                         goto partno;
15657
15658                 len = pci_vpd_info_field_size(&vpd_data[j]);
15659
15660                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15661                 if (j + len > block_end)
15662                         goto partno;
15663
15664                 if (len >= sizeof(tp->fw_ver))
15665                         len = sizeof(tp->fw_ver) - 1;
15666                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15667                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15668                          &vpd_data[j]);
15669         }
15670
15671 partno:
15672         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15673                                       PCI_VPD_RO_KEYWORD_PARTNO);
15674         if (i < 0)
15675                 goto out_not_found;
15676
15677         len = pci_vpd_info_field_size(&vpd_data[i]);
15678
15679         i += PCI_VPD_INFO_FLD_HDR_SIZE;
15680         if (len > TG3_BPN_SIZE ||
15681             (len + i) > vpdlen)
15682                 goto out_not_found;
15683
15684         memcpy(tp->board_part_number, &vpd_data[i], len);
15685
15686 out_not_found:
15687         kfree(vpd_data);
15688         if (tp->board_part_number[0])
15689                 return;
15690
15691 out_no_vpd:
15692         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15693                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15694                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15695                         strcpy(tp->board_part_number, "BCM5717");
15696                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15697                         strcpy(tp->board_part_number, "BCM5718");
15698                 else
15699                         goto nomatch;
15700         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15701                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15702                         strcpy(tp->board_part_number, "BCM57780");
15703                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15704                         strcpy(tp->board_part_number, "BCM57760");
15705                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15706                         strcpy(tp->board_part_number, "BCM57790");
15707                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15708                         strcpy(tp->board_part_number, "BCM57788");
15709                 else
15710                         goto nomatch;
15711         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15712                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15713                         strcpy(tp->board_part_number, "BCM57761");
15714                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15715                         strcpy(tp->board_part_number, "BCM57765");
15716                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15717                         strcpy(tp->board_part_number, "BCM57781");
15718                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15719                         strcpy(tp->board_part_number, "BCM57785");
15720                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15721                         strcpy(tp->board_part_number, "BCM57791");
15722                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15723                         strcpy(tp->board_part_number, "BCM57795");
15724                 else
15725                         goto nomatch;
15726         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15727                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15728                         strcpy(tp->board_part_number, "BCM57762");
15729                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15730                         strcpy(tp->board_part_number, "BCM57766");
15731                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15732                         strcpy(tp->board_part_number, "BCM57782");
15733                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15734                         strcpy(tp->board_part_number, "BCM57786");
15735                 else
15736                         goto nomatch;
15737         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15738                 strcpy(tp->board_part_number, "BCM95906");
15739         } else {
15740 nomatch:
15741                 strcpy(tp->board_part_number, "none");
15742         }
15743 }
15744
15745 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15746 {
15747         u32 val;
15748
15749         if (tg3_nvram_read(tp, offset, &val) ||
15750             (val & 0xfc000000) != 0x0c000000 ||
15751             tg3_nvram_read(tp, offset + 4, &val) ||
15752             val != 0)
15753                 return 0;
15754
15755         return 1;
15756 }
15757
15758 static void tg3_read_bc_ver(struct tg3 *tp)
15759 {
15760         u32 val, offset, start, ver_offset;
15761         int i, dst_off;
15762         bool newver = false;
15763
15764         if (tg3_nvram_read(tp, 0xc, &offset) ||
15765             tg3_nvram_read(tp, 0x4, &start))
15766                 return;
15767
15768         offset = tg3_nvram_logical_addr(tp, offset);
15769
15770         if (tg3_nvram_read(tp, offset, &val))
15771                 return;
15772
15773         if ((val & 0xfc000000) == 0x0c000000) {
15774                 if (tg3_nvram_read(tp, offset + 4, &val))
15775                         return;
15776
15777                 if (val == 0)
15778                         newver = true;
15779         }
15780
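        /* A bootcode image whose first word is 0x0cxxxxxx and whose second
         * word is zero embeds a 16-byte version string at the offset given
         * by its third word; older images only carry a major/minor pair in
         * the NVRAM directory (TG3_NVM_PTREV_BCVER), handled below.
         */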
15781         dst_off = strlen(tp->fw_ver);
15782
15783         if (newver) {
15784                 if (TG3_VER_SIZE - dst_off < 16 ||
15785                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15786                         return;
15787
15788                 offset = offset + ver_offset - start;
15789                 for (i = 0; i < 16; i += 4) {
15790                         __be32 v;
15791                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15792                                 return;
15793
15794                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15795                 }
15796         } else {
15797                 u32 major, minor;
15798
15799                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15800                         return;
15801
15802                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15803                         TG3_NVM_BCVER_MAJSFT;
15804                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15805                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15806                          "v%d.%02d", major, minor);
15807         }
15808 }
15809
15810 static void tg3_read_hwsb_ver(struct tg3 *tp)
15811 {
15812         u32 val, major, minor;
15813
15814         /* Use native endian representation */
15815         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15816                 return;
15817
15818         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15819                 TG3_NVM_HWSB_CFG1_MAJSFT;
15820         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15821                 TG3_NVM_HWSB_CFG1_MINSFT;
15822
15823         snprintf(tp->fw_ver, TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15824 }
15825
15826 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15827 {
15828         u32 offset, major, minor, build;
15829
15830         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15831
15832         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15833                 return;
15834
15835         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15836         case TG3_EEPROM_SB_REVISION_0:
15837                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15838                 break;
15839         case TG3_EEPROM_SB_REVISION_2:
15840                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15841                 break;
15842         case TG3_EEPROM_SB_REVISION_3:
15843                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15844                 break;
15845         case TG3_EEPROM_SB_REVISION_4:
15846                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15847                 break;
15848         case TG3_EEPROM_SB_REVISION_5:
15849                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15850                 break;
15851         case TG3_EEPROM_SB_REVISION_6:
15852                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15853                 break;
15854         default:
15855                 return;
15856         }
15857
15858         if (tg3_nvram_read(tp, offset, &val))
15859                 return;
15860
15861         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15862                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15863         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15864                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15865         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15866
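        /* The build number is rendered as a single trailing letter ('a' for
         * build 1 through 'z' for build 26) and the minor as two digits,
         * hence the bounds checked here.
         */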
15867         if (minor > 99 || build > 26)
15868                 return;
15869
15870         offset = strlen(tp->fw_ver);
15871         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15872                  " v%d.%02d", major, minor);
15873
15874         if (build > 0) {
15875                 offset = strlen(tp->fw_ver);
15876                 if (offset < TG3_VER_SIZE - 1)
15877                         tp->fw_ver[offset] = 'a' + build - 1;
15878         }
15879 }
15880
15881 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15882 {
15883         u32 val, offset, start;
15884         int i, vlen;
15885
15886         for (offset = TG3_NVM_DIR_START;
15887              offset < TG3_NVM_DIR_END;
15888              offset += TG3_NVM_DIRENT_SIZE) {
15889                 if (tg3_nvram_read(tp, offset, &val))
15890                         return;
15891
15892                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15893                         break;
15894         }
15895
15896         if (offset == TG3_NVM_DIR_END)
15897                 return;
15898
15899         if (!tg3_flag(tp, 5705_PLUS))
15900                 start = 0x08000000;
15901         else if (tg3_nvram_read(tp, offset - 4, &start))
15902                 return;
15903
15904         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15905             !tg3_fw_img_is_valid(tp, offset) ||
15906             tg3_nvram_read(tp, offset + 8, &val))
15907                 return;
15908
15909         offset += val - start;
15910
15911         vlen = strlen(tp->fw_ver);
15912
15913         tp->fw_ver[vlen++] = ',';
15914         tp->fw_ver[vlen++] = ' ';
15915
15916         for (i = 0; i < 4; i++) {
15917                 __be32 v;
15918                 if (tg3_nvram_read_be32(tp, offset, &v))
15919                         return;
15920
15921                 offset += sizeof(v);
15922
15923                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15924                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15925                         break;
15926                 }
15927
15928                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15929                 vlen += sizeof(v);
15930         }
15931 }
15932
15933 static void tg3_probe_ncsi(struct tg3 *tp)
15934 {
15935         u32 apedata;
15936
15937         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15938         if (apedata != APE_SEG_SIG_MAGIC)
15939                 return;
15940
15941         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15942         if (!(apedata & APE_FW_STATUS_READY))
15943                 return;
15944
15945         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15946                 tg3_flag_set(tp, APE_HAS_NCSI);
15947 }
15948
15949 static void tg3_read_dash_ver(struct tg3 *tp)
15950 {
15951         int vlen;
15952         u32 apedata;
15953         char *fwtype;
15954
15955         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15956
15957         if (tg3_flag(tp, APE_HAS_NCSI))
15958                 fwtype = "NCSI";
15959         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15960                 fwtype = "SMASH";
15961         else
15962                 fwtype = "DASH";
15963
15964         vlen = strlen(tp->fw_ver);
15965
15966         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15967                  fwtype,
15968                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15969                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15970                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15971                  (apedata & APE_FW_VERSION_BLDMSK));
15972 }
15973
15974 static void tg3_read_otp_ver(struct tg3 *tp)
15975 {
15976         u32 val, val2;
15977
15978         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15979                 return;
15980
15981         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15982             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15983             TG3_OTP_MAGIC0_VALID(val)) {
15984                 u64 val64 = (u64) val << 32 | val2;
15985                 u32 ver = 0;
15986                 int i, vlen;
15987
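                /* Scan the low seven bytes of the OTP word from least
                 * significant upward, stopping at the first zero byte;
                 * the last non-zero byte seen is the version number.
                 */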
15988                 for (i = 0; i < 7; i++) {
15989                         if ((val64 & 0xff) == 0)
15990                                 break;
15991                         ver = val64 & 0xff;
15992                         val64 >>= 8;
15993                 }
15994                 vlen = strlen(tp->fw_ver);
15995                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15996         }
15997 }
15998
15999 static void tg3_read_fw_ver(struct tg3 *tp)
16000 {
16001         u32 val;
16002         bool vpd_vers = false;
16003
16004         if (tp->fw_ver[0] != 0)
16005                 vpd_vers = true;
16006
16007         if (tg3_flag(tp, NO_NVRAM)) {
16008                 strcat(tp->fw_ver, "sb");
16009                 tg3_read_otp_ver(tp);
16010                 return;
16011         }
16012
16013         if (tg3_nvram_read(tp, 0, &val))
16014                 return;
16015
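        /* Dispatch on the NVRAM signature word: full bootcode images,
         * firmware self-boot images and hardware self-boot images each
         * store their version information in a different format.
         */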
16016         if (val == TG3_EEPROM_MAGIC)
16017                 tg3_read_bc_ver(tp);
16018         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
16019                 tg3_read_sb_ver(tp, val);
16020         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
16021                 tg3_read_hwsb_ver(tp);
16022
16023         if (tg3_flag(tp, ENABLE_ASF)) {
16024                 if (tg3_flag(tp, ENABLE_APE)) {
16025                         tg3_probe_ncsi(tp);
16026                         if (!vpd_vers)
16027                                 tg3_read_dash_ver(tp);
16028                 } else if (!vpd_vers) {
16029                         tg3_read_mgmtfw_ver(tp);
16030                 }
16031         }
16032
16033         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
16034 }
16035
16036 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
16037 {
16038         if (tg3_flag(tp, LRG_PROD_RING_CAP))
16039                 return TG3_RX_RET_MAX_SIZE_5717;
16040         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
16041                 return TG3_RX_RET_MAX_SIZE_5700;
16042         else
16043                 return TG3_RX_RET_MAX_SIZE_5705;
16044 }
16045
16046 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
16047         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
16048         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
16049         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
16050         { },
16051 };
16052
16053 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16054 {
16055         struct pci_dev *peer;
16056         unsigned int func, devnr = tp->pdev->devfn & ~7;
16057
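        /* devfn encodes the function number in its low three bits, so
         * devnr | func walks all eight possible functions of this slot.
         */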
16058         for (func = 0; func < 8; func++) {
16059                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16060                 if (peer && peer != tp->pdev)
16061                         break;
16062                 pci_dev_put(peer);
16063         }
16064         /* 5704 can be configured in single-port mode; set peer to
16065          * tp->pdev in that case.
16066          */
16067         if (!peer) {
16068                 peer = tp->pdev;
16069                 return peer;
16070         }
16071
16072         /*
16073          * We don't need to keep the refcount elevated; there's no way
16074          * to remove one half of this device without removing the other.
16075          */
16076         pci_dev_put(peer);
16077
16078         return peer;
16079 }
16080
16081 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16082 {
16083         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16084         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16085                 u32 reg;
16086
16087                 /* All devices that use the alternate
16088                  * ASIC REV location have a CPMU.
16089                  */
16090                 tg3_flag_set(tp, CPMU_PRESENT);
16091
16092                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16093                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16094                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16095                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16096                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16097                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16098                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16099                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16100                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16101                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16102                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16103                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16104                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16105                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16106                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16107                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16108                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16109                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16110                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16111                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16112                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16113                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16114                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16115                 else
16116                         reg = TG3PCI_PRODID_ASICREV;
16117
16118                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16119         }
16120
16121         /* Wrong chip ID in 5752 A0. This code can be removed later
16122          * as A0 is not in production.
16123          */
16124         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16125                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16126
16127         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16128                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16129
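        /* The *_PLUS feature flags set below form a superset chain:
         * 5717_PLUS and 57765_CLASS imply 57765_PLUS, which implies
         * 5755_PLUS, which implies 5750_PLUS, which implies 5705_PLUS.
         */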
16130         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16131             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16132             tg3_asic_rev(tp) == ASIC_REV_5720)
16133                 tg3_flag_set(tp, 5717_PLUS);
16134
16135         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16136             tg3_asic_rev(tp) == ASIC_REV_57766)
16137                 tg3_flag_set(tp, 57765_CLASS);
16138
16139         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16140              tg3_asic_rev(tp) == ASIC_REV_5762)
16141                 tg3_flag_set(tp, 57765_PLUS);
16142
16143         /* Intentionally exclude ASIC_REV_5906 */
16144         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16145             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16146             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16147             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16148             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16149             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16150             tg3_flag(tp, 57765_PLUS))
16151                 tg3_flag_set(tp, 5755_PLUS);
16152
16153         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16154             tg3_asic_rev(tp) == ASIC_REV_5714)
16155                 tg3_flag_set(tp, 5780_CLASS);
16156
16157         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16158             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16159             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16160             tg3_flag(tp, 5755_PLUS) ||
16161             tg3_flag(tp, 5780_CLASS))
16162                 tg3_flag_set(tp, 5750_PLUS);
16163
16164         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16165             tg3_flag(tp, 5750_PLUS))
16166                 tg3_flag_set(tp, 5705_PLUS);
16167 }
16168
16169 static bool tg3_10_100_only_device(struct tg3 *tp,
16170                                    const struct pci_device_id *ent)
16171 {
16172         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16173
16174         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16175              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16176             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16177                 return true;
16178
16179         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16180                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16181                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16182                                 return true;
16183                 } else {
16184                         return true;
16185                 }
16186         }
16187
16188         return false;
16189 }
16190
16191 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16192 {
16193         u32 misc_ctrl_reg;
16194         u32 pci_state_reg, grc_misc_cfg;
16195         u32 val;
16196         u16 pci_cmd;
16197         int err;
16198
16199         /* Force memory write invalidate off.  If we leave it on,
16200          * then on 5700_BX chips we have to enable a workaround.
16201          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16202          * to match the cacheline size.  The Broadcom driver has this
16203          * workaround but turns MWI off all the time and so never uses
16204          * it.  This suggests that the workaround is insufficient.
16205          */
16206         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16207         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16208         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16209
16210         /* Important! -- Make sure register accesses are byteswapped
16211          * correctly.  Also, for those chips that require it, make
16212          * sure that indirect register accesses are enabled before
16213          * the first operation.
16214          */
16215         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16216                               &misc_ctrl_reg);
16217         tp->misc_host_ctrl |= (misc_ctrl_reg &
16218                                MISC_HOST_CTRL_CHIPREV);
16219         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16220                                tp->misc_host_ctrl);
16221
16222         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16223
16224         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16225          * we need to disable memory and use config. cycles
16226          * only to access all registers. The 5702/03 chips
16227          * can mistakenly decode the special cycles from the
16228          * ICH chipsets as memory write cycles, causing corruption
16229          * of register and memory space. Only certain ICH bridges
16230          * will drive special cycles with non-zero data during the
16231          * address phase which can fall within the 5703's address
16232          * range. This is not an ICH bug as the PCI spec allows
16233          * non-zero address during special cycles. However, only
16234          * these ICH bridges are known to drive non-zero addresses
16235          * during special cycles.
16236          *
16237          * Since special cycles do not cross PCI bridges, we only
16238          * enable this workaround if the 5703 is on the secondary
16239          * bus of these ICH bridges.
16240          */
16241         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16242             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16243                 static struct tg3_dev_id {
16244                         u32     vendor;
16245                         u32     device;
16246                         u32     rev;
16247                 } ich_chipsets[] = {
16248                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16249                           PCI_ANY_ID },
16250                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16251                           PCI_ANY_ID },
16252                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16253                           0xa },
16254                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16255                           PCI_ANY_ID },
16256                         { },
16257                 };
16258                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16259                 struct pci_dev *bridge = NULL;
16260
16261                 while (pci_id->vendor != 0) {
16262                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16263                                                 bridge);
16264                         if (!bridge) {
16265                                 pci_id++;
16266                                 continue;
16267                         }
16268                         if (pci_id->rev != PCI_ANY_ID) {
16269                                 if (bridge->revision > pci_id->rev)
16270                                         continue;
16271                         }
16272                         if (bridge->subordinate &&
16273                             (bridge->subordinate->number ==
16274                              tp->pdev->bus->number)) {
16275                                 tg3_flag_set(tp, ICH_WORKAROUND);
16276                                 pci_dev_put(bridge);
16277                                 break;
16278                         }
16279                 }
16280         }
16281
16282         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16283                 static struct tg3_dev_id {
16284                         u32     vendor;
16285                         u32     device;
16286                 } bridge_chipsets[] = {
16287                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16288                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16289                         { },
16290                 };
16291                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16292                 struct pci_dev *bridge = NULL;
16293
16294                 while (pci_id->vendor != 0) {
16295                         bridge = pci_get_device(pci_id->vendor,
16296                                                 pci_id->device,
16297                                                 bridge);
16298                         if (!bridge) {
16299                                 pci_id++;
16300                                 continue;
16301                         }
16302                         if (bridge->subordinate &&
16303                             (bridge->subordinate->number <=
16304                              tp->pdev->bus->number) &&
16305                             (bridge->subordinate->busn_res.end >=
16306                              tp->pdev->bus->number)) {
16307                                 tg3_flag_set(tp, 5701_DMA_BUG);
16308                                 pci_dev_put(bridge);
16309                                 break;
16310                         }
16311                 }
16312         }
16313
16314         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16315          * DMA addresses > 40-bit. This bridge may have additional
16316          * 57xx devices behind it in some 4-port NIC designs for example.
16317          * Any tg3 device found behind the bridge will also need the 40-bit
16318          * DMA workaround.
16319          */
16320         if (tg3_flag(tp, 5780_CLASS)) {
16321                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16322                 tp->msi_cap = tp->pdev->msi_cap;
16323         } else {
16324                 struct pci_dev *bridge = NULL;
16325
16326                 do {
16327                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16328                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16329                                                 bridge);
16330                         if (bridge && bridge->subordinate &&
16331                             (bridge->subordinate->number <=
16332                              tp->pdev->bus->number) &&
16333                             (bridge->subordinate->busn_res.end >=
16334                              tp->pdev->bus->number)) {
16335                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16336                                 pci_dev_put(bridge);
16337                                 break;
16338                         }
16339                 } while (bridge);
16340         }
16341
16342         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16343             tg3_asic_rev(tp) == ASIC_REV_5714)
16344                 tp->pdev_peer = tg3_find_peer(tp);
16345
16346         /* Determine TSO capabilities */
16347         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16348                 ; /* Do nothing. HW bug. */
16349         else if (tg3_flag(tp, 57765_PLUS))
16350                 tg3_flag_set(tp, HW_TSO_3);
16351         else if (tg3_flag(tp, 5755_PLUS) ||
16352                  tg3_asic_rev(tp) == ASIC_REV_5906)
16353                 tg3_flag_set(tp, HW_TSO_2);
16354         else if (tg3_flag(tp, 5750_PLUS)) {
16355                 tg3_flag_set(tp, HW_TSO_1);
16356                 tg3_flag_set(tp, TSO_BUG);
16357                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16358                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16359                         tg3_flag_clear(tp, TSO_BUG);
16360         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16361                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16362                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16363                 tg3_flag_set(tp, FW_TSO);
16364                 tg3_flag_set(tp, TSO_BUG);
16365                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16366                         tp->fw_needed = FIRMWARE_TG3TSO5;
16367                 else
16368                         tp->fw_needed = FIRMWARE_TG3TSO;
16369         }
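        /* In short: 57765_PLUS parts use HW_TSO_3, 5755_PLUS and 5906 parts
         * use HW_TSO_2, other 5750_PLUS parts use HW_TSO_1, older chips fall
         * back to firmware TSO, and 5719 A0 gets no TSO at all.
         */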
16370
16371         /* Selectively allow TSO based on operating conditions */
16372         if (tg3_flag(tp, HW_TSO_1) ||
16373             tg3_flag(tp, HW_TSO_2) ||
16374             tg3_flag(tp, HW_TSO_3) ||
16375             tg3_flag(tp, FW_TSO)) {
16376                 /* For firmware TSO, assume ASF is disabled.
16377                  * We'll disable TSO later if we discover ASF
16378                  * is enabled in tg3_get_eeprom_hw_cfg().
16379                  */
16380                 tg3_flag_set(tp, TSO_CAPABLE);
16381         } else {
16382                 tg3_flag_clear(tp, TSO_CAPABLE);
16383                 tg3_flag_clear(tp, TSO_BUG);
16384                 tp->fw_needed = NULL;
16385         }
16386
16387         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16388                 tp->fw_needed = FIRMWARE_TG3;
16389
16390         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16391                 tp->fw_needed = FIRMWARE_TG357766;
16392
16393         tp->irq_max = 1;
16394
16395         if (tg3_flag(tp, 5750_PLUS)) {
16396                 tg3_flag_set(tp, SUPPORT_MSI);
16397                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16398                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16399                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16400                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16401                      tp->pdev_peer == tp->pdev))
16402                         tg3_flag_clear(tp, SUPPORT_MSI);
16403
16404                 if (tg3_flag(tp, 5755_PLUS) ||
16405                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16406                         tg3_flag_set(tp, 1SHOT_MSI);
16407                 }
16408
16409                 if (tg3_flag(tp, 57765_PLUS)) {
16410                         tg3_flag_set(tp, SUPPORT_MSIX);
16411                         tp->irq_max = TG3_IRQ_MAX_VECS;
16412                 }
16413         }
16414
16415         tp->txq_max = 1;
16416         tp->rxq_max = 1;
16417         if (tp->irq_max > 1) {
16418                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16419                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16420
16421                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16422                     tg3_asic_rev(tp) == ASIC_REV_5720)
16423                         tp->txq_max = tp->irq_max - 1;
16424         }
16425
16426         if (tg3_flag(tp, 5755_PLUS) ||
16427             tg3_asic_rev(tp) == ASIC_REV_5906)
16428                 tg3_flag_set(tp, SHORT_DMA_BUG);
16429
16430         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16431                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16432
16433         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16434             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16435             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16436             tg3_asic_rev(tp) == ASIC_REV_5762)
16437                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16438
16439         if (tg3_flag(tp, 57765_PLUS) &&
16440             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16441                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16442
16443         if (!tg3_flag(tp, 5705_PLUS) ||
16444             tg3_flag(tp, 5780_CLASS) ||
16445             tg3_flag(tp, USE_JUMBO_BDFLAG))
16446                 tg3_flag_set(tp, JUMBO_CAPABLE);
16447
16448         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16449                               &pci_state_reg);
16450
16451         if (pci_is_pcie(tp->pdev)) {
16452                 u16 lnkctl;
16453
16454                 tg3_flag_set(tp, PCI_EXPRESS);
16455
16456                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16457                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16458                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16459                                 tg3_flag_clear(tp, HW_TSO_2);
16460                                 tg3_flag_clear(tp, TSO_CAPABLE);
16461                         }
16462                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16463                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16464                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16465                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16466                                 tg3_flag_set(tp, CLKREQ_BUG);
16467                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16468                         tg3_flag_set(tp, L1PLLPD_EN);
16469                 }
16470         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16471                 /* BCM5785 devices are effectively PCIe devices, and should
16472                  * follow PCIe codepaths, but do not have a PCIe capabilities
16473                  * section.
16474                  */
16475                 tg3_flag_set(tp, PCI_EXPRESS);
16476         } else if (!tg3_flag(tp, 5705_PLUS) ||
16477                    tg3_flag(tp, 5780_CLASS)) {
16478                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16479                 if (!tp->pcix_cap) {
16480                         dev_err(&tp->pdev->dev,
16481                                 "Cannot find PCI-X capability, aborting\n");
16482                         return -EIO;
16483                 }
16484
16485                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16486                         tg3_flag_set(tp, PCIX_MODE);
16487         }
16488
16489         /* If we have an AMD 762 or VIA K8T800 chipset, write
16490          * reordering to the mailbox registers done by the host
16491          * controller can cause major troubles.  We read back from
16492          * every mailbox register write to force the writes to be
16493          * posted to the chip in order.
16494          */
16495         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16496             !tg3_flag(tp, PCI_EXPRESS))
16497                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
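        /* The flush variants installed below (see tg3_write_flush_reg32())
         * achieve this by reading the register back right after the write:
         *
         *	writel(val, mbox);
         *	readl(mbox);	<-- forces the posted write to complete
         */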
16498
16499         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16500                              &tp->pci_cacheline_sz);
16501         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16502                              &tp->pci_lat_timer);
16503         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16504             tp->pci_lat_timer < 64) {
16505                 tp->pci_lat_timer = 64;
16506                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16507                                       tp->pci_lat_timer);
16508         }
16509
16510         /* Important! -- It is critical that the PCI-X hw workaround
16511          * situation is decided before the first MMIO register access.
16512          */
16513         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16514                 /* 5700 BX chips need to have their TX producer index
16515                  * mailboxes written twice to workaround a bug.
16516                  */
16517                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16518
16519                 /* If we are in PCI-X mode, enable register write workaround.
16520                  *
16521                  * The workaround is to use indirect register accesses
16522                  * for all chip writes not to mailbox registers.
16523                  */
16524                 if (tg3_flag(tp, PCIX_MODE)) {
16525                         u32 pm_reg;
16526
16527                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16528
16529                         /* The chip can have its power management PCI config
16530                          * space registers clobbered due to this bug.
16531                          * So explicitly force the chip into D0 here.
16532                          */
16533                         pci_read_config_dword(tp->pdev,
16534                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16535                                               &pm_reg);
16536                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16537                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16538                         pci_write_config_dword(tp->pdev,
16539                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16540                                                pm_reg);
16541
16542                         /* Also, force SERR#/PERR# in PCI command. */
16543                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16544                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16545                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16546                 }
16547         }
16548
16549         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16550                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16551         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16552                 tg3_flag_set(tp, PCI_32BIT);
16553
16554         /* Chip-specific fixup from Broadcom driver */
16555         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16556             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16557                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16558                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16559         }
16560
16561         /* Default fast path register access methods */
16562         tp->read32 = tg3_read32;
16563         tp->write32 = tg3_write32;
16564         tp->read32_mbox = tg3_read32;
16565         tp->write32_mbox = tg3_write32;
16566         tp->write32_tx_mbox = tg3_write32;
16567         tp->write32_rx_mbox = tg3_write32;
16568
16569         /* Various workaround register access methods */
16570         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16571                 tp->write32 = tg3_write_indirect_reg32;
16572         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16573                  (tg3_flag(tp, PCI_EXPRESS) &&
16574                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16575                 /*
16576                  * Back-to-back register writes can cause problems on these
16577                  * chips; the workaround is to read back all reg writes
16578                  * except those to mailbox regs.
16579                  *
16580                  * See tg3_write_indirect_reg32().
16581                  */
16582                 tp->write32 = tg3_write_flush_reg32;
16583         }
16584
16585         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16586                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16587                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16588                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16589         }
16590
16591         if (tg3_flag(tp, ICH_WORKAROUND)) {
16592                 tp->read32 = tg3_read_indirect_reg32;
16593                 tp->write32 = tg3_write_indirect_reg32;
16594                 tp->read32_mbox = tg3_read_indirect_mbox;
16595                 tp->write32_mbox = tg3_write_indirect_mbox;
16596                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16597                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16598
16599                 iounmap(tp->regs);
16600                 tp->regs = NULL;
16601
16602                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16603                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16604                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16605         }
16606         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16607                 tp->read32_mbox = tg3_read32_mbox_5906;
16608                 tp->write32_mbox = tg3_write32_mbox_5906;
16609                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16610                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16611         }
16612
16613         if (tp->write32 == tg3_write_indirect_reg32 ||
16614             (tg3_flag(tp, PCIX_MODE) &&
16615              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16616               tg3_asic_rev(tp) == ASIC_REV_5701)))
16617                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16618
16619         /* The memory arbiter has to be enabled in order for SRAM accesses
16620          * to succeed.  Normally on powerup the tg3 chip firmware will make
16621          * sure it is enabled, but other entities such as system netboot
16622          * code might disable it.
16623          */
16624         val = tr32(MEMARB_MODE);
16625         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16626
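        /* Determine which PCI function this port is.  The default comes
         * from devfn; PCI-X 5704/5780-class parts report it in the PCI-X
         * status register instead, and 5717/5719/5720 report it via the
         * CPMU status register.
         */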
16627         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16628         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16629             tg3_flag(tp, 5780_CLASS)) {
16630                 if (tg3_flag(tp, PCIX_MODE)) {
16631                         pci_read_config_dword(tp->pdev,
16632                                               tp->pcix_cap + PCI_X_STATUS,
16633                                               &val);
16634                         tp->pci_fn = val & 0x7;
16635                 }
16636         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16637                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16638                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16639                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16640                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16641                         val = tr32(TG3_CPMU_STATUS);
16642
16643                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16644                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16645                 else
16646                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16647                                      TG3_CPMU_STATUS_FSHFT_5719;
16648         }
16649
16650         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16651                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16652                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16653         }
16654
16655         /* Get eeprom hw config before calling tg3_set_power_state().
16656          * In particular, the TG3_FLAG_IS_NIC flag must be
16657          * determined before calling tg3_set_power_state() so that
16658          * we know whether or not to switch out of Vaux power.
16659          * When the flag is set, it means that GPIO1 is used for eeprom
16660          * write protect and also implies that it is a LOM where GPIOs
16661          * are not used to switch power.
16662          */
16663         tg3_get_eeprom_hw_cfg(tp);
16664
16665         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16666                 tg3_flag_clear(tp, TSO_CAPABLE);
16667                 tg3_flag_clear(tp, TSO_BUG);
16668                 tp->fw_needed = NULL;
16669         }
16670
16671         if (tg3_flag(tp, ENABLE_APE)) {
16672                 /* Allow reads and writes to the
16673                  * APE register and memory space.
16674                  */
16675                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16676                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16677                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16678                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16679                                        pci_state_reg);
16680
16681                 tg3_ape_lock_init(tp);
16682                 tp->ape_hb_interval =
16683                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16684         }
16685
16686         /* Set up tp->grc_local_ctrl before calling
16687          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16688          * will bring 5700's external PHY out of reset.
16689          * It is also used as eeprom write protect on LOMs.
16690          */
16691         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16692         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16693             tg3_flag(tp, EEPROM_WRITE_PROT))
16694                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16695                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16696         /* Unused GPIO3 must be driven as output on 5752 because there
16697          * are no pull-up resistors on unused GPIO pins.
16698          */
16699         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16700                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16701
16702         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16703             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16704             tg3_flag(tp, 57765_CLASS))
16705                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16706
16707         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16708             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16709                 /* Turn off the debug UART. */
16710                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16711                 if (tg3_flag(tp, IS_NIC))
16712                         /* Keep VMain power. */
16713                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16714                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16715         }
16716
16717         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16718                 tp->grc_local_ctrl |=
16719                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16720
16721         /* Switch out of Vaux if it is a NIC */
16722         tg3_pwrsrc_switch_to_vmain(tp);
16723
16724         /* Derive initial jumbo mode from MTU assigned in
16725          * ether_setup() via the alloc_etherdev() call
16726          */
16727         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16728                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16729
16730         /* Determine the Wake-on-LAN speed to use. */
16731         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16732             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16733             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16734             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16735                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16736         } else {
16737                 tg3_flag_set(tp, WOL_SPEED_100MB);
16738         }
16739
16740         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16741                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16742
16743         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
16744         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16745             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16746              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16747              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16748             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16749             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16750                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16751
16752         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16753             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16754                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16755         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16756                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16757
16758         if (tg3_flag(tp, 5705_PLUS) &&
16759             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16760             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16761             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16762             !tg3_flag(tp, 57765_PLUS)) {
16763                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16764                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16765                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16766                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16767                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16768                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16769                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16770                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16771                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16772                 } else
16773                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16774         }
16775
16776         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16777             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16778                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16779                 if (tp->phy_otp == 0)
16780                         tp->phy_otp = TG3_OTP_DEFAULT;
16781         }
16782
16783         if (tg3_flag(tp, CPMU_PRESENT))
16784                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16785         else
16786                 tp->mi_mode = MAC_MI_MODE_BASE;
16787
16788         tp->coalesce_mode = 0;
16789         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16790             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16791                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16792
16793         /* Set these bits to enable the statistics workaround. */
16794         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16795             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16796             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16797             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16798                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16799                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16800         }
16801
16802         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16803             tg3_asic_rev(tp) == ASIC_REV_57780)
16804                 tg3_flag_set(tp, USE_PHYLIB);
16805
16806         err = tg3_mdio_init(tp);
16807         if (err)
16808                 return err;
16809
16810         /* Initialize data/descriptor byte/word swapping. */
16811         val = tr32(GRC_MODE);
16812         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16813             tg3_asic_rev(tp) == ASIC_REV_5762)
16814                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16815                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16816                         GRC_MODE_B2HRX_ENABLE |
16817                         GRC_MODE_HTX2B_ENABLE |
16818                         GRC_MODE_HOST_STACKUP);
16819         else
16820                 val &= GRC_MODE_HOST_STACKUP;
16821
16822         tw32(GRC_MODE, val | tp->grc_mode);
16823
16824         tg3_switch_clocks(tp);
16825
16826         /* Clear this out for sanity. */
16827         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16828
16829         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16830         tw32(TG3PCI_REG_BASE_ADDR, 0);
16831
16832         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16833                               &pci_state_reg);
16834         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16835             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16836                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16837                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16838                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16839                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16840                         void __iomem *sram_base;
16841
16842                         /* Write some dummy words into the SRAM status block
16843                          * area and see if they read back correctly.  If the
16844                          * read-back is bad, force-enable the PCI-X workaround.
16845                          */
16846                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16847
16848                         writel(0x00000000, sram_base);
16849                         writel(0x00000000, sram_base + 4);
16850                         writel(0xffffffff, sram_base + 4);
16851                         if (readl(sram_base) != 0x00000000)
16852                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16853                 }
16854         }
16855
16856         udelay(50);
16857         tg3_nvram_init(tp);
16858
16859         /* If the device has an NVRAM, no need to load patch firmware */
16860         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16861             !tg3_flag(tp, NO_NVRAM))
16862                 tp->fw_needed = NULL;
16863
16864         grc_misc_cfg = tr32(GRC_MISC_CFG);
16865         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16866
16867         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16868             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16869              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16870                 tg3_flag_set(tp, IS_5788);
16871
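        /* Tagged status: the status block carries a tag which the driver
         * echoes back through the interrupt mailbox, so the chip can tell
         * which status updates the host has already consumed (roughly,
         * tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24)).
         */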
16872         if (!tg3_flag(tp, IS_5788) &&
16873             tg3_asic_rev(tp) != ASIC_REV_5700)
16874                 tg3_flag_set(tp, TAGGED_STATUS);
16875         if (tg3_flag(tp, TAGGED_STATUS)) {
16876                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16877                                       HOSTCC_MODE_CLRTICK_TXBD);
16878
16879                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16880                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16881                                        tp->misc_host_ctrl);
16882         }
16883
16884         /* Preserve the APE MAC_MODE bits */
16885         if (tg3_flag(tp, ENABLE_APE))
16886                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16887         else
16888                 tp->mac_mode = 0;
16889
16890         if (tg3_10_100_only_device(tp, ent))
16891                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16892
16893         err = tg3_phy_probe(tp);
16894         if (err) {
16895                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16896                 /* ...but do not return yet; err propagates from below */
16897                 tg3_mdio_fini(tp);
16898         }
16899
16900         tg3_read_vpd(tp);
16901         tg3_read_fw_ver(tp);
16902
16903         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16904                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16905         } else {
16906                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16907                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16908                 else
16909                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16910         }
16911
16912         /* 5700 {AX,BX} chips have a broken status block link
16913          * change bit implementation, so we must use the
16914          * status register in those cases.
16915          */
16916         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16917                 tg3_flag_set(tp, USE_LINKCHG_REG);
16918         else
16919                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16920
16921         /* The led_ctrl is set during tg3_phy_probe; here we might
16922          * have to force the link status polling mechanism based
16923          * upon subsystem IDs.
16924          */
16925         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16926             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16927             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16928                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16929                 tg3_flag_set(tp, USE_LINKCHG_REG);
16930         }
16931
16932         /* For all SERDES we poll the MAC status register. */
16933         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16934                 tg3_flag_set(tp, POLL_SERDES);
16935         else
16936                 tg3_flag_clear(tp, POLL_SERDES);
16937
16938         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16939                 tg3_flag_set(tp, POLL_CPMU_LINK);
16940
16941         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16942         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
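        /* The 5701 in PCI-X mode cannot DMA into 2-byte-offset (NET_IP_ALIGN)
         * buffers, so receive into dword-aligned buffers there.  On
         * architectures without efficient unaligned loads, copy every packet
         * (the ~(u16)0 threshold) so the stack still sees aligned headers.
         */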
16943         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16944             tg3_flag(tp, PCIX_MODE)) {
16945                 tp->rx_offset = NET_SKB_PAD;
16946 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16947                 tp->rx_copy_thresh = ~(u16)0;
16948 #endif
16949         }
16950
16951         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16952         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16953         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16954
16955         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16956
16957         /* Increment the rx prod index on the rx std ring by at most
16958          * 8 for these chips to work around hw errata.
16959          */
16960         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16961             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16962             tg3_asic_rev(tp) == ASIC_REV_5755)
16963                 tp->rx_std_max_post = 8;
16964
16965         if (tg3_flag(tp, ASPM_WORKAROUND))
16966                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16967                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16968
16969         return err;
16970 }
16971
16972 static int tg3_get_device_address(struct tg3 *tp)
16973 {
16974         struct net_device *dev = tp->dev;
16975         u32 hi, lo, mac_offset;
16976         int addr_ok = 0;
16977         int err;
16978
16979         if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16980                 return 0;
16981
16982         if (tg3_flag(tp, IS_SSB_CORE)) {
16983                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16984                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16985                         return 0;
16986         }
16987
16988         mac_offset = 0x7c;
16989         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16990             tg3_flag(tp, 5780_CLASS)) {
16991                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16992                         mac_offset = 0xcc;
16993                 if (tg3_nvram_lock(tp))
16994                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16995                 else
16996                         tg3_nvram_unlock(tp);
16997         } else if (tg3_flag(tp, 5717_PLUS)) {
16998                 if (tp->pci_fn & 1)
16999                         mac_offset = 0xcc;
17000                 if (tp->pci_fn > 1)
17001                         mac_offset += 0x18c;
17002         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
17003                 mac_offset = 0x10;
17004
17005         /* First, try to get it from the MAC address mailbox. */
17006         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
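        /* 0x484b is ASCII "HK", apparently the bootcode's signature that a
         * valid address is present in the mailbox.
         */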
17007         if ((hi >> 16) == 0x484b) {
17008                 dev->dev_addr[0] = (hi >>  8) & 0xff;
17009                 dev->dev_addr[1] = (hi >>  0) & 0xff;
17010
17011                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
17012                 dev->dev_addr[2] = (lo >> 24) & 0xff;
17013                 dev->dev_addr[3] = (lo >> 16) & 0xff;
17014                 dev->dev_addr[4] = (lo >>  8) & 0xff;
17015                 dev->dev_addr[5] = (lo >>  0) & 0xff;
17016
17017                 /* Some old bootcode may report a 0 MAC address in SRAM */
17018                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
17019         }
17020         if (!addr_ok) {
17021                 /* Next, try NVRAM. */
17022                 if (!tg3_flag(tp, NO_NVRAM) &&
17023                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
17024                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
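                        /* The be32 reads preserve NVRAM byte order, so the
                         * MAC is the low two bytes of 'hi' followed by all
                         * four bytes of 'lo'.
                         */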
17025                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
17026                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
17027                 }
17028                 /* Finally, just fetch it out of the MAC control regs. */
17029                 else {
17030                         hi = tr32(MAC_ADDR_0_HIGH);
17031                         lo = tr32(MAC_ADDR_0_LOW);
17032
17033                         dev->dev_addr[5] = lo & 0xff;
17034                         dev->dev_addr[4] = (lo >> 8) & 0xff;
17035                         dev->dev_addr[3] = (lo >> 16) & 0xff;
17036                         dev->dev_addr[2] = (lo >> 24) & 0xff;
17037                         dev->dev_addr[1] = hi & 0xff;
17038                         dev->dev_addr[0] = (hi >> 8) & 0xff;
17039                 }
17040         }
17041
17042         if (!is_valid_ether_addr(&dev->dev_addr[0]))
17043                 return -EINVAL;
17044         return 0;
17045 }
17046
17047 #define BOUNDARY_SINGLE_CACHELINE       1
17048 #define BOUNDARY_MULTI_CACHELINE        2
17049
17050 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17051 {
17052         int cacheline_size;
17053         u8 byte;
17054         int goal;
17055
17056         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17057         if (byte == 0)
17058                 cacheline_size = 1024;
17059         else
17060                 cacheline_size = (int) byte * 4;
17061
17062         /* On 5703 and later chips, the boundary bits have no
17063          * effect.
17064          */
17065         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17066             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17067             !tg3_flag(tp, PCI_EXPRESS))
17068                 goto out;
17069
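        /* Pick a per-architecture DMA boundary goal: multi-cacheline bursts
         * where host bridges tolerate them, single-cacheline where they do
         * not, and no restriction (goal == 0) everywhere else.
         */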
17070 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17071         goal = BOUNDARY_MULTI_CACHELINE;
17072 #else
17073 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17074         goal = BOUNDARY_SINGLE_CACHELINE;
17075 #else
17076         goal = 0;
17077 #endif
17078 #endif
17079
17080         if (tg3_flag(tp, 57765_PLUS)) {
17081                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17082                 goto out;
17083         }
17084
17085         if (!goal)
17086                 goto out;
17087
17088         /* PCI controllers on most RISC systems tend to disconnect
17089          * when a device tries to burst across a cache-line boundary.
17090          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17091          *
17092          * Unfortunately, for PCI-E there are only limited
17093          * write-side controls for this, and thus for reads
17094          * we will still get the disconnects.  We'll also waste
17095          * these PCI cycles for both read and write on chips
17096          * other than 5700 and 5701, the only ones that implement
17097          * the boundary bits.
17098          */
17099         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17100                 switch (cacheline_size) {
17101                 case 16:
17102                 case 32:
17103                 case 64:
17104                 case 128:
17105                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17106                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17107                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17108                         } else {
17109                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17110                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17111                         }
17112                         break;
17113
17114                 case 256:
17115                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17116                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17117                         break;
17118
17119                 default:
17120                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17121                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17122                         break;
17123                 }
17124         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17125                 switch (cacheline_size) {
17126                 case 16:
17127                 case 32:
17128                 case 64:
17129                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17130                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17131                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17132                                 break;
17133                         }
17134                         fallthrough;
17135                 case 128:
17136                 default:
17137                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17138                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17139                         break;
17140                 }
17141         } else {
17142                 switch (cacheline_size) {
17143                 case 16:
17144                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17145                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17146                                         DMA_RWCTRL_WRITE_BNDRY_16);
17147                                 break;
17148                         }
17149                         fallthrough;
17150                 case 32:
17151                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17152                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17153                                         DMA_RWCTRL_WRITE_BNDRY_32);
17154                                 break;
17155                         }
17156                         fallthrough;
17157                 case 64:
17158                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17159                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17160                                         DMA_RWCTRL_WRITE_BNDRY_64);
17161                                 break;
17162                         }
17163                         fallthrough;
17164                 case 128:
17165                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17166                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17167                                         DMA_RWCTRL_WRITE_BNDRY_128);
17168                                 break;
17169                         }
17170                         fallthrough;
17171                 case 256:
17172                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17173                                 DMA_RWCTRL_WRITE_BNDRY_256);
17174                         break;
17175                 case 512:
17176                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17177                                 DMA_RWCTRL_WRITE_BNDRY_512);
17178                         break;
17179                 case 1024:
17180                 default:
17181                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17182                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17183                         break;
17184                 }
17185         }
17186
17187 out:
17188         return val;
17189 }
17190
17191 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17192                            int size, bool to_device)
17193 {
17194         struct tg3_internal_buffer_desc test_desc;
17195         u32 sram_dma_descs;
17196         int i, ret;
17197
17198         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17199
17200         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17201         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17202         tw32(RDMAC_STATUS, 0);
17203         tw32(WDMAC_STATUS, 0);
17204
17205         tw32(BUFMGR_MODE, 0);
17206         tw32(FTQ_RESET, 0);
17207
17208         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17209         test_desc.addr_lo = buf_dma & 0xffffffff;
17210         test_desc.nic_mbuf = 0x00002100;
17211         test_desc.len = size;
17212
17213         /*
17214          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17215          * the *second* time the tg3 driver was getting loaded after an
17216          * initial scan.
17217          *
17218          * Broadcom tells me:
17219          *   ...the DMA engine is connected to the GRC block and a DMA
17220          *   reset may affect the GRC block in some unpredictable way...
17221          *   The behavior of resets to individual blocks has not been tested.
17222          *
17223          * Broadcom noted the GRC reset will also reset all sub-components.
17224          */
17225         if (to_device) {
17226                 test_desc.cqid_sqid = (13 << 8) | 2;
17227
17228                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17229                 udelay(40);
17230         } else {
17231                 test_desc.cqid_sqid = (16 << 8) | 7;
17232
17233                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17234                 udelay(40);
17235         }
17236         test_desc.flags = 0x00000005;
17237
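        /* Copy the descriptor into NIC SRAM one word at a time through the
         * PCI config space memory window.
         */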
17238         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17239                 u32 val;
17240
17241                 val = *(((u32 *)&test_desc) + i);
17242                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17243                                        sram_dma_descs + (i * sizeof(u32)));
17244                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17245         }
17246         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17247
17248         if (to_device)
17249                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17250         else
17251                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17252
17253         ret = -ENODEV;
17254         for (i = 0; i < 40; i++) {
17255                 u32 val;
17256
17257                 if (to_device)
17258                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17259                 else
17260                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17261                 if ((val & 0xffff) == sram_dma_descs) {
17262                         ret = 0;
17263                         break;
17264                 }
17265
17266                 udelay(100);
17267         }
17268
17269         return ret;
17270 }
17271
17272 #define TEST_BUFFER_SIZE        0x2000
17273
17274 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17275         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17276         { },
17277 };
17278
17279 static int tg3_test_dma(struct tg3 *tp)
17280 {
17281         dma_addr_t buf_dma;
17282         u32 *buf, saved_dma_rwctrl;
17283         int ret = 0;
17284
17285         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17286                                  &buf_dma, GFP_KERNEL);
17287         if (!buf) {
17288                 ret = -ENOMEM;
17289                 goto out_nofree;
17290         }
17291
17292         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17293                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17294
17295         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17296
17297         if (tg3_flag(tp, 57765_PLUS))
17298                 goto out;
17299
17300         if (tg3_flag(tp, PCI_EXPRESS)) {
17301                 /* DMA read watermark not used on PCIE */
17302                 tp->dma_rwctrl |= 0x00180000;
17303         } else if (!tg3_flag(tp, PCIX_MODE)) {
17304                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17305                     tg3_asic_rev(tp) == ASIC_REV_5750)
17306                         tp->dma_rwctrl |= 0x003f0000;
17307                 else
17308                         tp->dma_rwctrl |= 0x003f000f;
17309         } else {
17310                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17311                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17312                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17313                         u32 read_water = 0x7;
17314
17315                         /* If the 5704 is behind the EPB bridge, we can
17316                          * do the less restrictive ONE_DMA workaround for
17317                          * better performance.
17318                          */
17319                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17320                             tg3_asic_rev(tp) == ASIC_REV_5704)
17321                                 tp->dma_rwctrl |= 0x8000;
17322                         else if (ccval == 0x6 || ccval == 0x7)
17323                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17324
17325                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17326                                 read_water = 4;
17327                         /* Set bit 23 to enable PCIX hw bug fix */
17328                         tp->dma_rwctrl |=
17329                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17330                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17331                                 (1 << 23);
17332                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17333                         /* 5780 always in PCIX mode */
17334                         tp->dma_rwctrl |= 0x00144000;
17335                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17336                         /* 5714 always in PCIX mode */
17337                         tp->dma_rwctrl |= 0x00148000;
17338                 } else {
17339                         tp->dma_rwctrl |= 0x001b000f;
17340                 }
17341         }
17342         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17343                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17344
17345         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17346             tg3_asic_rev(tp) == ASIC_REV_5704)
17347                 tp->dma_rwctrl &= 0xfffffff0;
17348
17349         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17350             tg3_asic_rev(tp) == ASIC_REV_5701) {
17351                 /* Remove this if it causes problems for some boards. */
17352                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17353
17354                 /* On 5700/5701 chips, we need to set this bit.
17355                  * Otherwise the chip will issue cacheline transactions
17356                  * to streamable DMA memory with not all the byte
17357                  * enables turned on.  This is an error on several
17358                  * RISC PCI controllers, in particular sparc64.
17359                  *
17360                  * On 5703/5704 chips, this bit has been reassigned
17361                  * a different meaning.  In particular, it is used
17362                  * on those chips to enable a PCI-X workaround.
17363                  */
17364                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17365         }
17366
17367         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17368
17369
17370         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17371             tg3_asic_rev(tp) != ASIC_REV_5701)
17372                 goto out;
17373
17374         /* It is best to perform the DMA test with the maximum write
17375          * burst size to expose the 5700/5701 write DMA bug.
17376          */
17377         saved_dma_rwctrl = tp->dma_rwctrl;
17378         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17379         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17380
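        /* Fill the buffer with a counting pattern, DMA it to the chip and
         * back, and verify it.  On corruption, clamp the write boundary to
         * 16 bytes and retry before giving up.
         */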
17381         while (1) {
17382                 u32 *p = buf, i;
17383
17384                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17385                         p[i] = i;
17386
17387                 /* Send the buffer to the chip. */
17388                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17389                 if (ret) {
17390                         dev_err(&tp->pdev->dev,
17391                                 "%s: Buffer write failed. err = %d\n",
17392                                 __func__, ret);
17393                         break;
17394                 }
17395
17396                 /* Now read it back. */
17397                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17398                 if (ret) {
17399                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17400                                 "err = %d\n", __func__, ret);
17401                         break;
17402                 }
17403
17404                 /* Verify it. */
17405                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17406                         if (p[i] == i)
17407                                 continue;
17408
17409                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17410                             DMA_RWCTRL_WRITE_BNDRY_16) {
17411                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17412                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17413                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17414                                 break;
17415                         } else {
17416                                 dev_err(&tp->pdev->dev,
17417                                         "%s: Buffer corrupted on read back! "
17418                                         "(%d != %d)\n", __func__, p[i], i);
17419                                 ret = -ENODEV;
17420                                 goto out;
17421                         }
17422                 }
17423
17424                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17425                         /* Success. */
17426                         ret = 0;
17427                         break;
17428                 }
17429         }
17430         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17431             DMA_RWCTRL_WRITE_BNDRY_16) {
17432                 /* DMA test passed without adjusting the DMA boundary;
17433                  * now look for chipsets that are known to expose the
17434                  * DMA bug without failing the test.
17435                  */
17436                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17437                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17438                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17439                 } else {
17440                         /* Safe to use the calculated DMA boundary. */
17441                         tp->dma_rwctrl = saved_dma_rwctrl;
17442                 }
17443
17444                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17445         }
17446
17447 out:
17448         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17449 out_nofree:
17450         return ret;
17451 }
17452
17453 static void tg3_init_bufmgr_config(struct tg3 *tp)
17454 {
17455         if (tg3_flag(tp, 57765_PLUS)) {
17456                 tp->bufmgr_config.mbuf_read_dma_low_water =
17457                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17458                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17459                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17460                 tp->bufmgr_config.mbuf_high_water =
17461                         DEFAULT_MB_HIGH_WATER_57765;
17462
17463                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17464                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17465                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17466                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17467                 tp->bufmgr_config.mbuf_high_water_jumbo =
17468                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17469         } else if (tg3_flag(tp, 5705_PLUS)) {
17470                 tp->bufmgr_config.mbuf_read_dma_low_water =
17471                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17472                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17473                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17474                 tp->bufmgr_config.mbuf_high_water =
17475                         DEFAULT_MB_HIGH_WATER_5705;
17476                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17477                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17478                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17479                         tp->bufmgr_config.mbuf_high_water =
17480                                 DEFAULT_MB_HIGH_WATER_5906;
17481                 }
17482
17483                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17484                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17485                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17486                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17487                 tp->bufmgr_config.mbuf_high_water_jumbo =
17488                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17489         } else {
17490                 tp->bufmgr_config.mbuf_read_dma_low_water =
17491                         DEFAULT_MB_RDMA_LOW_WATER;
17492                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17493                         DEFAULT_MB_MACRX_LOW_WATER;
17494                 tp->bufmgr_config.mbuf_high_water =
17495                         DEFAULT_MB_HIGH_WATER;
17496
17497                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17498                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17499                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17500                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17501                 tp->bufmgr_config.mbuf_high_water_jumbo =
17502                         DEFAULT_MB_HIGH_WATER_JUMBO;
17503         }
17504
17505         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17506         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17507 }
17508
17509 static char *tg3_phy_string(struct tg3 *tp)
17510 {
17511         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17512         case TG3_PHY_ID_BCM5400:        return "5400";
17513         case TG3_PHY_ID_BCM5401:        return "5401";
17514         case TG3_PHY_ID_BCM5411:        return "5411";
17515         case TG3_PHY_ID_BCM5701:        return "5701";
17516         case TG3_PHY_ID_BCM5703:        return "5703";
17517         case TG3_PHY_ID_BCM5704:        return "5704";
17518         case TG3_PHY_ID_BCM5705:        return "5705";
17519         case TG3_PHY_ID_BCM5750:        return "5750";
17520         case TG3_PHY_ID_BCM5752:        return "5752";
17521         case TG3_PHY_ID_BCM5714:        return "5714";
17522         case TG3_PHY_ID_BCM5780:        return "5780";
17523         case TG3_PHY_ID_BCM5755:        return "5755";
17524         case TG3_PHY_ID_BCM5787:        return "5787";
17525         case TG3_PHY_ID_BCM5784:        return "5784";
17526         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17527         case TG3_PHY_ID_BCM5906:        return "5906";
17528         case TG3_PHY_ID_BCM5761:        return "5761";
17529         case TG3_PHY_ID_BCM5718C:       return "5718C";
17530         case TG3_PHY_ID_BCM5718S:       return "5718S";
17531         case TG3_PHY_ID_BCM57765:       return "57765";
17532         case TG3_PHY_ID_BCM5719C:       return "5719C";
17533         case TG3_PHY_ID_BCM5720C:       return "5720C";
17534         case TG3_PHY_ID_BCM5762:        return "5762C";
17535         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17536         case 0:                 return "serdes";
17537         default:                return "unknown";
17538         }
17539 }
17540
17541 static char *tg3_bus_string(struct tg3 *tp, char *str)
17542 {
17543         if (tg3_flag(tp, PCI_EXPRESS)) {
17544                 strcpy(str, "PCI Express");
17545                 return str;
17546         } else if (tg3_flag(tp, PCIX_MODE)) {
17547                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17548
17549                 strcpy(str, "PCIX:");
17550
17551                 if ((clock_ctrl == 7) ||
17552                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17553                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17554                         strcat(str, "133MHz");
17555                 else if (clock_ctrl == 0)
17556                         strcat(str, "33MHz");
17557                 else if (clock_ctrl == 2)
17558                         strcat(str, "50MHz");
17559                 else if (clock_ctrl == 4)
17560                         strcat(str, "66MHz");
17561                 else if (clock_ctrl == 6)
17562                         strcat(str, "100MHz");
17563         } else {
17564                 strcpy(str, "PCI:");
17565                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17566                         strcat(str, "66MHz");
17567                 else
17568                         strcat(str, "33MHz");
17569         }
17570         if (tg3_flag(tp, PCI_32BIT))
17571                 strcat(str, ":32-bit");
17572         else
17573                 strcat(str, ":64-bit");
17574         return str;
17575 }
17576
17577 static void tg3_init_coal(struct tg3 *tp)
17578 {
17579         struct ethtool_coalesce *ec = &tp->coal;
17580
17581         memset(ec, 0, sizeof(*ec));
17582         ec->cmd = ETHTOOL_GCOALESCE;
17583         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17584         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17585         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17586         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17587         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17588         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17589         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17590         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17591         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17592
17593         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17594                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17595                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17596                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17597                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17598                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17599         }
17600
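        /* The per-IRQ coalescing controls and the statistics tick are not
         * supported on 5705 and newer chips (the driver polls statistics
         * itself there), so report them as zero.
         */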
17601         if (tg3_flag(tp, 5705_PLUS)) {
17602                 ec->rx_coalesce_usecs_irq = 0;
17603                 ec->tx_coalesce_usecs_irq = 0;
17604                 ec->stats_block_coalesce_usecs = 0;
17605         }
17606 }
17607
17608 static int tg3_init_one(struct pci_dev *pdev,
17609                                   const struct pci_device_id *ent)
17610 {
17611         struct net_device *dev;
17612         struct tg3 *tp;
17613         int i, err;
17614         u32 sndmbx, rcvmbx, intmbx;
17615         char str[40];
17616         u64 dma_mask, persist_dma_mask;
17617         netdev_features_t features = 0;
17618
17619         err = pci_enable_device(pdev);
17620         if (err) {
17621                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17622                 return err;
17623         }
17624
17625         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17626         if (err) {
17627                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17628                 goto err_out_disable_pdev;
17629         }
17630
17631         pci_set_master(pdev);
17632
17633         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17634         if (!dev) {
17635                 err = -ENOMEM;
17636                 goto err_out_free_res;
17637         }
17638
17639         SET_NETDEV_DEV(dev, &pdev->dev);
17640
17641         tp = netdev_priv(dev);
17642         tp->pdev = pdev;
17643         tp->dev = dev;
17644         tp->rx_mode = TG3_DEF_RX_MODE;
17645         tp->tx_mode = TG3_DEF_TX_MODE;
17646         tp->irq_sync = 1;
17647         tp->pcierr_recovery = false;
17648
17649         if (tg3_debug > 0)
17650                 tp->msg_enable = tg3_debug;
17651         else
17652                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17653
17654         if (pdev_is_ssb_gige_core(pdev)) {
17655                 tg3_flag_set(tp, IS_SSB_CORE);
17656                 if (ssb_gige_must_flush_posted_writes(pdev))
17657                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17658                 if (ssb_gige_one_dma_at_once(pdev))
17659                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17660                 if (ssb_gige_have_roboswitch(pdev)) {
17661                         tg3_flag_set(tp, USE_PHYLIB);
17662                         tg3_flag_set(tp, ROBOSWITCH);
17663                 }
17664                 if (ssb_gige_is_rgmii(pdev))
17665                         tg3_flag_set(tp, RGMII_MODE);
17666         }
17667
17668         /* The word/byte swap controls here govern register access byte
17669          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17670          * setting below.
17671          */
17672         tp->misc_host_ctrl =
17673                 MISC_HOST_CTRL_MASK_PCI_INT |
17674                 MISC_HOST_CTRL_WORD_SWAP |
17675                 MISC_HOST_CTRL_INDIR_ACCESS |
17676                 MISC_HOST_CTRL_PCISTATE_RW;
17677
17678         /* The NONFRM (non-frame) byte/word swap controls take effect
17679          * on descriptor entries, i.e. anything which isn't packet data.
17680          *
17681          * The StrongARM chips on the board (one for tx, one for rx)
17682          * are running in big-endian mode.
17683          */
17684         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17685                         GRC_MODE_WSWAP_NONFRM_DATA);
17686 #ifdef __BIG_ENDIAN
17687         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17688 #endif
17689         spin_lock_init(&tp->lock);
17690         spin_lock_init(&tp->indirect_lock);
17691         INIT_WORK(&tp->reset_task, tg3_reset_task);
17692
17693         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17694         if (!tp->regs) {
17695                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17696                 err = -ENOMEM;
17697                 goto err_out_free_dev;
17698         }
17699
17700         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17701             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17702             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17703             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17704             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17705             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17706             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17707             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17708             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17709             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17710             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17711             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17712             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17713             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17714             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17715                 tg3_flag_set(tp, ENABLE_APE);
17716                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17717                 if (!tp->aperegs) {
17718                         dev_err(&pdev->dev,
17719                                 "Cannot map APE registers, aborting\n");
17720                         err = -ENOMEM;
17721                         goto err_out_iounmap;
17722                 }
17723         }
17724
17725         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17726         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17727
17728         dev->ethtool_ops = &tg3_ethtool_ops;
17729         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17730         dev->netdev_ops = &tg3_netdev_ops;
17731         dev->irq = pdev->irq;
17732
17733         err = tg3_get_invariants(tp, ent);
17734         if (err) {
17735                 dev_err(&pdev->dev,
17736                         "Problem fetching invariants of chip, aborting\n");
17737                 goto err_out_apeunmap;
17738         }
17739
17740         /* The EPB bridge inside 5714, 5715, and 5780 and any
17741          * device behind the EPB cannot support DMA addresses > 40-bit.
17742          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17743          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17744          * do DMA address check in tg3_start_xmit().
17745          */
17746         if (tg3_flag(tp, IS_5788))
17747                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17748         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17749                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17750 #ifdef CONFIG_HIGHMEM
17751                 dma_mask = DMA_BIT_MASK(64);
17752 #endif
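        /* With HIGHMEM, a wider 64-bit streaming mask is requested while the
         * coherent mask stays at 40 bits; buffers above the 40-bit limit are
         * caught in the transmit path, per the comment above.
         */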
17753         } else
17754                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17755
17756         /* Configure DMA attributes. */
17757         if (dma_mask > DMA_BIT_MASK(32)) {
17758                 err = pci_set_dma_mask(pdev, dma_mask);
17759                 if (!err) {
17760                         features |= NETIF_F_HIGHDMA;
17761                         err = pci_set_consistent_dma_mask(pdev,
17762                                                           persist_dma_mask);
17763                         if (err < 0) {
17764                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17765                                         "DMA for consistent allocations\n");
17766                                 goto err_out_apeunmap;
17767                         }
17768                 }
17769         }
17770         if (err || dma_mask == DMA_BIT_MASK(32)) {
17771                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17772                 if (err) {
17773                         dev_err(&pdev->dev,
17774                                 "No usable DMA configuration, aborting\n");
17775                         goto err_out_apeunmap;
17776                 }
17777         }
17778
17779         tg3_init_bufmgr_config(tp);
17780
17781         /* 5700 B0 chips do not support checksumming correctly due
17782          * to hardware bugs.
17783          */
17784         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17785                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17786
17787                 if (tg3_flag(tp, 5755_PLUS))
17788                         features |= NETIF_F_IPV6_CSUM;
17789         }
17790
17791         /* TSO is on by default on chips that support hardware TSO.
17792          * Firmware TSO on older chips gives lower performance, so it
17793          * is off by default, but can be enabled using ethtool.
17794          */
17795         if ((tg3_flag(tp, HW_TSO_1) ||
17796              tg3_flag(tp, HW_TSO_2) ||
17797              tg3_flag(tp, HW_TSO_3)) &&
17798             (features & NETIF_F_IP_CSUM))
17799                 features |= NETIF_F_TSO;
17800         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17801                 if (features & NETIF_F_IPV6_CSUM)
17802                         features |= NETIF_F_TSO6;
17803                 if (tg3_flag(tp, HW_TSO_3) ||
17804                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17805                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17806                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17807                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17808                     tg3_asic_rev(tp) == ASIC_REV_57780)
17809                         features |= NETIF_F_TSO_ECN;
17810         }
17811
17812         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17813                          NETIF_F_HW_VLAN_CTAG_RX;
17814         dev->vlan_features |= features;
17815
17816         /*
17817          * Add loopback capability only for a subset of devices that support
17818          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17819          * loopback for the remaining devices.
17820          */
17821         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17822             !tg3_flag(tp, CPMU_PRESENT))
17823                 /* Add the loopback capability */
17824                 features |= NETIF_F_LOOPBACK;
17825
17826         dev->hw_features |= features;
17827         dev->priv_flags |= IFF_UNICAST_FLT;
17828
17829         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17830         dev->min_mtu = TG3_MIN_MTU;
17831         dev->max_mtu = TG3_MAX_MTU(tp);
17832
17833         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17834             !tg3_flag(tp, TSO_CAPABLE) &&
17835             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17836                 tg3_flag_set(tp, MAX_RXPEND_64);
17837                 tp->rx_pending = 63;
17838         }
17839
17840         err = tg3_get_device_address(tp);
17841         if (err) {
17842                 dev_err(&pdev->dev,
17843                         "Could not obtain valid ethernet address, aborting\n");
17844                 goto err_out_apeunmap;
17845         }
17846
17847         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17848         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17849         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
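        /* Walk the mailbox layout for each NAPI context.  The strides below
         * mirror the hardware map: interrupt mailboxes sit 8 bytes apart for
         * the first vectors and 4 bytes apart after that, and the send
         * producer mailboxes are interleaved, hence the alternating
         * -0x4/+0xc stepping.
         */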
17850         for (i = 0; i < tp->irq_max; i++) {
17851                 struct tg3_napi *tnapi = &tp->napi[i];
17852
17853                 tnapi->tp = tp;
17854                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17855
17856                 tnapi->int_mbox = intmbx;
17857                 if (i <= 4)
17858                         intmbx += 0x8;
17859                 else
17860                         intmbx += 0x4;
17861
17862                 tnapi->consmbox = rcvmbx;
17863                 tnapi->prodmbox = sndmbx;
17864
17865                 if (i)
17866                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17867                 else
17868                         tnapi->coal_now = HOSTCC_MODE_NOW;
17869
17870                 if (!tg3_flag(tp, SUPPORT_MSIX))
17871                         break;
17872
17873                 /*
17874                  * If we support MSIX, we'll be using RSS.  If we're using
17875                  * RSS, the first vector only handles link interrupts and the
17876                  * remaining vectors handle rx and tx interrupts.  Reuse the
17877                  * mailbox values for the next iteration.  The values we set up
17878                  * above are still useful for the single-vector mode.
17879                  */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }
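        /*
         * Net effect under MSI-X (derived from the arithmetic above):
         * vector 0 keeps the base rcvmbx/sndmbx values but handles only
         * link events, vector 1 reuses those same values for the first
         * rx/tx ring, and each later vector advances rcvmbx by 0x8 while
         * sndmbx alternates between -0x4 and +0xc, which presumably
         * tracks the hardware layout of the send producer mailboxes.
         */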

        /*
         * Reset the chip in case the UNDI or EFI driver did not shut it
         * down cleanly; otherwise the DMA self test will enable WDMAC and
         * we'll see (spurious) pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tg3_full_lock(tp, 0);
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
            tg3_asic_rev(tp) == ASIC_REV_5720 ||
            tg3_asic_rev(tp) == ASIC_REV_5762)
                tg3_flag_set(tp, PTP_CAPABLE);

        tg3_timer_init(tp);

        tg3_carrier_off(tp);

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

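        /*
         * ptp_clock_register() may return an error pointer; treat that as
         * "no PTP clock" and continue, so the probe does not fail merely
         * because hardware timestamping is unavailable.
         */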
        if (tg3_flag(tp, PTP_CAPABLE)) {
                tg3_ptp_init(tp);
                tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
                                                   &tp->pdev->dev);
                if (IS_ERR(tp->ptp_clock))
                        tp->ptp_clock = NULL;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tg3_chip_rev_id(tp),
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
        return err;
}

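/*
 * Tear down in roughly the reverse order of tg3_init_one(): stop PTP and
 * any pending reset work, release the firmware, detach the PHY/MDIO layer
 * if phylib is in use, unregister the netdev, and only then unmap the
 * BARs and release the PCI device.
 */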
static void tg3_remove_one(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);

        if (dev) {
                struct tg3 *tp = netdev_priv(dev);

                tg3_ptp_fini(tp);

                release_firmware(tp->fw);

                tg3_reset_task_cancel(tp);

                if (tg3_flag(tp, USE_PHYLIB)) {
                        tg3_phy_fini(tp);
                        tg3_mdio_fini(tp);
                }

                unregister_netdev(dev);
                if (tp->aperegs) {
                        iounmap(tp->aperegs);
                        tp->aperegs = NULL;
                }
                if (tp->regs) {
                        iounmap(tp->regs);
                        tp->regs = NULL;
                }
                free_netdev(dev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
        }
}

#ifdef CONFIG_PM_SLEEP
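/*
 * System sleep: quiesce the NIC, then let tg3_power_down_prepare() set up
 * the wake-up state.  If that fails, the driver rolls the hardware back
 * to a fully running state rather than suspending half-configured.
 */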
static int tg3_suspend(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, true);
                if (err2)
                        goto out;

                tg3_timer_start(tp);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

unlock:
        rtnl_unlock();
        return err;
}

static int tg3_resume(struct device *device)
{
        struct net_device *dev = dev_get_drvdata(device);
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        rtnl_lock();

        if (!netif_running(dev))
                goto unlock;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp,
                             !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
        if (err)
                goto out;

        tg3_timer_start(tp);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

unlock:
        rtnl_unlock();
        return err;
}
#endif /* CONFIG_PM_SLEEP */

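/*
 * SIMPLE_DEV_PM_OPS() wires the handlers above into a dev_pm_ops for the
 * system sleep transitions; when CONFIG_PM_SLEEP is disabled the sleep
 * callbacks compile away, which is why the handlers sit inside the #ifdef.
 */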
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);

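/*
 * Shutdown: close the interface, but power the chip down only on a real
 * power-off.  On reboot the device is intentionally left powered,
 * presumably so the next kernel finds it in a sane state (hence the
 * system_state check).
 */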
static void tg3_shutdown(struct pci_dev *pdev)
{
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);

        rtnl_lock();
        netif_device_detach(dev);

        if (netif_running(dev))
                dev_close(dev);

        if (system_state == SYSTEM_POWER_OFF)
                tg3_power_down(tp);

        rtnl_unlock();
}

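/*
 * PCI error recovery, sketched (the exact sequencing belongs to the AER
 * core and is simplified here):
 *
 *      ret = tg3_io_error_detected(pdev, pci_channel_io_frozen);
 *      if (ret == PCI_ERS_RESULT_NEED_RESET)
 *              ret = tg3_io_slot_reset(pdev);  <- after the bus reset
 *      if (ret == PCI_ERS_RESULT_RECOVERED)
 *              tg3_io_resume(pdev);
 */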
/**
 * tg3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        rtnl_lock();

        /* May be a second call, or the netdev may not exist yet */
        if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
                goto done;

        /*
         * Only a frozen channel needs recovery; a permanent failure is
         * handled in the done path below.
         */
        if (state == pci_channel_io_frozen)
                tp->pcierr_recovery = true;

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_timer_stop(tp);

        /* Make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        if (state == pci_channel_io_perm_failure) {
                if (netdev) {
                        tg3_napi_enable(tp);
                        dev_close(netdev);
                }
                err = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_disable_device(pdev);
        }

        rtnl_unlock();

        return err;
}

/**
 * tg3_io_slot_reset - called after the PCI bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                dev_err(&pdev->dev,
                        "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        pci_set_master(pdev);
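        /*
         * pci_restore_state() consumes the configuration space image
         * saved at probe time, so save it again right away to keep a
         * valid copy for any future slot reset.
         */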
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netdev || !netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
                tg3_napi_enable(tp);
                dev_close(netdev);
        }
        rtnl_unlock();

        return rc;
}

/**
 * tg3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netdev || !netif_running(netdev))
                goto done;

        tg3_full_lock(tp, 0);
        tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, true);
        if (err) {
                tg3_full_unlock(tp);
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tg3_timer_start(tp);

        tg3_netif_start(tp);

        tg3_full_unlock(tp);

        tg3_phy_start(tp);

done:
        tp->pcierr_recovery = false;
        rtnl_unlock();
}


static const struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = tg3_remove_one,
        .err_handler    = &tg3_err_handler,
        .driver.pm      = &tg3_pm_ops,
        .shutdown       = tg3_shutdown,
};

module_pci_driver(tg3_driver);