/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2016 Broadcom Corporation.
 * Copyright (C) 2016-2017 Broadcom Limited.
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 * refers to Broadcom Inc. and/or its subsidiaries.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2016 Broadcom Corporation.
 *      Copyright (C) 2016-2017 Broadcom Ltd.
 *      Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
 *      refers to Broadcom Inc. and/or its subsidiaries.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

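/* Note: tg3_flag(tp, ENABLE_APE) expands to
 * _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags), i.e. an atomic
 * test_bit() on the device's flag bitmap, so flag tests and updates
 * are safe without any extra locking.
 */
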
#define DRV_MODULE_NAME         "tg3"
/* DO NOT UPDATE TG3_*_NUM defines */
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     137

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     ETH_ZLEN
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo and
 * similar operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
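/* Example: with TG3_TX_RING_SIZE == 512, NEXT_TX(511) == (512 & 511) == 0,
 * so the index wraps using a mask rather than a hw divide.
 */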

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
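/* On arches with cheap unaligned loads (or when NET_IP_ALIGN == 0) the
 * threshold collapses to the 256-byte constant above, which avoids
 * dereferencing tp->rx_copy_thresh in the hot rx path.
 */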

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG357766);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
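/* Usage example: "modprobe tg3 tg3_debug=0x3" enables NETIF_MSG_DRV (0x1)
 * and NETIF_MSG_PROBE (0x2); the default of -1 selects TG3_DEF_MSG_ENABLE.
 */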

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static const struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};
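/* These are the names reported by "ethtool -S"; their order must line up
 * with the order in which the driver fills its ethtool stats block
 * (struct tg3_ethtool_stats in tg3.h).
 */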

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
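/* These labels are reported by "ethtool -t <dev>"; the (offline) entries
 * take the interface down for the duration of the test.
 */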


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

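/* The helpers below tunnel register and mailbox accesses through PCI config
 * space: the target offset goes to TG3PCI_REG_BASE_ADDR and the data moves
 * through TG3PCI_REG_DATA, all serialized by tp->indirect_lock.
 */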
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
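/* Usage example from tg3_switch_clocks() below:
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 * on the posted path this writes the register, waits 40 usec, issues a
 * flushing read, then waits another 40 usec (see _tw32_flush() above).
 */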

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

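/* APE lock protocol: write our request bit to the lock's request register,
 * then poll the grant register for up to 1 ms (100 x 10 usec).  If the
 * grant never becomes exactly our bit, the request is revoked and -EBUSY
 * is returned.
 */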
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
                fallthrough;
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

#ifdef CONFIG_TIGON3_HWMON
static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}
#endif

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 20 milliseconds for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 20000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_send_ape_heartbeat(struct tg3 *tp,
                                   unsigned long interval)
{
        /* Check if the heartbeat interval has elapsed */
        if (!tg3_flag(tp, ENABLE_APE) ||
            time_before(jiffies, tp->ape_hb_jiffies + interval))
                return;

        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
        tp->ape_hb_jiffies = jiffies;
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

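/* Note on the mailbox writes above: storing 0x00000001 in a vector's
 * interrupt mailbox masks that vector, while writing last_tag << 24
 * unmasks it and, under TAGGED_STATUS, acks status updates up to that
 * tag (see tg3_int_reenable() below).
 */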
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

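/* MII management frames go through MAC_MI_COM: the PHY address, register
 * address and command are packed into one register write, and completion
 * is detected by polling MI_COM_BUSY.  PHY_BUSY_LOOPS x 10 usec bounds
 * the wait at roughly 50 ms.
 */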
static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
        return tg3_writephy(tp, MII_TG3_MISC_SHDW,
                            reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

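/* tg3_mdio_read()/tg3_mdio_write() below are the mii_bus callbacks handed
 * to phylib when USE_PHYLIB is set; they wrap the raw __tg3_readphy()/
 * __tg3_writephy() helpers under tp->lock.
 */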
1380 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1381 {
1382         struct tg3 *tp = bp->priv;
1383         u32 val;
1384
1385         spin_lock_bh(&tp->lock);
1386
1387         if (__tg3_readphy(tp, mii_id, reg, &val))
1388                 val = -EIO;
1389
1390         spin_unlock_bh(&tp->lock);
1391
1392         return val;
1393 }
1394
1395 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1396 {
1397         struct tg3 *tp = bp->priv;
1398         u32 ret = 0;
1399
1400         spin_lock_bh(&tp->lock);
1401
1402         if (__tg3_writephy(tp, mii_id, reg, val))
1403                 ret = -EIO;
1404
1405         spin_unlock_bh(&tp->lock);
1406
1407         return ret;
1408 }
1409
1410 static void tg3_mdio_config_5785(struct tg3 *tp)
1411 {
1412         u32 val;
1413         struct phy_device *phydev;
1414
1415         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1416         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1417         case PHY_ID_BCM50610:
1418         case PHY_ID_BCM50610M:
1419                 val = MAC_PHYCFG2_50610_LED_MODES;
1420                 break;
1421         case PHY_ID_BCMAC131:
1422                 val = MAC_PHYCFG2_AC131_LED_MODES;
1423                 break;
1424         case PHY_ID_RTL8211C:
1425                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1426                 break;
1427         case PHY_ID_RTL8201E:
1428                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1429                 break;
1430         default:
1431                 return;
1432         }
1433
1434         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1435                 tw32(MAC_PHYCFG2, val);
1436
1437                 val = tr32(MAC_PHYCFG1);
1438                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1439                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1440                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1441                 tw32(MAC_PHYCFG1, val);
1442
1443                 return;
1444         }
1445
1446         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1447                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1448                        MAC_PHYCFG2_FMODE_MASK_MASK |
1449                        MAC_PHYCFG2_GMODE_MASK_MASK |
1450                        MAC_PHYCFG2_ACT_MASK_MASK   |
1451                        MAC_PHYCFG2_QUAL_MASK_MASK |
1452                        MAC_PHYCFG2_INBAND_ENABLE;
1453
1454         tw32(MAC_PHYCFG2, val);
1455
1456         val = tr32(MAC_PHYCFG1);
1457         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1458                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1459         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1462                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1463                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1464         }
1465         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1466                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1467         tw32(MAC_PHYCFG1, val);
1468
1469         val = tr32(MAC_EXT_RGMII_MODE);
1470         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1471                  MAC_RGMII_MODE_RX_QUALITY |
1472                  MAC_RGMII_MODE_RX_ACTIVITY |
1473                  MAC_RGMII_MODE_RX_ENG_DET |
1474                  MAC_RGMII_MODE_TX_ENABLE |
1475                  MAC_RGMII_MODE_TX_LOWPWR |
1476                  MAC_RGMII_MODE_TX_RESET);
1477         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1478                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479                         val |= MAC_RGMII_MODE_RX_INT_B |
1480                                MAC_RGMII_MODE_RX_QUALITY |
1481                                MAC_RGMII_MODE_RX_ACTIVITY |
1482                                MAC_RGMII_MODE_RX_ENG_DET;
1483                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1484                         val |= MAC_RGMII_MODE_TX_ENABLE |
1485                                MAC_RGMII_MODE_TX_LOWPWR |
1486                                MAC_RGMII_MODE_TX_RESET;
1487         }
1488         tw32(MAC_EXT_RGMII_MODE, val);
1489 }
1490
1491 static void tg3_mdio_start(struct tg3 *tp)
1492 {
1493         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1494         tw32_f(MAC_MI_MODE, tp->mi_mode);
1495         udelay(80);
1496
1497         if (tg3_flag(tp, MDIOBUS_INITED) &&
1498             tg3_asic_rev(tp) == ASIC_REV_5785)
1499                 tg3_mdio_config_5785(tp);
1500 }
1501
1502 static int tg3_mdio_init(struct tg3 *tp)
1503 {
1504         int i;
1505         u32 reg;
1506         struct phy_device *phydev;
1507
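        /* On 5717-class devices the PHY address is derived from the PCI
         * function number, and serdes instances sit 7 addresses higher.
         */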
1508         if (tg3_flag(tp, 5717_PLUS)) {
1509                 u32 is_serdes;
1510
1511                 tp->phy_addr = tp->pci_fn + 1;
1512
1513                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1514                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1515                 else
1516                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1517                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1518                 if (is_serdes)
1519                         tp->phy_addr += 7;
1520         } else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1521                 int addr;
1522
1523                 addr = ssb_gige_get_phyaddr(tp->pdev);
1524                 if (addr < 0)
1525                         return addr;
1526                 tp->phy_addr = addr;
1527         } else
1528                 tp->phy_addr = TG3_PHY_MII_ADDR;
1529
1530         tg3_mdio_start(tp);
1531
1532         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1533                 return 0;
1534
1535         tp->mdio_bus = mdiobus_alloc();
1536         if (tp->mdio_bus == NULL)
1537                 return -ENOMEM;
1538
1539         tp->mdio_bus->name     = "tg3 mdio bus";
1540         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1541                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1542         tp->mdio_bus->priv     = tp;
1543         tp->mdio_bus->parent   = &tp->pdev->dev;
1544         tp->mdio_bus->read     = &tg3_mdio_read;
1545         tp->mdio_bus->write    = &tg3_mdio_write;
1546         tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1547
1548         /* The bus registration will look for all the PHYs on the mdio bus.
1549          * Unfortunately, it does not ensure the PHY is powered up before
1550          * accessing the PHY ID registers.  A chip reset is the
1551          * quickest way to bring the device back to an operational state.
1552          */
1553         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1554                 tg3_bmcr_reset(tp);
1555
1556         i = mdiobus_register(tp->mdio_bus);
1557         if (i) {
1558                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1559                 mdiobus_free(tp->mdio_bus);
1560                 return i;
1561         }
1562
1563         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
1564
1565         if (!phydev || !phydev->drv) {
1566                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1567                 mdiobus_unregister(tp->mdio_bus);
1568                 mdiobus_free(tp->mdio_bus);
1569                 return -ENODEV;
1570         }
1571
1572         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1573         case PHY_ID_BCM57780:
1574                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1575                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1576                 break;
1577         case PHY_ID_BCM50610:
1578         case PHY_ID_BCM50610M:
1579                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1580                                      PHY_BRCM_RX_REFCLK_UNUSED |
1581                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1582                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 fallthrough;
1584         case PHY_ID_RTL8211C:
1585                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1586                 break;
1587         case PHY_ID_RTL8201E:
1588         case PHY_ID_BCMAC131:
1589                 phydev->interface = PHY_INTERFACE_MODE_MII;
1590                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1591                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1592                 break;
1593         }
1594
1595         tg3_flag_set(tp, MDIOBUS_INITED);
1596
1597         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1598                 tg3_mdio_config_5785(tp);
1599
1600         return 0;
1601 }
1602
1603 static void tg3_mdio_fini(struct tg3 *tp)
1604 {
1605         if (tg3_flag(tp, MDIOBUS_INITED)) {
1606                 tg3_flag_clear(tp, MDIOBUS_INITED);
1607                 mdiobus_unregister(tp->mdio_bus);
1608                 mdiobus_free(tp->mdio_bus);
1609         }
1610 }
1611
1612 /* tp->lock is held. */
1613 static inline void tg3_generate_fw_event(struct tg3 *tp)
1614 {
1615         u32 val;
1616
1617         val = tr32(GRC_RX_CPU_EVENT);
1618         val |= GRC_RX_CPU_DRIVER_EVENT;
1619         tw32_f(GRC_RX_CPU_EVENT, val);
1620
1621         tp->last_event_jiffies = jiffies;
1622 }
1623
1624 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1625
1626 /* tp->lock is held. */
1627 static void tg3_wait_for_event_ack(struct tg3 *tp)
1628 {
1629         int i;
1630         unsigned int delay_cnt;
1631         long time_remain;
1632
1633         /* If enough time has passed, no wait is necessary. */
1634         time_remain = (long)(tp->last_event_jiffies + 1 +
1635                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1636                       (long)jiffies;
1637         if (time_remain < 0)
1638                 return;
1639
1640         /* Check if we can shorten the wait time. */
1641         delay_cnt = jiffies_to_usecs(time_remain);
1642         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1643                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
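        /* The poll loop below delays 8 usec per pass, so convert the
         * remaining wait time into a loop count, rounding up.
         */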
1644         delay_cnt = (delay_cnt >> 3) + 1;
1645
1646         for (i = 0; i < delay_cnt; i++) {
1647                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1648                         break;
1649                 if (pci_channel_offline(tp->pdev))
1650                         break;
1651
1652                 udelay(8);
1653         }
1654 }
1655
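/* Pack the (BMCR, BMSR), (ADVERTISE, LPA) and (CTRL1000, STAT1000)
 * register pairs, plus MII_PHYADDR, into four u32 words for the firmware
 * link-update mailbox.
 */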
1656 /* tp->lock is held. */
1657 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1658 {
1659         u32 reg, val;
1660
1661         val = 0;
1662         if (!tg3_readphy(tp, MII_BMCR, &reg))
1663                 val = reg << 16;
1664         if (!tg3_readphy(tp, MII_BMSR, &reg))
1665                 val |= (reg & 0xffff);
1666         *data++ = val;
1667
1668         val = 0;
1669         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1670                 val = reg << 16;
1671         if (!tg3_readphy(tp, MII_LPA, &reg))
1672                 val |= (reg & 0xffff);
1673         *data++ = val;
1674
1675         val = 0;
1676         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1677                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1678                         val = reg << 16;
1679                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1680                         val |= (reg & 0xffff);
1681         }
1682         *data++ = val;
1683
1684         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1685                 val = reg << 16;
1686         else
1687                 val = 0;
1688         *data++ = val;
1689 }
1690
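/* Pass the current PHY state to the management (ASF/UMP) firmware via
 * the NIC SRAM mailbox so it can track link changes.
 */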
1691 /* tp->lock is held. */
1692 static void tg3_ump_link_report(struct tg3 *tp)
1693 {
1694         u32 data[4];
1695
1696         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1697                 return;
1698
1699         tg3_phy_gather_ump_data(tp, data);
1700
1701         tg3_wait_for_event_ack(tp);
1702
1703         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1704         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1705         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1706         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1707         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1708         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1709
1710         tg3_generate_fw_event(tp);
1711 }
1712
1713 /* tp->lock is held. */
1714 static void tg3_stop_fw(struct tg3 *tp)
1715 {
1716         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1717                 /* Wait for RX CPU to ACK the previous event. */
1718                 tg3_wait_for_event_ack(tp);
1719
1720                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1721
1722                 tg3_generate_fw_event(tp);
1723
1724                 /* Wait for RX CPU to ACK this event. */
1725                 tg3_wait_for_event_ack(tp);
1726         }
1727 }
1728
1729 /* tp->lock is held. */
1730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1731 {
1732         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1733                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1734
1735         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1736                 switch (kind) {
1737                 case RESET_KIND_INIT:
1738                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739                                       DRV_STATE_START);
1740                         break;
1741
1742                 case RESET_KIND_SHUTDOWN:
1743                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1744                                       DRV_STATE_UNLOAD);
1745                         break;
1746
1747                 case RESET_KIND_SUSPEND:
1748                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749                                       DRV_STATE_SUSPEND);
1750                         break;
1751
1752                 default:
1753                         break;
1754                 }
1755         }
1756 }
1757
1758 /* tp->lock is held. */
1759 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1760 {
1761         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1762                 switch (kind) {
1763                 case RESET_KIND_INIT:
1764                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1765                                       DRV_STATE_START_DONE);
1766                         break;
1767
1768                 case RESET_KIND_SHUTDOWN:
1769                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1770                                       DRV_STATE_UNLOAD_DONE);
1771                         break;
1772
1773                 default:
1774                         break;
1775                 }
1776         }
1777 }
1778
1779 /* tp->lock is held. */
1780 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 {
1782         if (tg3_flag(tp, ENABLE_ASF)) {
1783                 switch (kind) {
1784                 case RESET_KIND_INIT:
1785                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1786                                       DRV_STATE_START);
1787                         break;
1788
1789                 case RESET_KIND_SHUTDOWN:
1790                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1791                                       DRV_STATE_UNLOAD);
1792                         break;
1793
1794                 case RESET_KIND_SUSPEND:
1795                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1796                                       DRV_STATE_SUSPEND);
1797                         break;
1798
1799                 default:
1800                         break;
1801                 }
1802         }
1803 }
1804
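/* Wait for the on-chip firmware to finish initializing: the 5906 polls
 * VCPU_STATUS, while other chips wait for the firmware mailbox magic
 * value to be echoed back bit-inverted.
 */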
1805 static int tg3_poll_fw(struct tg3 *tp)
1806 {
1807         int i;
1808         u32 val;
1809
1810         if (tg3_flag(tp, NO_FWARE_REPORTED))
1811                 return 0;
1812
1813         if (tg3_flag(tp, IS_SSB_CORE)) {
1814                 /* We don't use firmware. */
1815                 return 0;
1816         }
1817
1818         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1819                 /* Wait up to 20ms for init done. */
1820                 for (i = 0; i < 200; i++) {
1821                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822                                 return 0;
1823                         if (pci_channel_offline(tp->pdev))
1824                                 return -ENODEV;
1825
1826                         udelay(100);
1827                 }
1828                 return -ENODEV;
1829         }
1830
1831         /* Wait for firmware initialization to complete. */
1832         for (i = 0; i < 100000; i++) {
1833                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1834                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835                         break;
1836                 if (pci_channel_offline(tp->pdev)) {
1837                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1838                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1839                                 netdev_info(tp->dev, "No firmware running\n");
1840                         }
1841
1842                         break;
1843                 }
1844
1845                 udelay(10);
1846         }
1847
1848         /* The chip might not be fitted with firmware.  Some Sun
1849          * onboard parts are configured that way.  So don't treat the
1850          * timeout of the above loop as an error, but do report the
1851          * lack of running firmware once.
1852          */
1853         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1854                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1855
1856                 netdev_info(tp->dev, "No firmware running\n");
1857         }
1858
1859         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1860                 /* The 57765 A0 needs a little more
1861                  * time to do some important work.
1862                  */
1863                 mdelay(10);
1864         }
1865
1866         return 0;
1867 }
1868
1869 static void tg3_link_report(struct tg3 *tp)
1870 {
1871         if (!netif_carrier_ok(tp->dev)) {
1872                 netif_info(tp, link, tp->dev, "Link is down\n");
1873                 tg3_ump_link_report(tp);
1874         } else if (netif_msg_link(tp)) {
1875                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1876                             (tp->link_config.active_speed == SPEED_1000 ?
1877                              1000 :
1878                              (tp->link_config.active_speed == SPEED_100 ?
1879                               100 : 10)),
1880                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1881                              "full" : "half"));
1882
1883                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1884                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885                             "on" : "off",
1886                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1887                             "on" : "off");
1888
1889                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1890                         netdev_info(tp->dev, "EEE is %s\n",
1891                                     tp->setlpicnt ? "enabled" : "disabled");
1892
1893                 tg3_ump_link_report(tp);
1894         }
1895
1896         tp->link_up = netif_carrier_ok(tp->dev);
1897 }
1898
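/* Decode a 1000BASE-T pause advertisement into FLOW_CTRL_* bits:
 * PAUSE_CAP means pause frames can be received, symmetric pause
 * (PAUSE_CAP without PAUSE_ASYM) allows sending them too, and
 * PAUSE_ASYM alone advertises transmit-only pause.
 */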
1899 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1900 {
1901         u32 flowctrl = 0;
1902
1903         if (adv & ADVERTISE_PAUSE_CAP) {
1904                 flowctrl |= FLOW_CTRL_RX;
1905                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1906                         flowctrl |= FLOW_CTRL_TX;
1907         } else if (adv & ADVERTISE_PAUSE_ASYM)
1908                 flowctrl |= FLOW_CTRL_TX;
1909
1910         return flowctrl;
1911 }
1912
1913 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1914 {
1915         u16 miireg;
1916
1917         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1918                 miireg = ADVERTISE_1000XPAUSE;
1919         else if (flow_ctrl & FLOW_CTRL_TX)
1920                 miireg = ADVERTISE_1000XPSE_ASYM;
1921         else if (flow_ctrl & FLOW_CTRL_RX)
1922                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1923         else
1924                 miireg = 0;
1925
1926         return miireg;
1927 }
1928
1929 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1930 {
1931         u32 flowctrl = 0;
1932
1933         if (adv & ADVERTISE_1000XPAUSE) {
1934                 flowctrl |= FLOW_CTRL_RX;
1935                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1936                         flowctrl |= FLOW_CTRL_TX;
1937         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1938                 flowctrl |= FLOW_CTRL_TX;
1939
1940         return flowctrl;
1941 }
1942
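/* Resolve the negotiated 1000BASE-X pause configuration from the local
 * and remote advertisements (cf. IEEE 802.3 Annex 28B):
 *
 *   lcl PAUSE  lcl ASYM  rmt PAUSE  rmt ASYM   resolution
 *   ---------  --------  ---------  --------   ----------
 *       1          x         1          x      TX and RX pause
 *       1          1         0          1      RX pause only
 *       0          1         1          1      TX pause only
 *       (any other combination)                no pause
 */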
1943 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1944 {
1945         u8 cap = 0;
1946
1947         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1948                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1949         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1950                 if (lcladv & ADVERTISE_1000XPAUSE)
1951                         cap = FLOW_CTRL_RX;
1952                 if (rmtadv & ADVERTISE_1000XPAUSE)
1953                         cap = FLOW_CTRL_TX;
1954         }
1955
1956         return cap;
1957 }
1958
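/* Apply the pause configuration to the MAC's RX/TX mode registers,
 * using the autoneg results when pause autoneg is on and the forced
 * tp->link_config.flowctrl setting otherwise.
 */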
1959 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1960 {
1961         u8 autoneg;
1962         u8 flowctrl = 0;
1963         u32 old_rx_mode = tp->rx_mode;
1964         u32 old_tx_mode = tp->tx_mode;
1965
1966         if (tg3_flag(tp, USE_PHYLIB))
1967                 autoneg = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr)->autoneg;
1968         else
1969                 autoneg = tp->link_config.autoneg;
1970
1971         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1972                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1973                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974                 else
1975                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976         } else
1977                 flowctrl = tp->link_config.flowctrl;
1978
1979         tp->link_config.active_flowctrl = flowctrl;
1980
1981         if (flowctrl & FLOW_CTRL_RX)
1982                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983         else
1984                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985
1986         if (old_rx_mode != tp->rx_mode)
1987                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1988
1989         if (flowctrl & FLOW_CTRL_TX)
1990                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991         else
1992                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993
1994         if (old_tx_mode != tp->tx_mode)
1995                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1996 }
1997
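/* phylib adjust_link callback: mirror the PHY's negotiated speed, duplex
 * and pause state into the MAC registers and log any link change.
 */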
1998 static void tg3_adjust_link(struct net_device *dev)
1999 {
2000         u8 oldflowctrl, linkmesg = 0;
2001         u32 mac_mode, lcl_adv, rmt_adv;
2002         struct tg3 *tp = netdev_priv(dev);
2003         struct phy_device *phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2004
2005         spin_lock_bh(&tp->lock);
2006
2007         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2008                                     MAC_MODE_HALF_DUPLEX);
2009
2010         oldflowctrl = tp->link_config.active_flowctrl;
2011
2012         if (phydev->link) {
2013                 lcl_adv = 0;
2014                 rmt_adv = 0;
2015
2016                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2017                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2018                 else if (phydev->speed == SPEED_1000 ||
2019                          tg3_asic_rev(tp) != ASIC_REV_5785)
2020                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021                 else
2022                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2023
2024                 if (phydev->duplex == DUPLEX_HALF)
2025                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2026                 else {
2027                         lcl_adv = mii_advertise_flowctrl(
2028                                   tp->link_config.flowctrl);
2029
2030                         if (phydev->pause)
2031                                 rmt_adv = LPA_PAUSE_CAP;
2032                         if (phydev->asym_pause)
2033                                 rmt_adv |= LPA_PAUSE_ASYM;
2034                 }
2035
2036                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037         } else
2038                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039
2040         if (mac_mode != tp->mac_mode) {
2041                 tp->mac_mode = mac_mode;
2042                 tw32_f(MAC_MODE, tp->mac_mode);
2043                 udelay(40);
2044         }
2045
2046         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2047                 if (phydev->speed == SPEED_10)
2048                         tw32(MAC_MI_STAT,
2049                              MAC_MI_STAT_10MBPS_MODE |
2050                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051                 else
2052                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2053         }
2054
2055         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2056                 tw32(MAC_TX_LENGTHS,
2057                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2058                       (6 << TX_LENGTHS_IPG_SHIFT) |
2059                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060         else
2061                 tw32(MAC_TX_LENGTHS,
2062                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2063                       (6 << TX_LENGTHS_IPG_SHIFT) |
2064                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065
2066         if (phydev->link != tp->old_link ||
2067             phydev->speed != tp->link_config.active_speed ||
2068             phydev->duplex != tp->link_config.active_duplex ||
2069             oldflowctrl != tp->link_config.active_flowctrl)
2070                 linkmesg = 1;
2071
2072         tp->old_link = phydev->link;
2073         tp->link_config.active_speed = phydev->speed;
2074         tp->link_config.active_duplex = phydev->duplex;
2075
2076         spin_unlock_bh(&tp->lock);
2077
2078         if (linkmesg)
2079                 tg3_link_report(tp);
2080 }
2081
2082 static int tg3_phy_init(struct tg3 *tp)
2083 {
2084         struct phy_device *phydev;
2085
2086         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2087                 return 0;
2088
2089         /* Bring the PHY back to a known state. */
2090         tg3_bmcr_reset(tp);
2091
2092         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2093
2094         /* Attach the MAC to the PHY. */
2095         phydev = phy_connect(tp->dev, phydev_name(phydev),
2096                              tg3_adjust_link, phydev->interface);
2097         if (IS_ERR(phydev)) {
2098                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2099                 return PTR_ERR(phydev);
2100         }
2101
2102         /* Mask with MAC supported features. */
2103         switch (phydev->interface) {
2104         case PHY_INTERFACE_MODE_GMII:
2105         case PHY_INTERFACE_MODE_RGMII:
2106                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2107                         phy_set_max_speed(phydev, SPEED_1000);
2108                         phy_support_asym_pause(phydev);
2109                         break;
2110                 }
2111                 fallthrough;
2112         case PHY_INTERFACE_MODE_MII:
2113                 phy_set_max_speed(phydev, SPEED_100);
2114                 phy_support_asym_pause(phydev);
2115                 break;
2116         default:
2117                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2118                 return -EINVAL;
2119         }
2120
2121         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2122
2123         phy_attached_info(phydev);
2124
2125         return 0;
2126 }
2127
2128 static void tg3_phy_start(struct tg3 *tp)
2129 {
2130         struct phy_device *phydev;
2131
2132         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2133                 return;
2134
2135         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
2136
2137         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2138                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2139                 phydev->speed = tp->link_config.speed;
2140                 phydev->duplex = tp->link_config.duplex;
2141                 phydev->autoneg = tp->link_config.autoneg;
2142                 ethtool_convert_legacy_u32_to_link_mode(
2143                         phydev->advertising, tp->link_config.advertising);
2144         }
2145
2146         phy_start(phydev);
2147
2148         phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154                 return;
2155
2156         phy_stop(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162                 phy_disconnect(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
2163                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164         }
2165 }
2166
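/* Enable PHY external loopback through the AUXCTL shadow register.  The
 * 5401 cannot do read-modify-write, so it gets a blind write instead.
 */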
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169         int err;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173                 return 0;
2174
2175         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176                 /* Cannot do read-modify-write on 5401 */
2177                 err = tg3_phy_auxctl_write(tp,
2178                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180                                            0x4c20);
2181                 goto done;
2182         }
2183
2184         err = tg3_phy_auxctl_read(tp,
2185                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186         if (err)
2187                 return err;
2188
2189         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190         err = tg3_phy_auxctl_write(tp,
2191                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194         return err;
2195 }
2196
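/* FET-style PHYs expose the APD (auto power-down) control through a
 * shadow register bank gated by MII_TG3_FET_TEST.
 */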
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199         u32 phytest;
2200
2201         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202                 u32 phy;
2203
2204                 tg3_writephy(tp, MII_TG3_FET_TEST,
2205                              phytest | MII_TG3_FET_SHADOW_EN);
2206                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207                         if (enable)
2208                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209                         else
2210                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212                 }
2213                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214         }
2215 }
2216
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219         u32 reg;
2220
2221         if (!tg3_flag(tp, 5705_PLUS) ||
2222             (tg3_flag(tp, 5717_PLUS) &&
2223              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224                 return;
2225
2226         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227                 tg3_phy_fet_toggle_apd(tp, enable);
2228                 return;
2229         }
2230
2231         reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2232               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2233               MII_TG3_MISC_SHDW_SCR5_SDTL |
2234               MII_TG3_MISC_SHDW_SCR5_C125OE;
2235         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2236                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2237
2238         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2239
2241         reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2242         if (enable)
2243                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2244
2245         tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2246 }
2247
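/* Enable or disable automatic MDI/MDI-X crossover on copper PHYs. */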
2248 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2249 {
2250         u32 phy;
2251
2252         if (!tg3_flag(tp, 5705_PLUS) ||
2253             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2254                 return;
2255
2256         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2257                 u32 ephy;
2258
2259                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2260                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2261
2262                         tg3_writephy(tp, MII_TG3_FET_TEST,
2263                                      ephy | MII_TG3_FET_SHADOW_EN);
2264                         if (!tg3_readphy(tp, reg, &phy)) {
2265                                 if (enable)
2266                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2267                                 else
2268                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2269                                 tg3_writephy(tp, reg, phy);
2270                         }
2271                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2272                 }
2273         } else {
2274                 int ret;
2275
2276                 ret = tg3_phy_auxctl_read(tp,
2277                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2278                 if (!ret) {
2279                         if (enable)
2280                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2281                         else
2282                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2283                         tg3_phy_auxctl_write(tp,
2284                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2285                 }
2286         }
2287 }
2288
2289 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2290 {
2291         int ret;
2292         u32 val;
2293
2294         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2295                 return;
2296
2297         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2298         if (!ret)
2299                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2300                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2301 }
2302
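/* Distribute the one-time-programmable (OTP) calibration word cached in
 * tp->phy_otp across the PHY DSP registers (AGC target, HPF, VDAC,
 * 10BT amplitude and offset fields).
 */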
2303 static void tg3_phy_apply_otp(struct tg3 *tp)
2304 {
2305         u32 otp, phy;
2306
2307         if (!tp->phy_otp)
2308                 return;
2309
2310         otp = tp->phy_otp;
2311
2312         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2313                 return;
2314
2315         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2316         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2317         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2318
2319         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2320               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2321         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2322
2323         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2324         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2325         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2326
2327         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2328         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2329
2330         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2331         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2332
2333         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2334               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2336
2337         tg3_phy_toggle_auxctl_smdsp(tp, false);
2338 }
2339
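/* Snapshot the chip's current EEE state (active/enabled, advertised and
 * link-partner abilities, LPI settings) into @eee, or into tp->eee when
 * @eee is NULL.
 */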
2340 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2341 {
2342         u32 val;
2343         struct ethtool_eee *dest = &tp->eee;
2344
2345         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2346                 return;
2347
2348         if (eee)
2349                 dest = eee;
2350
2351         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2352                 return;
2353
2354         /* Pull eee_active */
2355         if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2356             val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2357                 dest->eee_active = 1;
2358         else
2359                 dest->eee_active = 0;
2360
2361         /* Pull lp advertised settings */
2362         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2363                 return;
2364         dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2365
2366         /* Pull advertised and eee_enabled settings */
2367         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2368                 return;
2369         dest->eee_enabled = !!val;
2370         dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2371
2372         /* Pull tx_lpi_enabled */
2373         val = tr32(TG3_CPMU_EEE_MODE);
2374         dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2375
2376         /* Pull lpi timer value */
2377         dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2378 }
2379
2380 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2381 {
2382         u32 val;
2383
2384         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2385                 return;
2386
2387         tp->setlpicnt = 0;
2388
2389         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2390             current_link_up &&
2391             tp->link_config.active_duplex == DUPLEX_FULL &&
2392             (tp->link_config.active_speed == SPEED_100 ||
2393              tp->link_config.active_speed == SPEED_1000)) {
2394                 u32 eeectl;
2395
2396                 if (tp->link_config.active_speed == SPEED_1000)
2397                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2398                 else
2399                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2400
2401                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2402
2403                 tg3_eee_pull_config(tp, NULL);
2404                 if (tp->eee.eee_active)
2405                         tp->setlpicnt = 2;
2406         }
2407
2408         if (!tp->setlpicnt) {
2409                 if (current_link_up &&
2410                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2411                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2412                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2413                 }
2414
2415                 val = tr32(TG3_CPMU_EEE_MODE);
2416                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2417         }
2418 }
2419
2420 static void tg3_phy_eee_enable(struct tg3 *tp)
2421 {
2422         u32 val;
2423
2424         if (tp->link_config.active_speed == SPEED_1000 &&
2425             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2426              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2427              tg3_flag(tp, 57765_CLASS)) &&
2428             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2429                 val = MII_TG3_DSP_TAP26_ALNOKO |
2430                       MII_TG3_DSP_TAP26_RMRXSTO;
2431                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2432                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2433         }
2434
2435         val = tr32(TG3_CPMU_EEE_MODE);
2436         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2437 }
2438
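/* Poll the DSP control register until the macro-busy bit (0x1000)
 * clears; return -EBUSY if it never does.
 */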
2439 static int tg3_wait_macro_done(struct tg3 *tp)
2440 {
2441         int limit = 100;
2442
2443         while (limit--) {
2444                 u32 tmp32;
2445
2446                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2447                         if ((tmp32 & 0x1000) == 0)
2448                                 break;
2449                 }
2450         }
2451         if (limit < 0)
2452                 return -EBUSY;
2453
2454         return 0;
2455 }
2456
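/* Write a known test pattern into each of the four DSP channels and read
 * it back to verify.  On a mismatch or macro timeout, request another
 * PHY reset through *resetp and return -EBUSY.
 */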
2457 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2458 {
2459         static const u32 test_pat[4][6] = {
2460         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2461         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2462         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2463         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2464         };
2465         int chan;
2466
2467         for (chan = 0; chan < 4; chan++) {
2468                 int i;
2469
2470                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2471                              (chan * 0x2000) | 0x0200);
2472                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2473
2474                 for (i = 0; i < 6; i++)
2475                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2476                                      test_pat[chan][i]);
2477
2478                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2479                 if (tg3_wait_macro_done(tp)) {
2480                         *resetp = 1;
2481                         return -EBUSY;
2482                 }
2483
2484                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2485                              (chan * 0x2000) | 0x0200);
2486                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2487                 if (tg3_wait_macro_done(tp)) {
2488                         *resetp = 1;
2489                         return -EBUSY;
2490                 }
2491
2492                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2493                 if (tg3_wait_macro_done(tp)) {
2494                         *resetp = 1;
2495                         return -EBUSY;
2496                 }
2497
2498                 for (i = 0; i < 6; i += 2) {
2499                         u32 low, high;
2500
2501                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2502                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2503                             tg3_wait_macro_done(tp)) {
2504                                 *resetp = 1;
2505                                 return -EBUSY;
2506                         }
2507                         low &= 0x7fff;
2508                         high &= 0x000f;
2509                         if (low != test_pat[chan][i] ||
2510                             high != test_pat[chan][i+1]) {
2511                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2512                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2513                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2514
2515                                 return -EBUSY;
2516                         }
2517                 }
2518         }
2519
2520         return 0;
2521 }
2522
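/* Clear the test pattern out of all four DSP channels. */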
2523 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2524 {
2525         int chan;
2526
2527         for (chan = 0; chan < 4; chan++) {
2528                 int i;
2529
2530                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2531                              (chan * 0x2000) | 0x0200);
2532                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2533                 for (i = 0; i < 6; i++)
2534                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2535                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2536                 if (tg3_wait_macro_done(tp))
2537                         return -EBUSY;
2538         }
2539
2540         return 0;
2541 }
2542
2543 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2544 {
2545         u32 reg32, phy9_orig;
2546         int retries, do_phy_reset, err;
2547
2548         retries = 10;
2549         do_phy_reset = 1;
2550         do {
2551                 if (do_phy_reset) {
2552                         err = tg3_bmcr_reset(tp);
2553                         if (err)
2554                                 return err;
2555                         do_phy_reset = 0;
2556                 }
2557
2558                 /* Disable transmitter and interrupt.  */
2559                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2560                         continue;
2561
2562                 reg32 |= 0x3000;
2563                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2564
2565                 /* Set full-duplex, 1000 Mbps.  */
2566                 tg3_writephy(tp, MII_BMCR,
2567                              BMCR_FULLDPLX | BMCR_SPEED1000);
2568
2569                 /* Set to master mode.  */
2570                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2571                         continue;
2572
2573                 tg3_writephy(tp, MII_CTRL1000,
2574                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2575
2576                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2577                 if (err)
2578                         return err;
2579
2580                 /* Block the PHY control access.  */
2581                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2582
2583                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2584                 if (!err)
2585                         break;
2586         } while (--retries);
2587
2588         err = tg3_phy_reset_chanpat(tp);
2589         if (err)
2590                 return err;
2591
2592         tg3_phydsp_write(tp, 0x8005, 0x0000);
2593
2594         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2595         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2596
2597         tg3_phy_toggle_auxctl_smdsp(tp, false);
2598
2599         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2600
2601         err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2602         if (err)
2603                 return err;
2604
2605         reg32 &= ~0x3000;
2606         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2607
2608         return 0;
2609 }
2610
2611 static void tg3_carrier_off(struct tg3 *tp)
2612 {
2613         netif_carrier_off(tp->dev);
2614         tp->link_up = false;
2615 }
2616
2617 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2618 {
2619         if (tg3_flag(tp, ENABLE_ASF))
2620                 netdev_warn(tp->dev,
2621                             "Management side-band traffic will be interrupted during phy settings change\n");
2622 }
2623
2624 /* Reset the tigon3 PHY unconditionally and reapply the chip- and
2625  * PHY-specific workarounds that must follow a reset.
2626  */
2627 static int tg3_phy_reset(struct tg3 *tp)
2628 {
2629         u32 val, cpmuctrl;
2630         int err;
2631
2632         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2633                 val = tr32(GRC_MISC_CFG);
2634                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2635                 udelay(40);
2636         }
2637         err  = tg3_readphy(tp, MII_BMSR, &val);
2638         err |= tg3_readphy(tp, MII_BMSR, &val);
2639         if (err != 0)
2640                 return -EBUSY;
2641
2642         if (netif_running(tp->dev) && tp->link_up) {
2643                 netif_carrier_off(tp->dev);
2644                 tg3_link_report(tp);
2645         }
2646
2647         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2648             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2649             tg3_asic_rev(tp) == ASIC_REV_5705) {
2650                 err = tg3_phy_reset_5703_4_5(tp);
2651                 if (err)
2652                         return err;
2653                 goto out;
2654         }
2655
2656         cpmuctrl = 0;
2657         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2658             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2659                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2660                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2661                         tw32(TG3_CPMU_CTRL,
2662                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2663         }
2664
2665         err = tg3_bmcr_reset(tp);
2666         if (err)
2667                 return err;
2668
2669         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2670                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2671                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2672
2673                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2674         }
2675
2676         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2677             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2678                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2679                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2680                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2681                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2682                         udelay(40);
2683                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2684                 }
2685         }
2686
2687         if (tg3_flag(tp, 5717_PLUS) &&
2688             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2689                 return 0;
2690
2691         tg3_phy_apply_otp(tp);
2692
2693         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2694                 tg3_phy_toggle_apd(tp, true);
2695         else
2696                 tg3_phy_toggle_apd(tp, false);
2697
2698 out:
2699         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2700             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2701                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2702                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2703                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2704         }
2705
2706         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2707                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2708                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2709         }
2710
2711         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2712                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2713                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2714                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2715                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2716                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2717                 }
2718         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2719                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2721                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2722                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2723                                 tg3_writephy(tp, MII_TG3_TEST1,
2724                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2725                         } else
2726                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2727
2728                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2729                 }
2730         }
2731
2732         /* Set the extended packet length bit (bit 14) on all chips
2733          * that support jumbo frames. */
2734         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2735                 /* Cannot do read-modify-write on 5401 */
2736                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2737         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2738                 /* Set bit 14 with read-modify-write to preserve other bits */
2739                 err = tg3_phy_auxctl_read(tp,
2740                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2741                 if (!err)
2742                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2743                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2744         }
2745
2746         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2747          * jumbo frame transmission.
2748          */
2749         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2750                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2751                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2752                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2753         }
2754
2755         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2756                 /* adjust output voltage */
2757                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2758         }
2759
2760         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2761                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2762
2763         tg3_phy_toggle_automdix(tp, true);
2764         tg3_phy_set_wirespeed(tp);
2765         return 0;
2766 }
2767
2768 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2769 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2770 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2771                                           TG3_GPIO_MSG_NEED_VAUX)
2772 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2773         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2774          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2775          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2776          (TG3_GPIO_MSG_DRVR_PRES << 12))
2777
2778 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2779         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2780          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2781          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2782          (TG3_GPIO_MSG_NEED_VAUX << 12))
2783
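/* Each PCI function owns a 4-bit status nibble in the shared GPIO
 * message word (TG3_APE_GPIO_MSG on 5717/5719, TG3_CPMU_DRV_STATUS
 * otherwise).  Replace this function's nibble with newstat and return
 * the updated status of all functions.
 */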
2784 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2785 {
2786         u32 status, shift;
2787
2788         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2789             tg3_asic_rev(tp) == ASIC_REV_5719)
2790                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2791         else
2792                 status = tr32(TG3_CPMU_DRV_STATUS);
2793
2794         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2795         status &= ~(TG3_GPIO_MSG_MASK << shift);
2796         status |= (newstat << shift);
2797
2798         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2799             tg3_asic_rev(tp) == ASIC_REV_5719)
2800                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2801         else
2802                 tw32(TG3_CPMU_DRV_STATUS, status);
2803
2804         return status >> TG3_APE_GPIO_MSG_SHIFT;
2805 }
2806
2807 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2808 {
2809         if (!tg3_flag(tp, IS_NIC))
2810                 return 0;
2811
2812         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2813             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2814             tg3_asic_rev(tp) == ASIC_REV_5720) {
2815                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2816                         return -EIO;
2817
2818                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2819
2820                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2821                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2822
2823                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2824         } else {
2825                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2827         }
2828
2829         return 0;
2830 }
2831
2832 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2833 {
2834         u32 grc_local_ctrl;
2835
2836         if (!tg3_flag(tp, IS_NIC) ||
2837             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2838             tg3_asic_rev(tp) == ASIC_REV_5701)
2839                 return;
2840
2841         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2842
2843         tw32_wait_f(GRC_LOCAL_CTRL,
2844                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2845                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2846
2847         tw32_wait_f(GRC_LOCAL_CTRL,
2848                     grc_local_ctrl,
2849                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2850
2851         tw32_wait_f(GRC_LOCAL_CTRL,
2852                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2853                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 }
2855
2856 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2857 {
2858         if (!tg3_flag(tp, IS_NIC))
2859                 return;
2860
2861         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2862             tg3_asic_rev(tp) == ASIC_REV_5701) {
2863                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864                             (GRC_LCLCTRL_GPIO_OE0 |
2865                              GRC_LCLCTRL_GPIO_OE1 |
2866                              GRC_LCLCTRL_GPIO_OE2 |
2867                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2868                              GRC_LCLCTRL_GPIO_OUTPUT1),
2869                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2870         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2871                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2872                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2873                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2874                                      GRC_LCLCTRL_GPIO_OE1 |
2875                                      GRC_LCLCTRL_GPIO_OE2 |
2876                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2877                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2878                                      tp->grc_local_ctrl;
2879                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2880                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2881
2882                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2883                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2884                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2885
2886                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2887                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889         } else {
2890                 u32 no_gpio2;
2891                 u32 grc_local_ctrl = 0;
2892
2893                 /* Workaround to prevent drawing too much current. */
2894                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2895                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2896                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2897                                     grc_local_ctrl,
2898                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2899                 }
2900
2901                 /* On 5753 and variants, GPIO2 cannot be used. */
2902                 no_gpio2 = tp->nic_sram_data_cfg &
2903                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2904
2905                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2906                                   GRC_LCLCTRL_GPIO_OE1 |
2907                                   GRC_LCLCTRL_GPIO_OE2 |
2908                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2909                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2910                 if (no_gpio2) {
2911                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2912                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2913                 }
2914                 tw32_wait_f(GRC_LOCAL_CTRL,
2915                             tp->grc_local_ctrl | grc_local_ctrl,
2916                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2917
2918                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2919
2920                 tw32_wait_f(GRC_LOCAL_CTRL,
2921                             tp->grc_local_ctrl | grc_local_ctrl,
2922                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2923
2924                 if (!no_gpio2) {
2925                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2926                         tw32_wait_f(GRC_LOCAL_CTRL,
2927                                     tp->grc_local_ctrl | grc_local_ctrl,
2928                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2929                 }
2930         }
2931 }
2932
2933 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2934 {
2935         u32 msg = 0;
2936
2937         /* Serialize power state transitions */
2938         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2939                 return;
2940
2941         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2942                 msg = TG3_GPIO_MSG_NEED_VAUX;
2943
2944         msg = tg3_set_function_status(tp, msg);
2945
2946         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2947                 goto done;
2948
2949         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2950                 tg3_pwrsrc_switch_to_vaux(tp);
2951         else
2952                 tg3_pwrsrc_die_with_vmain(tp);
2953
2954 done:
2955         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2956 }
2957
2958 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2959 {
2960         bool need_vaux = false;
2961
2962         /* The GPIOs do something completely different on 57765. */
2963         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2964                 return;
2965
2966         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2967             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2968             tg3_asic_rev(tp) == ASIC_REV_5720) {
2969                 tg3_frob_aux_power_5717(tp, include_wol ?
2970                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2971                 return;
2972         }
2973
2974         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2975                 struct net_device *dev_peer;
2976
2977                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2978
2979                 /* remove_one() may have been run on the peer. */
2980                 if (dev_peer) {
2981                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2982
2983                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2984                                 return;
2985
2986                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2987                             tg3_flag(tp_peer, ENABLE_ASF))
2988                                 need_vaux = true;
2989                 }
2990         }
2991
2992         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2993             tg3_flag(tp, ENABLE_ASF))
2994                 need_vaux = true;
2995
2996         if (need_vaux)
2997                 tg3_pwrsrc_switch_to_vaux(tp);
2998         else
2999                 tg3_pwrsrc_die_with_vmain(tp);
3000 }
3001
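/* Illustrative sketch (not part of the original driver): the aux power
 * decision above reduces to one predicate, evaluated for this device and,
 * on dual-port boards, its peer (the 5717-class path also folds in
 * ENABLE_APE):
 *
 *     need_vaux = (include_wol && tg3_flag(tp, WOL_ENABLE)) ||
 *                 tg3_flag(tp, ENABLE_ASF);
 *     need_vaux ? tg3_pwrsrc_switch_to_vaux(tp)
 *               : tg3_pwrsrc_die_with_vmain(tp);
 *
 * i.e. auxiliary power is retained only while wake-on-LAN or ASF
 * management still needs the NIC alive after the host powers down.
 */
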
3002 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3003 {
3004         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3005                 return 1;
3006         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3007                 if (speed != SPEED_10)
3008                         return 1;
3009         } else if (speed == SPEED_10)
3010                 return 1;
3011
3012         return 0;
3013 }
3014
3015 static bool tg3_phy_power_bug(struct tg3 *tp)
3016 {
3017         switch (tg3_asic_rev(tp)) {
3018         case ASIC_REV_5700:
3019         case ASIC_REV_5704:
3020                 return true;
3021         case ASIC_REV_5780:
3022                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3023                         return true;
3024                 return false;
3025         case ASIC_REV_5717:
3026                 if (!tp->pci_fn)
3027                         return true;
3028                 return false;
3029         case ASIC_REV_5719:
3030         case ASIC_REV_5720:
3031                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3032                     !tp->pci_fn)
3033                         return true;
3034                 return false;
3035         }
3036
3037         return false;
3038 }
3039
3040 static bool tg3_phy_led_bug(struct tg3 *tp)
3041 {
3042         switch (tg3_asic_rev(tp)) {
3043         case ASIC_REV_5719:
3044         case ASIC_REV_5720:
3045                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3046                     !tp->pci_fn)
3047                         return true;
3048                 return false;
3049         }
3050
3051         return false;
3052 }
3053
3054 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3055 {
3056         u32 val;
3057
3058         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3059                 return;
3060
3061         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3062                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3063                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3064                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3065
3066                         sg_dig_ctrl |=
3067                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3068                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3069                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3070                 }
3071                 return;
3072         }
3073
3074         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3075                 tg3_bmcr_reset(tp);
3076                 val = tr32(GRC_MISC_CFG);
3077                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3078                 udelay(40);
3079                 return;
3080         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3081                 u32 phytest;
3082                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3083                         u32 phy;
3084
3085                         tg3_writephy(tp, MII_ADVERTISE, 0);
3086                         tg3_writephy(tp, MII_BMCR,
3087                                      BMCR_ANENABLE | BMCR_ANRESTART);
3088
3089                         tg3_writephy(tp, MII_TG3_FET_TEST,
3090                                      phytest | MII_TG3_FET_SHADOW_EN);
3091                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3092                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3093                                 tg3_writephy(tp,
3094                                              MII_TG3_FET_SHDW_AUXMODE4,
3095                                              phy);
3096                         }
3097                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3098                 }
3099                 return;
3100         } else if (do_low_power) {
3101                 if (!tg3_phy_led_bug(tp))
3102                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3103                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3104
3105                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3106                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3107                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3108                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3109         }
3110
3111         /* Some chips have PHY power-down bugs, so the PHY must not
3112          * be powered down on them.
3113          */
3114         if (tg3_phy_power_bug(tp))
3115                 return;
3116
3117         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3118             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3119                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3120                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3121                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3122                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3123         }
3124
3125         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3126 }
3127
3128 /* tp->lock is held. */
3129 static int tg3_nvram_lock(struct tg3 *tp)
3130 {
3131         if (tg3_flag(tp, NVRAM)) {
3132                 int i;
3133
3134                 if (tp->nvram_lock_cnt == 0) {
3135                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3136                         for (i = 0; i < 8000; i++) {
3137                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3138                                         break;
3139                                 udelay(20);
3140                         }
3141                         if (i == 8000) {
3142                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3143                                 return -ENODEV;
3144                         }
3145                 }
3146                 tp->nvram_lock_cnt++;
3147         }
3148         return 0;
3149 }
3150
3151 /* tp->lock is held. */
3152 static void tg3_nvram_unlock(struct tg3 *tp)
3153 {
3154         if (tg3_flag(tp, NVRAM)) {
3155                 if (tp->nvram_lock_cnt > 0)
3156                         tp->nvram_lock_cnt--;
3157                 if (tp->nvram_lock_cnt == 0)
3158                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3159         }
3160 }
3161
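/* Usage sketch (illustrative; tp->lock held, as the comments above note).
 * The nvram_lock_cnt counter makes the lock recursive, so the hardware
 * arbitration bit is only dropped when the outermost holder unlocks:
 *
 *     if (tg3_nvram_lock(tp))
 *             return -ENODEV;          // SWARB grant timed out
 *     tg3_enable_nvram_access(tp);
 *     // ... issue NVRAM commands ...
 *     tg3_disable_nvram_access(tp);
 *     tg3_nvram_unlock(tp);            // releases SWARB at count zero
 */
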
3162 /* tp->lock is held. */
3163 static void tg3_enable_nvram_access(struct tg3 *tp)
3164 {
3165         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3166                 u32 nvaccess = tr32(NVRAM_ACCESS);
3167
3168                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3169         }
3170 }
3171
3172 /* tp->lock is held. */
3173 static void tg3_disable_nvram_access(struct tg3 *tp)
3174 {
3175         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3176                 u32 nvaccess = tr32(NVRAM_ACCESS);
3177
3178                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3179         }
3180 }
3181
3182 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3183                                         u32 offset, u32 *val)
3184 {
3185         u32 tmp;
3186         int i;
3187
3188         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3189                 return -EINVAL;
3190
3191         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3192                                         EEPROM_ADDR_DEVID_MASK |
3193                                         EEPROM_ADDR_READ);
3194         tw32(GRC_EEPROM_ADDR,
3195              tmp |
3196              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3197              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3198               EEPROM_ADDR_ADDR_MASK) |
3199              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3200
3201         for (i = 0; i < 1000; i++) {
3202                 tmp = tr32(GRC_EEPROM_ADDR);
3203
3204                 if (tmp & EEPROM_ADDR_COMPLETE)
3205                         break;
3206                 msleep(1);
3207         }
3208         if (!(tmp & EEPROM_ADDR_COMPLETE))
3209                 return -EBUSY;
3210
3211         tmp = tr32(GRC_EEPROM_DATA);
3212
3213         /*
3214          * The data will always be opposite the native endian
3215          * format.  Perform a blind byteswap to compensate.
3216          */
3217         *val = swab32(tmp);
3218
3219         return 0;
3220 }
3221
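/* Illustrative note on the blind byteswap above: after swab32(), the
 * caller sees the same convention that tg3_nvram_read() documents below;
 * a big-endian host gets the NVRAM bytestream as-is, while a
 * little-endian host sees the byteswapped value.  A sketch of recovering
 * the raw bytestream (this is what tg3_nvram_read_be32() does):
 *
 *     u32 v;
 *     if (!tg3_nvram_read_using_eeprom(tp, 0, &v)) {
 *             __be32 raw = cpu_to_be32(v);   // raw NVRAM bytes
 *     }
 */
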
3222 #define NVRAM_CMD_TIMEOUT 10000
3223
3224 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3225 {
3226         int i;
3227
3228         tw32(NVRAM_CMD, nvram_cmd);
3229         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3230                 usleep_range(10, 40);
3231                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3232                         udelay(10);
3233                         break;
3234                 }
3235         }
3236
3237         if (i == NVRAM_CMD_TIMEOUT)
3238                 return -EBUSY;
3239
3240         return 0;
3241 }
3242
3243 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3244 {
3245         if (tg3_flag(tp, NVRAM) &&
3246             tg3_flag(tp, NVRAM_BUFFERED) &&
3247             tg3_flag(tp, FLASH) &&
3248             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3249             (tp->nvram_jedecnum == JEDEC_ATMEL))
3250
3251                 addr = ((addr / tp->nvram_pagesize) <<
3252                         ATMEL_AT45DB0X1B_PAGE_POS) +
3253                        (addr % tp->nvram_pagesize);
3254
3255         return addr;
3256 }
3257
3258 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3259 {
3260         if (tg3_flag(tp, NVRAM) &&
3261             tg3_flag(tp, NVRAM_BUFFERED) &&
3262             tg3_flag(tp, FLASH) &&
3263             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3264             (tp->nvram_jedecnum == JEDEC_ATMEL))
3265
3266                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3267                         tp->nvram_pagesize) +
3268                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3269
3270         return addr;
3271 }
3272
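/* Worked example (illustrative figures): AT45DB0x1B parts use 264-byte
 * pages addressed in 512-byte strides (ATMEL_AT45DB0X1B_PAGE_POS == 9),
 * so a linear offset of 600 maps as:
 *
 *     page = 600 / 264 = 2,  byte-in-page = 600 % 264 = 72
 *     phys = (2 << 9) + 72 = 0x448
 *
 * and tg3_nvram_logical_addr() inverts it:
 *
 *     (0x448 >> 9) * 264 + (0x448 & 511) = 2 * 264 + 72 = 600
 */
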
3273 /* NOTE: Data read in from NVRAM is byteswapped according to
3274  * the byteswapping settings for all other register accesses.
3275  * tg3 devices are BE devices, so on a BE machine, the data
3276  * returned will be exactly as it is seen in NVRAM.  On a LE
3277  * machine, the 32-bit value will be byteswapped.
3278  */
3279 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3280 {
3281         int ret;
3282
3283         if (!tg3_flag(tp, NVRAM))
3284                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3285
3286         offset = tg3_nvram_phys_addr(tp, offset);
3287
3288         if (offset > NVRAM_ADDR_MSK)
3289                 return -EINVAL;
3290
3291         ret = tg3_nvram_lock(tp);
3292         if (ret)
3293                 return ret;
3294
3295         tg3_enable_nvram_access(tp);
3296
3297         tw32(NVRAM_ADDR, offset);
3298         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3299                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3300
3301         if (ret == 0)
3302                 *val = tr32(NVRAM_RDDATA);
3303
3304         tg3_disable_nvram_access(tp);
3305
3306         tg3_nvram_unlock(tp);
3307
3308         return ret;
3309 }
3310
3311 /* Ensures NVRAM data is in bytestream format. */
3312 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3313 {
3314         u32 v;
3315         int res = tg3_nvram_read(tp, offset, &v);
3316         if (!res)
3317                 *val = cpu_to_be32(v);
3318         return res;
3319 }
3320
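/* Usage sketch (illustrative offset): use the __be32 variant whenever the
 * bytes, not the host-order value, matter, e.g. when checksumming or
 * copying NVRAM contents out verbatim:
 *
 *     __be32 word;
 *     if (tg3_nvram_read_be32(tp, 0x100, &word))
 *             goto err;                  // propagate the read failure
 *     memcpy(buf, &word, sizeof(word));  // buf holds the raw bytestream
 */
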
3321 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3322                                     u32 offset, u32 len, u8 *buf)
3323 {
3324         int i, j, rc = 0;
3325         u32 val;
3326
3327         for (i = 0; i < len; i += 4) {
3328                 u32 addr;
3329                 __be32 data;
3330
3331                 addr = offset + i;
3332
3333                 memcpy(&data, buf + i, 4);
3334
3335                 /*
3336                  * The SEEPROM interface expects the data to always be opposite
3337                  * the native endian format.  We accomplish this by reversing
3338                  * all the operations that would have been performed on the
3339                  * data from a call to tg3_nvram_read_be32().
3340                  */
3341                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3342
3343                 val = tr32(GRC_EEPROM_ADDR);
3344                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3345
3346                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3347                         EEPROM_ADDR_READ);
3348                 tw32(GRC_EEPROM_ADDR, val |
3349                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3350                         (addr & EEPROM_ADDR_ADDR_MASK) |
3351                         EEPROM_ADDR_START |
3352                         EEPROM_ADDR_WRITE);
3353
3354                 for (j = 0; j < 1000; j++) {
3355                         val = tr32(GRC_EEPROM_ADDR);
3356
3357                         if (val & EEPROM_ADDR_COMPLETE)
3358                                 break;
3359                         msleep(1);
3360                 }
3361                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3362                         rc = -EBUSY;
3363                         break;
3364                 }
3365         }
3366
3367         return rc;
3368 }
3369
3370 /* offset and length are dword aligned */
3371 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3372                 u8 *buf)
3373 {
3374         int ret = 0;
3375         u32 pagesize = tp->nvram_pagesize;
3376         u32 pagemask = pagesize - 1;
3377         u32 nvram_cmd;
3378         u8 *tmp;
3379
3380         tmp = kmalloc(pagesize, GFP_KERNEL);
3381         if (tmp == NULL)
3382                 return -ENOMEM;
3383
3384         while (len) {
3385                 int j;
3386                 u32 phy_addr, page_off, size;
3387
3388                 phy_addr = offset & ~pagemask;
3389
3390                 for (j = 0; j < pagesize; j += 4) {
3391                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3392                                                   (__be32 *) (tmp + j));
3393                         if (ret)
3394                                 break;
3395                 }
3396                 if (ret)
3397                         break;
3398
3399                 page_off = offset & pagemask;
3400                 size = pagesize;
3401                 if (len < size)
3402                         size = len;
3403
3404                 len -= size;
3405
3406                 memcpy(tmp + page_off, buf, size);
3407
3408                 offset = offset + (pagesize - page_off);
3409
3410                 tg3_enable_nvram_access(tp);
3411
3412                 /*
3413                  * Before we can erase the flash page, we need
3414                  * to issue a special "write enable" command.
3415                  */
3416                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3417
3418                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3419                         break;
3420
3421                 /* Erase the target page */
3422                 tw32(NVRAM_ADDR, phy_addr);
3423
3424                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3425                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3426
3427                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428                         break;
3429
3430                 /* Issue another write enable to start the write. */
3431                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3432
3433                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3434                         break;
3435
3436                 for (j = 0; j < pagesize; j += 4) {
3437                         __be32 data;
3438
3439                         data = *((__be32 *) (tmp + j));
3440
3441                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3442
3443                         tw32(NVRAM_ADDR, phy_addr + j);
3444
3445                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3446                                 NVRAM_CMD_WR;
3447
3448                         if (j == 0)
3449                                 nvram_cmd |= NVRAM_CMD_FIRST;
3450                         else if (j == (pagesize - 4))
3451                                 nvram_cmd |= NVRAM_CMD_LAST;
3452
3453                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3454                         if (ret)
3455                                 break;
3456                 }
3457                 if (ret)
3458                         break;
3459         }
3460
3461         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3462         tg3_nvram_exec_cmd(tp, nvram_cmd);
3463
3464         kfree(tmp);
3465
3466         return ret;
3467 }
3468
3469 /* offset and length are dword aligned */
3470 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3471                 u8 *buf)
3472 {
3473         int i, ret = 0;
3474
3475         for (i = 0; i < len; i += 4, offset += 4) {
3476                 u32 page_off, phy_addr, nvram_cmd;
3477                 __be32 data;
3478
3479                 memcpy(&data, buf + i, 4);
3480                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3481
3482                 page_off = offset % tp->nvram_pagesize;
3483
3484                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3485
3486                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3487
3488                 if (page_off == 0 || i == 0)
3489                         nvram_cmd |= NVRAM_CMD_FIRST;
3490                 if (page_off == (tp->nvram_pagesize - 4))
3491                         nvram_cmd |= NVRAM_CMD_LAST;
3492
3493                 if (i == (len - 4))
3494                         nvram_cmd |= NVRAM_CMD_LAST;
3495
3496                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3497                     !tg3_flag(tp, FLASH) ||
3498                     !tg3_flag(tp, 57765_PLUS))
3499                         tw32(NVRAM_ADDR, phy_addr);
3500
3501                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3502                     !tg3_flag(tp, 5755_PLUS) &&
3503                     (tp->nvram_jedecnum == JEDEC_ST) &&
3504                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3505                         u32 cmd;
3506
3507                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3508                         ret = tg3_nvram_exec_cmd(tp, cmd);
3509                         if (ret)
3510                                 break;
3511                 }
3512                 if (!tg3_flag(tp, FLASH)) {
3513                         /* We always do complete word writes to the EEPROM. */
3514                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3515                 }
3516
3517                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3518                 if (ret)
3519                         break;
3520         }
3521         return ret;
3522 }
3523
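/* Worked example (illustrative geometry): with a 256-byte page, writing
 * 12 bytes starting at offset 252 issues three dword commands whose
 * FIRST/LAST flags come from the logic above:
 *
 *     offset 252: i == 0 and page_off == pagesize - 4  ->  FIRST | LAST
 *     offset 256: page_off == 0                        ->  FIRST
 *     offset 260: i == len - 4                         ->  LAST
 *
 * Each page boundary therefore closes one program cycle and opens the
 * next.
 */
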
3524 /* offset and length are dword aligned */
3525 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3526 {
3527         int ret;
3528
3529         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3530                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3531                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3532                 udelay(40);
3533         }
3534
3535         if (!tg3_flag(tp, NVRAM)) {
3536                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3537         } else {
3538                 u32 grc_mode;
3539
3540                 ret = tg3_nvram_lock(tp);
3541                 if (ret)
3542                         return ret;
3543
3544                 tg3_enable_nvram_access(tp);
3545                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3546                         tw32(NVRAM_WRITE1, 0x406);
3547
3548                 grc_mode = tr32(GRC_MODE);
3549                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3550
3551                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3552                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3553                                 buf);
3554                 } else {
3555                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3556                                 buf);
3557                 }
3558
3559                 grc_mode = tr32(GRC_MODE);
3560                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3561
3562                 tg3_disable_nvram_access(tp);
3563                 tg3_nvram_unlock(tp);
3564         }
3565
3566         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3567                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3568                 udelay(40);
3569         }
3570
3571         return ret;
3572 }
3573
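/* Usage sketch (illustrative): callers pass a dword-aligned offset,
 * length and bytestream; write protection, locking and the choice of
 * buffered vs. unbuffered programming are all handled internally:
 *
 *     u8 buf[4] = { 0xde, 0xad, 0xbe, 0xef };   // example payload
 *     int err = tg3_nvram_write_block(tp, 0x100, sizeof(buf), buf);
 */
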
3574 #define RX_CPU_SCRATCH_BASE     0x30000
3575 #define RX_CPU_SCRATCH_SIZE     0x04000
3576 #define TX_CPU_SCRATCH_BASE     0x34000
3577 #define TX_CPU_SCRATCH_SIZE     0x04000
3578
3579 /* tp->lock is held. */
3580 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3581 {
3582         int i;
3583         const int iters = 10000;
3584
3585         for (i = 0; i < iters; i++) {
3586                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3587                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3588                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3589                         break;
3590                 if (pci_channel_offline(tp->pdev))
3591                         return -EBUSY;
3592         }
3593
3594         return (i == iters) ? -EBUSY : 0;
3595 }
3596
3597 /* tp->lock is held. */
3598 static int tg3_rxcpu_pause(struct tg3 *tp)
3599 {
3600         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3601
3602         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3603         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3604         udelay(10);
3605
3606         return rc;
3607 }
3608
3609 /* tp->lock is held. */
3610 static int tg3_txcpu_pause(struct tg3 *tp)
3611 {
3612         return tg3_pause_cpu(tp, TX_CPU_BASE);
3613 }
3614
3615 /* tp->lock is held. */
3616 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3617 {
3618         tw32(cpu_base + CPU_STATE, 0xffffffff);
3619         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3620 }
3621
3622 /* tp->lock is held. */
3623 static void tg3_rxcpu_resume(struct tg3 *tp)
3624 {
3625         tg3_resume_cpu(tp, RX_CPU_BASE);
3626 }
3627
3628 /* tp->lock is held. */
3629 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3630 {
3631         int rc;
3632
3633         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3634
3635         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3636                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3637
3638                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3639                 return 0;
3640         }
3641         if (cpu_base == RX_CPU_BASE) {
3642                 rc = tg3_rxcpu_pause(tp);
3643         } else {
3644                 /*
3645                  * There is only an Rx CPU for the 5750 derivative in the
3646                  * BCM4785.
3647                  */
3648                 if (tg3_flag(tp, IS_SSB_CORE))
3649                         return 0;
3650
3651                 rc = tg3_txcpu_pause(tp);
3652         }
3653
3654         if (rc) {
3655                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3656                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3657                 return -ENODEV;
3658         }
3659
3660         /* Clear firmware's nvram arbitration. */
3661         if (tg3_flag(tp, NVRAM))
3662                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3663         return 0;
3664 }
3665
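/* Illustrative sequence (a sketch mirroring the firmware loaders that
 * follow; entry_addr stands in for the firmware header's base address):
 * downloading firmware halts the target CPU, fills its scratch memory,
 * points the PC at the entry address and restarts execution:
 *
 *     if (tg3_halt_cpu(tp, RX_CPU_BASE))
 *             goto err;                          // CPU refused to halt
 *     // ... write the image into RX_CPU_SCRATCH_BASE ...
 *     tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE, entry_addr);
 *     tg3_rxcpu_resume(tp);                      // clears CPU_MODE_HALT
 */
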
3666 static int tg3_fw_data_len(struct tg3 *tp,
3667                            const struct tg3_firmware_hdr *fw_hdr)
3668 {
3669         int fw_len;
3670
3671         /* Non-fragmented firmware has one firmware header followed by a
3672          * contiguous chunk of data to be written. The length field in that
3673          * header is not the length of the data to be written but the
3674          * complete length of the bss. The data length is derived from
3675          * tp->fw->size minus the headers.
3676          *
3677          * Fragmented firmware has a main header followed by multiple
3678          * fragments. Each fragment is identical to non-fragmented firmware:
3679          * a firmware header followed by a contiguous chunk of data. In
3680          * the main header, the length field is unused and set to 0xffffffff.
3681          * In each fragment header, the length is the entire size of that
3682          * fragment, i.e. fragment data plus header length. The data length
3683          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3684          */
3685         if (tp->fw_len == 0xffffffff)
3686                 fw_len = be32_to_cpu(fw_hdr->len);
3687         else
3688                 fw_len = tp->fw->size;
3689
3690         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3691 }
3692
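/* Worked example (illustrative sizes, assuming the usual 12-byte header
 * of version, base_addr and len): a non-fragmented image with
 * tp->fw->size == 0x1000 yields (0x1000 - TG3_FW_HDR_LEN) / 4 data
 * words.  A fragmented image instead has tp->fw_len == 0xffffffff, so a
 * fragment whose header says len == 0x120 contributes
 * (0x120 - TG3_FW_HDR_LEN) / 4 words of data for that fragment alone.
 */
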
3693 /* tp->lock is held. */
3694 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3695                                  u32 cpu_scratch_base, int cpu_scratch_size,
3696                                  const struct tg3_firmware_hdr *fw_hdr)
3697 {
3698         int err, i;
3699         void (*write_op)(struct tg3 *, u32, u32);
3700         int total_len = tp->fw->size;
3701
3702         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3703                 netdev_err(tp->dev,
3704                            "%s: Trying to load TX cpu firmware on a 5705-plus chip\n",
3705                            __func__);
3706                 return -EINVAL;
3707         }
3708
3709         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3710                 write_op = tg3_write_mem;
3711         else
3712                 write_op = tg3_write_indirect_reg32;
3713
3714         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3715                 /* It is possible that the bootcode is still loading at this
3716                  * point. Get the NVRAM lock before halting the CPU.
3717                  */
3718                 int lock_err = tg3_nvram_lock(tp);
3719                 err = tg3_halt_cpu(tp, cpu_base);
3720                 if (!lock_err)
3721                         tg3_nvram_unlock(tp);
3722                 if (err)
3723                         goto out;
3724
3725                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3726                         write_op(tp, cpu_scratch_base + i, 0);
3727                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3728                 tw32(cpu_base + CPU_MODE,
3729                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3730         } else {
3731                 /* Subtract the additional main header for fragmented firmware
3732                  * and advance to the first fragment.
3733                  */
3734                 total_len -= TG3_FW_HDR_LEN;
3735                 fw_hdr++;
3736         }
3737
3738         do {
3739                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3740                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3741                         write_op(tp, cpu_scratch_base +
3742                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3743                                      (i * sizeof(u32)),
3744                                  be32_to_cpu(fw_data[i]));
3745
3746                 total_len -= be32_to_cpu(fw_hdr->len);
3747
3748                 /* Advance to next fragment */
3749                 fw_hdr = (struct tg3_firmware_hdr *)
3750                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3751         } while (total_len > 0);
3752
3753         err = 0;
3754
3755 out:
3756         return err;
3757 }
3758
3759 /* tp->lock is held. */
3760 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3761 {
3762         int i;
3763         const int iters = 5;
3764
3765         tw32(cpu_base + CPU_STATE, 0xffffffff);
3766         tw32_f(cpu_base + CPU_PC, pc);
3767
3768         for (i = 0; i < iters; i++) {
3769                 if (tr32(cpu_base + CPU_PC) == pc)
3770                         break;
3771                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3772                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3773                 tw32_f(cpu_base + CPU_PC, pc);
3774                 udelay(1000);
3775         }
3776
3777         return (i == iters) ? -EBUSY : 0;
3778 }
3779
3780 /* tp->lock is held. */
3781 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3782 {
3783         const struct tg3_firmware_hdr *fw_hdr;
3784         int err;
3785
3786         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3787
3788         /* The firmware blob starts with version numbers, followed by
3789          * the start address and length. We set the complete length:
3790          * length = end_address_of_bss - start_address_of_text.
3791          * The remainder is the blob, loaded contiguously from the
3792          * start address. */
3793
3794         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3795                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3796                                     fw_hdr);
3797         if (err)
3798                 return err;
3799
3800         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3801                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3802                                     fw_hdr);
3803         if (err)
3804                 return err;
3805
3806         /* Now start up only the RX CPU. */
3807         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3808                                        be32_to_cpu(fw_hdr->base_addr));
3809         if (err) {
3810                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3811                            "should be %08x\n", __func__,
3812                            tr32(RX_CPU_BASE + CPU_PC),
3813                                 be32_to_cpu(fw_hdr->base_addr));
3814                 return -ENODEV;
3815         }
3816
3817         tg3_rxcpu_resume(tp);
3818
3819         return 0;
3820 }
3821
3822 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3823 {
3824         const int iters = 1000;
3825         int i;
3826         u32 val;
3827
3828         /* Wait for the boot code to complete initialization and enter its
3829          * service loop. It is then safe to download service patches.
3830          */
3831         for (i = 0; i < iters; i++) {
3832                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3833                         break;
3834
3835                 udelay(10);
3836         }
3837
3838         if (i == iters) {
3839                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3840                 return -EBUSY;
3841         }
3842
3843         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3844         if (val & 0xff) {
3845                 netdev_warn(tp->dev,
3846                             "Other patches exist. Not downloading EEE patch\n");
3847                 return -EEXIST;
3848         }
3849
3850         return 0;
3851 }
3852
3853 /* tp->lock is held. */
3854 static void tg3_load_57766_firmware(struct tg3 *tp)
3855 {
3856         struct tg3_firmware_hdr *fw_hdr;
3857
3858         if (!tg3_flag(tp, NO_NVRAM))
3859                 return;
3860
3861         if (tg3_validate_rxcpu_state(tp))
3862                 return;
3863
3864         if (!tp->fw)
3865                 return;
3866
3867         /* This firmware blob has a different format from older firmware
3868          * releases, as described below. The main difference is that the
3869          * data is fragmented and written to non-contiguous locations.
3870          *
3871          * The blob begins with a firmware header identical to other
3872          * firmware, consisting of version, base address and length. The
3873          * length here is unused and set to 0xffffffff.
3874          *
3875          * This is followed by a series of firmware fragments, each
3876          * individually identical to previous firmware: a firmware header
3877          * followed by the data for that fragment. The version field of
3878          * each fragment header is unused.
3879          */
3880
3881         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3882         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3883                 return;
3884
3885         if (tg3_rxcpu_pause(tp))
3886                 return;
3887
3888         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3889         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3890
3891         tg3_rxcpu_resume(tp);
3892 }
3893
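/* Illustrative layout of the 57766 blob described above (sizes N and M
 * are placeholders, not values from a real image):
 *
 *     +--------------------------------------------+
 *     | main header: version, base, len=0xffffffff |
 *     +--------------------------------------------+
 *     | fragment header: base A, len = hdr + N     |
 *     | N bytes of data written at address A       |
 *     +--------------------------------------------+
 *     | fragment header: base B, len = hdr + M     |
 *     | M bytes of data written at address B       |
 *     +--------------------------------------------+
 */
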
3894 /* tp->lock is held. */
3895 static int tg3_load_tso_firmware(struct tg3 *tp)
3896 {
3897         const struct tg3_firmware_hdr *fw_hdr;
3898         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3899         int err;
3900
3901         if (!tg3_flag(tp, FW_TSO))
3902                 return 0;
3903
3904         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3905
3906         /* The firmware blob starts with version numbers, followed by
3907          * the start address and length. We set the complete length:
3908          * length = end_address_of_bss - start_address_of_text.
3909          * The remainder is the blob, loaded contiguously from the
3910          * start address. */
3911
3912         cpu_scratch_size = tp->fw_len;
3913
3914         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3915                 cpu_base = RX_CPU_BASE;
3916                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3917         } else {
3918                 cpu_base = TX_CPU_BASE;
3919                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3920                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3921         }
3922
3923         err = tg3_load_firmware_cpu(tp, cpu_base,
3924                                     cpu_scratch_base, cpu_scratch_size,
3925                                     fw_hdr);
3926         if (err)
3927                 return err;
3928
3929         /* Now start up the CPU. */
3930         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3931                                        be32_to_cpu(fw_hdr->base_addr));
3932         if (err) {
3933                 netdev_err(tp->dev,
3934                            "%s fails to set CPU PC, is %08x should be %08x\n",
3935                            __func__, tr32(cpu_base + CPU_PC),
3936                            be32_to_cpu(fw_hdr->base_addr));
3937                 return -ENODEV;
3938         }
3939
3940         tg3_resume_cpu(tp, cpu_base);
3941         return 0;
3942 }
3943
3944 /* tp->lock is held. */
3945 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3946 {
3947         u32 addr_high, addr_low;
3948
3949         addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3950         addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3951                     (mac_addr[4] <<  8) | mac_addr[5]);
3952
3953         if (index < 4) {
3954                 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3955                 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3956         } else {
3957                 index -= 4;
3958                 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3959                 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3960         }
3961 }
3962
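/* Worked example (illustrative address): for MAC 00:10:18:aa:bb:cc and
 * index 0, the register pair becomes
 *
 *     addr_high = (0x00 << 8) | 0x10            = 0x00000010
 *     addr_low  = (0x18 << 24) | (0xaa << 16) |
 *                 (0xbb << 8)  | 0xcc           = 0x18aabbcc
 *
 * written to MAC_ADDR_0_HIGH/LOW; indices 4..15 land in the
 * MAC_EXTADDR_* register block instead.
 */
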
3963 /* tp->lock is held. */
3964 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3965 {
3966         u32 addr_high;
3967         int i;
3968
3969         for (i = 0; i < 4; i++) {
3970                 if (i == 1 && skip_mac_1)
3971                         continue;
3972                 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3973         }
3974
3975         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3976             tg3_asic_rev(tp) == ASIC_REV_5704) {
3977                 for (i = 4; i < 16; i++)
3978                         __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3979         }
3980
3981         addr_high = (tp->dev->dev_addr[0] +
3982                      tp->dev->dev_addr[1] +
3983                      tp->dev->dev_addr[2] +
3984                      tp->dev->dev_addr[3] +
3985                      tp->dev->dev_addr[4] +
3986                      tp->dev->dev_addr[5]) &
3987                 TX_BACKOFF_SEED_MASK;
3988         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3989 }
3990
3991 static void tg3_enable_register_access(struct tg3 *tp)
3992 {
3993         /*
3994          * Make sure register accesses (indirect or otherwise) will function
3995          * correctly.
3996          */
3997         pci_write_config_dword(tp->pdev,
3998                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3999 }
4000
4001 static int tg3_power_up(struct tg3 *tp)
4002 {
4003         int err;
4004
4005         tg3_enable_register_access(tp);
4006
4007         err = pci_set_power_state(tp->pdev, PCI_D0);
4008         if (!err) {
4009                 /* Switch out of Vaux if it is a NIC */
4010                 tg3_pwrsrc_switch_to_vmain(tp);
4011         } else {
4012                 netdev_err(tp->dev, "Transition to D0 failed\n");
4013         }
4014
4015         return err;
4016 }
4017
4018 static int tg3_setup_phy(struct tg3 *, bool);
4019
4020 static int tg3_power_down_prepare(struct tg3 *tp)
4021 {
4022         u32 misc_host_ctrl;
4023         bool device_should_wake, do_low_power;
4024
4025         tg3_enable_register_access(tp);
4026
4027         /* Restore the CLKREQ setting. */
4028         if (tg3_flag(tp, CLKREQ_BUG))
4029                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4030                                          PCI_EXP_LNKCTL_CLKREQ_EN);
4031
4032         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4033         tw32(TG3PCI_MISC_HOST_CTRL,
4034              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4035
4036         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4037                              tg3_flag(tp, WOL_ENABLE);
4038
4039         if (tg3_flag(tp, USE_PHYLIB)) {
4040                 do_low_power = false;
4041                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4042                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4043                         __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
4044                         struct phy_device *phydev;
4045                         u32 phyid;
4046
4047                         phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
4048
4049                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4050
4051                         tp->link_config.speed = phydev->speed;
4052                         tp->link_config.duplex = phydev->duplex;
4053                         tp->link_config.autoneg = phydev->autoneg;
4054                         ethtool_convert_link_mode_to_legacy_u32(
4055                                 &tp->link_config.advertising,
4056                                 phydev->advertising);
4057
4058                         linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
4059                         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
4060                                          advertising);
4061                         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
4062                                          advertising);
4063                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
4064                                          advertising);
4065
4066                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4067                                 if (tg3_flag(tp, WOL_SPEED_100MB)) {
4068                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
4069                                                          advertising);
4070                                         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
4071                                                          advertising);
4072                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4073                                                          advertising);
4074                                 } else {
4075                                         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
4076                                                          advertising);
4077                                 }
4078                         }
4079
4080                         linkmode_copy(phydev->advertising, advertising);
4081                         phy_start_aneg(phydev);
4082
4083                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4084                         if (phyid != PHY_ID_BCMAC131) {
4085                                 phyid &= PHY_BCM_OUI_MASK;
4086                                 if (phyid == PHY_BCM_OUI_1 ||
4087                                     phyid == PHY_BCM_OUI_2 ||
4088                                     phyid == PHY_BCM_OUI_3)
4089                                         do_low_power = true;
4090                         }
4091                 }
4092         } else {
4093                 do_low_power = true;
4094
4095                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4096                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4097
4098                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4099                         tg3_setup_phy(tp, false);
4100         }
4101
4102         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4103                 u32 val;
4104
4105                 val = tr32(GRC_VCPU_EXT_CTRL);
4106                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4107         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4108                 int i;
4109                 u32 val;
4110
4111                 for (i = 0; i < 200; i++) {
4112                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4113                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4114                                 break;
4115                         msleep(1);
4116                 }
4117         }
4118         if (tg3_flag(tp, WOL_CAP))
4119                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4120                                                      WOL_DRV_STATE_SHUTDOWN |
4121                                                      WOL_DRV_WOL |
4122                                                      WOL_SET_MAGIC_PKT);
4123
4124         if (device_should_wake) {
4125                 u32 mac_mode;
4126
4127                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4128                         if (do_low_power &&
4129                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4130                                 tg3_phy_auxctl_write(tp,
4131                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4132                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4133                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4134                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4135                                 udelay(40);
4136                         }
4137
4138                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4139                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4140                         else if (tp->phy_flags &
4141                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4142                                 if (tp->link_config.active_speed == SPEED_1000)
4143                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4144                                 else
4145                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4146                         } else
4147                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4148
4149                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4150                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4151                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4152                                              SPEED_100 : SPEED_10;
4153                                 if (tg3_5700_link_polarity(tp, speed))
4154                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4155                                 else
4156                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4157                         }
4158                 } else {
4159                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4160                 }
4161
4162                 if (!tg3_flag(tp, 5750_PLUS))
4163                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4164
4165                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4166                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4167                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4168                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4169
4170                 if (tg3_flag(tp, ENABLE_APE))
4171                         mac_mode |= MAC_MODE_APE_TX_EN |
4172                                     MAC_MODE_APE_RX_EN |
4173                                     MAC_MODE_TDE_ENABLE;
4174
4175                 tw32_f(MAC_MODE, mac_mode);
4176                 udelay(100);
4177
4178                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4179                 udelay(10);
4180         }
4181
4182         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4183             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4184              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4185                 u32 base_val;
4186
4187                 base_val = tp->pci_clock_ctrl;
4188                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4189                              CLOCK_CTRL_TXCLK_DISABLE);
4190
4191                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4192                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4193         } else if (tg3_flag(tp, 5780_CLASS) ||
4194                    tg3_flag(tp, CPMU_PRESENT) ||
4195                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4196                 /* do nothing */
4197         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4198                 u32 newbits1, newbits2;
4199
4200                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4201                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4202                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4203                                     CLOCK_CTRL_TXCLK_DISABLE |
4204                                     CLOCK_CTRL_ALTCLK);
4205                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4206                 } else if (tg3_flag(tp, 5705_PLUS)) {
4207                         newbits1 = CLOCK_CTRL_625_CORE;
4208                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4209                 } else {
4210                         newbits1 = CLOCK_CTRL_ALTCLK;
4211                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212                 }
4213
4214                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4215                             40);
4216
4217                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4218                             40);
4219
4220                 if (!tg3_flag(tp, 5705_PLUS)) {
4221                         u32 newbits3;
4222
4223                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4224                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4225                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4226                                             CLOCK_CTRL_TXCLK_DISABLE |
4227                                             CLOCK_CTRL_44MHZ_CORE);
4228                         } else {
4229                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4230                         }
4231
4232                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4233                                     tp->pci_clock_ctrl | newbits3, 40);
4234                 }
4235         }
4236
4237         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4238                 tg3_power_down_phy(tp, do_low_power);
4239
4240         tg3_frob_aux_power(tp, true);
4241
4242         /* Workaround for unstable PLL clock */
4243         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4244             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4245              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4246                 u32 val = tr32(0x7d00);
4247
4248                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4249                 tw32(0x7d00, val);
4250                 if (!tg3_flag(tp, ENABLE_ASF)) {
4251                         int err;
4252
4253                         err = tg3_nvram_lock(tp);
4254                         tg3_halt_cpu(tp, RX_CPU_BASE);
4255                         if (!err)
4256                                 tg3_nvram_unlock(tp);
4257                 }
4258         }
4259
4260         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4261
4262         tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4263
4264         return 0;
4265 }
4266
4267 static void tg3_power_down(struct tg3 *tp)
4268 {
4269         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4270         pci_set_power_state(tp->pdev, PCI_D3hot);
4271 }
4272
4273 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
4274 {
4275         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4276         case MII_TG3_AUX_STAT_10HALF:
4277                 *speed = SPEED_10;
4278                 *duplex = DUPLEX_HALF;
4279                 break;
4280
4281         case MII_TG3_AUX_STAT_10FULL:
4282                 *speed = SPEED_10;
4283                 *duplex = DUPLEX_FULL;
4284                 break;
4285
4286         case MII_TG3_AUX_STAT_100HALF:
4287                 *speed = SPEED_100;
4288                 *duplex = DUPLEX_HALF;
4289                 break;
4290
4291         case MII_TG3_AUX_STAT_100FULL:
4292                 *speed = SPEED_100;
4293                 *duplex = DUPLEX_FULL;
4294                 break;
4295
4296         case MII_TG3_AUX_STAT_1000HALF:
4297                 *speed = SPEED_1000;
4298                 *duplex = DUPLEX_HALF;
4299                 break;
4300
4301         case MII_TG3_AUX_STAT_1000FULL:
4302                 *speed = SPEED_1000;
4303                 *duplex = DUPLEX_FULL;
4304                 break;
4305
4306         default:
4307                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4308                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4309                                  SPEED_10;
4310                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4311                                   DUPLEX_HALF;
4312                         break;
4313                 }
4314                 *speed = SPEED_UNKNOWN;
4315                 *duplex = DUPLEX_UNKNOWN;
4316                 break;
4317         }
4318 }
4319
4320 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4321 {
4322         int err = 0;
4323         u32 val, new_adv;
4324
4325         new_adv = ADVERTISE_CSMA;
4326         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4327         new_adv |= mii_advertise_flowctrl(flowctrl);
4328
4329         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4330         if (err)
4331                 goto done;
4332
4333         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4334                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4335
4336                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4337                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4338                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4339
4340                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4341                 if (err)
4342                         goto done;
4343         }
4344
4345         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4346                 goto done;
4347
4348         tw32(TG3_CPMU_EEE_MODE,
4349              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4350
4351         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4352         if (!err) {
4353                 u32 err2;
4354
4355                 val = 0;
4356                 /* Advertise 100-BaseTX EEE ability */
4357                 if (advertise & ADVERTISED_100baseT_Full)
4358                         val |= MDIO_AN_EEE_ADV_100TX;
4359                 /* Advertise 1000-BaseT EEE ability */
4360                 if (advertise & ADVERTISED_1000baseT_Full)
4361                         val |= MDIO_AN_EEE_ADV_1000T;
4362
4363                 if (!tp->eee.eee_enabled) {
4364                         val = 0;
4365                         tp->eee.advertised = 0;
4366                 } else {
4367                         tp->eee.advertised = advertise &
4368                                              (ADVERTISED_100baseT_Full |
4369                                               ADVERTISED_1000baseT_Full);
4370                 }
4371
4372                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4373                 if (err)
4374                         val = 0;
4375
4376                 switch (tg3_asic_rev(tp)) {
4377                 case ASIC_REV_5717:
4378                 case ASIC_REV_57765:
4379                 case ASIC_REV_57766:
4380                 case ASIC_REV_5719:
4381                         /* If we advertised any EEE modes above... */
4382                         if (val)
4383                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4384                                       MII_TG3_DSP_TAP26_RMRXSTO |
4385                                       MII_TG3_DSP_TAP26_OPCSINPT;
4386                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4387                         fallthrough;
4388                 case ASIC_REV_5720:
4389                 case ASIC_REV_5762:
4390                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4391                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4392                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4393                 }
4394
4395                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4396                 if (!err)
4397                         err = err2;
4398         }
4399
4400 done:
4401         return err;
4402 }
4403
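/* Begin copper link bring-up: with autoneg enabled (or in low-power
 * states) configure and restart autonegotiation; otherwise force the
 * configured speed/duplex directly through BMCR.
 */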
4404 static void tg3_phy_copper_begin(struct tg3 *tp)
4405 {
4406         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4407             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4408                 u32 adv, fc;
4409
4410                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4411                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4412                         adv = ADVERTISED_10baseT_Half |
4413                               ADVERTISED_10baseT_Full;
4414                         if (tg3_flag(tp, WOL_SPEED_100MB))
4415                                 adv |= ADVERTISED_100baseT_Half |
4416                                        ADVERTISED_100baseT_Full;
4417                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4418                                 if (!(tp->phy_flags &
4419                                       TG3_PHYFLG_DISABLE_1G_HD_ADV))
4420                                         adv |= ADVERTISED_1000baseT_Half;
4421                                 adv |= ADVERTISED_1000baseT_Full;
4422                         }
4423
4424                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4425                 } else {
4426                         adv = tp->link_config.advertising;
4427                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4428                                 adv &= ~(ADVERTISED_1000baseT_Half |
4429                                          ADVERTISED_1000baseT_Full);
4430
4431                         fc = tp->link_config.flowctrl;
4432                 }
4433
4434                 tg3_phy_autoneg_cfg(tp, adv, fc);
4435
4436                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4437                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4438                         /* Normally during power down we want to autonegotiate
4439                          * the lowest possible speed for WOL. However, to avoid
4440                          * link flap, we leave it untouched.
4441                          */
4442                         return;
4443                 }
4444
4445                 tg3_writephy(tp, MII_BMCR,
4446                              BMCR_ANENABLE | BMCR_ANRESTART);
4447         } else {
4448                 int i;
4449                 u32 bmcr, orig_bmcr;
4450
4451                 tp->link_config.active_speed = tp->link_config.speed;
4452                 tp->link_config.active_duplex = tp->link_config.duplex;
4453
4454                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4455                         /* With autoneg disabled, the 5715 (covered here by
4456                          * ASIC_REV_5714) only links up when the advertisement
4457                          * register has the configured speed enabled.
4458                          */
4459                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4460                 }
4461
4462                 bmcr = 0;
4463                 switch (tp->link_config.speed) {
4464                 default:
4465                 case SPEED_10:
4466                         break;
4467
4468                 case SPEED_100:
4469                         bmcr |= BMCR_SPEED100;
4470                         break;
4471
4472                 case SPEED_1000:
4473                         bmcr |= BMCR_SPEED1000;
4474                         break;
4475                 }
4476
4477                 if (tp->link_config.duplex == DUPLEX_FULL)
4478                         bmcr |= BMCR_FULLDPLX;
4479
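                /* Worked example (illustrative): forcing 100 Mb/s full duplex
                 * yields bmcr = BMCR_SPEED100 | BMCR_FULLDPLX.  Before writing
                 * it, the code below drops the link by putting the PHY in
                 * loopback, waits for BMSR_LSTATUS to clear, and only then
                 * applies the new BMCR.
                 */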
4480                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4481                     (bmcr != orig_bmcr)) {
4482                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4483                         for (i = 0; i < 1500; i++) {
4484                                 u32 tmp;
4485
4486                                 udelay(10);
4487                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4488                                     tg3_readphy(tp, MII_BMSR, &tmp))
4489                                         continue;
4490                                 if (!(tmp & BMSR_LSTATUS)) {
4491                                         udelay(40);
4492                                         break;
4493                                 }
4494                         }
4495                         tg3_writephy(tp, MII_BMCR, bmcr);
4496                         udelay(40);
4497                 }
4498         }
4499 }
4500
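/* Reconstruct tp->link_config by reading the PHY back: BMCR decides
 * autoneg vs. forced speed/duplex; with autoneg on, MII_ADVERTISE and
 * MII_CTRL1000 (or the 1000BASE-X advertisement bits on serdes parts)
 * supply the advertised modes and flow control.
 */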
4501 static int tg3_phy_pull_config(struct tg3 *tp)
4502 {
4503         int err;
4504         u32 val;
4505
4506         err = tg3_readphy(tp, MII_BMCR, &val);
4507         if (err)
4508                 goto done;
4509
4510         if (!(val & BMCR_ANENABLE)) {
4511                 tp->link_config.autoneg = AUTONEG_DISABLE;
4512                 tp->link_config.advertising = 0;
4513                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4514
4515                 err = -EIO;
4516
4517                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4518                 case 0:
4519                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520                                 goto done;
4521
4522                         tp->link_config.speed = SPEED_10;
4523                         break;
4524                 case BMCR_SPEED100:
4525                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526                                 goto done;
4527
4528                         tp->link_config.speed = SPEED_100;
4529                         break;
4530                 case BMCR_SPEED1000:
4531                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4532                                 tp->link_config.speed = SPEED_1000;
4533                                 break;
4534                         }
4535                         fallthrough;
4536                 default:
4537                         goto done;
4538                 }
4539
4540                 if (val & BMCR_FULLDPLX)
4541                         tp->link_config.duplex = DUPLEX_FULL;
4542                 else
4543                         tp->link_config.duplex = DUPLEX_HALF;
4544
4545                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4546
4547                 err = 0;
4548                 goto done;
4549         }
4550
4551         tp->link_config.autoneg = AUTONEG_ENABLE;
4552         tp->link_config.advertising = ADVERTISED_Autoneg;
4553         tg3_flag_set(tp, PAUSE_AUTONEG);
4554
4555         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4556                 u32 adv;
4557
4558                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4559                 if (err)
4560                         goto done;
4561
4562                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4563                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4564
4565                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4566         } else {
4567                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4568         }
4569
4570         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4571                 u32 adv;
4572
4573                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4574                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4575                         if (err)
4576                                 goto done;
4577
4578                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4579                 } else {
4580                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4581                         if (err)
4582                                 goto done;
4583
4584                         adv = tg3_decode_flowctrl_1000X(val);
4585                         tp->link_config.flowctrl = adv;
4586
4587                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4588                         adv = mii_adv_to_ethtool_adv_x(val);
4589                 }
4590
4591                 tp->link_config.advertising |= adv;
4592         }
4593
4594 done:
4595         return err;
4596 }
4597
4598 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4599 {
4600         int err;
4601
4602         /* Turn off tap power management. */
4603         /* Set the extended packet length bit. */
4604         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4605
4606         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4607         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4608         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4609         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4610         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4611
4612         udelay(40);
4613
4614         return err;
4615 }
4616
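/* Compare the EEE configuration we want (tp->eee) against what the
 * PHY is actually advertising; a mismatch tells the caller that a PHY
 * reset is needed before the new settings take effect.
 */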
4617 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4618 {
4619         struct ethtool_eee eee;
4620
4621         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4622                 return true;
4623
4624         tg3_eee_pull_config(tp, &eee);
4625
4626         if (tp->eee.eee_enabled) {
4627                 if (tp->eee.advertised != eee.advertised ||
4628                     tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4629                     tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4630                         return false;
4631         } else {
4632                 /* EEE is disabled but we're advertising */
4633                 if (eee.advertised)
4634                         return false;
4635         }
4636
4637         return true;
4638 }
4639
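/* Verify that the PHY's advertisement registers match tp->link_config.
 * A sketch of the comparison done below:
 *
 *   (MII_ADVERTISE & advmsk)       must equal the 10/100 + pause target
 *   (MII_CTRL1000 & gigabit mask)  must equal the gigabit target
 *
 * On 5701 A0/B0 the master-mode bits are folded into the gigabit
 * comparison because tg3_phy_autoneg_cfg() forces them there.
 */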
4640 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4641 {
4642         u32 advmsk, tgtadv, advertising;
4643
4644         advertising = tp->link_config.advertising;
4645         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4646
4647         advmsk = ADVERTISE_ALL;
4648         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4649                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4650                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4651         }
4652
4653         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4654                 return false;
4655
4656         if ((*lcladv & advmsk) != tgtadv)
4657                 return false;
4658
4659         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4660                 u32 tg3_ctrl;
4661
4662                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4663
4664                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4665                         return false;
4666
4667                 if (tgtadv &&
4668                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4669                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4670                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4671                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4672                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4673                 } else {
4674                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4675                 }
4676
4677                 if (tg3_ctrl != tgtadv)
4678                         return false;
4679         }
4680
4681         return true;
4682 }
4683
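/* Fetch the link partner's abilities (MII_STAT1000 for gigabit plus
 * MII_LPA) and record them in ethtool form in tp->link_config.rmt_adv.
 */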
4684 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4685 {
4686         u32 lpeth = 0;
4687
4688         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4689                 u32 val;
4690
4691                 if (tg3_readphy(tp, MII_STAT1000, &val))
4692                         return false;
4693
4694                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4695         }
4696
4697         if (tg3_readphy(tp, MII_LPA, rmtadv))
4698                 return false;
4699
4700         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4701         tp->link_config.rmt_adv = lpeth;
4702
4703         return true;
4704 }
4705
4706 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4707 {
4708         if (curr_link_up != tp->link_up) {
4709                 if (curr_link_up) {
4710                         netif_carrier_on(tp->dev);
4711                 } else {
4712                         netif_carrier_off(tp->dev);
4713                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4714                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4715                 }
4716
4717                 tg3_link_report(tp);
4718                 return true;
4719         }
4720
4721         return false;
4722 }
4723
4724 static void tg3_clear_mac_status(struct tg3 *tp)
4725 {
4726         tw32(MAC_EVENT, 0);
4727
4728         tw32_f(MAC_STATUS,
4729                MAC_STATUS_SYNC_CHANGED |
4730                MAC_STATUS_CFG_CHANGED |
4731                MAC_STATUS_MI_COMPLETION |
4732                MAC_STATUS_LNKSTATE_CHANGED);
4733         udelay(40);
4734 }
4735
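/* Program the CPMU EEE machinery: link-idle detection sources, the
 * LPI entry/exit controls, and the two debounce timers.  The mode
 * register is gated on tp->eee.eee_enabled and is simply cleared when
 * EEE is off.
 */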
4736 static void tg3_setup_eee(struct tg3 *tp)
4737 {
4738         u32 val;
4739
4740         val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4741               TG3_CPMU_EEE_LNKIDL_UART_IDL;
4742         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4743                 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4744
4745         tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4746
4747         tw32_f(TG3_CPMU_EEE_CTRL,
4748                TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4749
4750         val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4751               (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4752               TG3_CPMU_EEEMD_LPI_IN_RX |
4753               TG3_CPMU_EEEMD_EEE_ENABLE;
4754
4755         if (tg3_asic_rev(tp) != ASIC_REV_5717)
4756                 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4757
4758         if (tg3_flag(tp, ENABLE_APE))
4759                 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4760
4761         tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4762
4763         tw32_f(TG3_CPMU_EEE_DBTMR1,
4764                TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4765                (tp->eee.tx_lpi_timer & 0xffff));
4766
4767         tw32_f(TG3_CPMU_EEE_DBTMR2,
4768                TG3_CPMU_DBTMR2_APE_TX_2047US |
4769                TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4770 }
4771
4772 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4773 {
4774         bool current_link_up;
4775         u32 bmsr, val;
4776         u32 lcl_adv, rmt_adv;
4777         u32 current_speed;
4778         u8 current_duplex;
4779         int i, err;
4780
4781         tg3_clear_mac_status(tp);
4782
4783         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4784                 tw32_f(MAC_MI_MODE,
4785                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4786                 udelay(80);
4787         }
4788
4789         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4790
4791         /* Some third-party PHYs need to be reset when the link
4792          * goes down.
4793          */
4794         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4795              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4796              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4797             tp->link_up) {
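                /* BMSR latches link-down events (IEEE 802.3 latched-low
                 * semantics), so it is read twice here and throughout this
                 * function: the first read clears any stale latched value,
                 * the second returns the current link state.
                 */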
4798                 tg3_readphy(tp, MII_BMSR, &bmsr);
4799                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4800                     !(bmsr & BMSR_LSTATUS))
4801                         force_reset = true;
4802         }
4803         if (force_reset)
4804                 tg3_phy_reset(tp);
4805
4806         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4807                 tg3_readphy(tp, MII_BMSR, &bmsr);
4808                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4809                     !tg3_flag(tp, INIT_COMPLETE))
4810                         bmsr = 0;
4811
4812                 if (!(bmsr & BMSR_LSTATUS)) {
4813                         err = tg3_init_5401phy_dsp(tp);
4814                         if (err)
4815                                 return err;
4816
4817                         tg3_readphy(tp, MII_BMSR, &bmsr);
4818                         for (i = 0; i < 1000; i++) {
4819                                 udelay(10);
4820                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4821                                     (bmsr & BMSR_LSTATUS)) {
4822                                         udelay(40);
4823                                         break;
4824                                 }
4825                         }
4826
4827                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4828                             TG3_PHY_REV_BCM5401_B0 &&
4829                             !(bmsr & BMSR_LSTATUS) &&
4830                             tp->link_config.active_speed == SPEED_1000) {
4831                                 err = tg3_phy_reset(tp);
4832                                 if (!err)
4833                                         err = tg3_init_5401phy_dsp(tp);
4834                                 if (err)
4835                                         return err;
4836                         }
4837                 }
4838         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4839                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4840                 /* 5701 {A0,B0} CRC bug workaround */
4841                 tg3_writephy(tp, 0x15, 0x0a75);
4842                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4843                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4844                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845         }
4846
4847         /* Clear pending interrupts... */
4848         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4850
4851         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4852                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4853         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4854                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4855
4856         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4857             tg3_asic_rev(tp) == ASIC_REV_5701) {
4858                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4859                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4860                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4861                 else
4862                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4863         }
4864
4865         current_link_up = false;
4866         current_speed = SPEED_UNKNOWN;
4867         current_duplex = DUPLEX_UNKNOWN;
4868         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4869         tp->link_config.rmt_adv = 0;
4870
4871         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4872                 err = tg3_phy_auxctl_read(tp,
4873                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874                                           &val);
4875                 if (!err && !(val & (1 << 10))) {
4876                         tg3_phy_auxctl_write(tp,
4877                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4878                                              val | (1 << 10));
4879                         goto relink;
4880                 }
4881         }
4882
4883         bmsr = 0;
4884         for (i = 0; i < 100; i++) {
4885                 tg3_readphy(tp, MII_BMSR, &bmsr);
4886                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4887                     (bmsr & BMSR_LSTATUS))
4888                         break;
4889                 udelay(40);
4890         }
4891
4892         if (bmsr & BMSR_LSTATUS) {
4893                 u32 aux_stat, bmcr;
4894
4895                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4896                 for (i = 0; i < 2000; i++) {
4897                         udelay(10);
4898                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4899                             aux_stat)
4900                                 break;
4901                 }
4902
4903                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4904                                              &current_speed,
4905                                              &current_duplex);
4906
4907                 bmcr = 0;
4908                 for (i = 0; i < 200; i++) {
4909                         tg3_readphy(tp, MII_BMCR, &bmcr);
4910                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4911                                 continue;
4912                         if (bmcr && bmcr != 0x7fff)
4913                                 break;
4914                         udelay(10);
4915                 }
4916
4917                 lcl_adv = 0;
4918                 rmt_adv = 0;
4919
4920                 tp->link_config.active_speed = current_speed;
4921                 tp->link_config.active_duplex = current_duplex;
4922
4923                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4924                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4925
4926                         if ((bmcr & BMCR_ANENABLE) &&
4927                             eee_config_ok &&
4928                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4929                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4930                                 current_link_up = true;
4931
4932                         /* Changes to EEE settings take effect only after a PHY
4933                          * reset.  If we have skipped a reset because Link Flap
4934                          * Avoidance is enabled, do it now.
4935                          */
4936                         if (!eee_config_ok &&
4937                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4938                             !force_reset) {
4939                                 tg3_setup_eee(tp);
4940                                 tg3_phy_reset(tp);
4941                         }
4942                 } else {
4943                         if (!(bmcr & BMCR_ANENABLE) &&
4944                             tp->link_config.speed == current_speed &&
4945                             tp->link_config.duplex == current_duplex) {
4946                                 current_link_up = true;
4947                         }
4948                 }
4949
4950                 if (current_link_up &&
4951                     tp->link_config.active_duplex == DUPLEX_FULL) {
4952                         u32 reg, bit;
4953
4954                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4955                                 reg = MII_TG3_FET_GEN_STAT;
4956                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4957                         } else {
4958                                 reg = MII_TG3_EXT_STAT;
4959                                 bit = MII_TG3_EXT_STAT_MDIX;
4960                         }
4961
4962                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4963                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4964
4965                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4966                 }
4967         }
4968
4969 relink:
4970         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4971                 tg3_phy_copper_begin(tp);
4972
4973                 if (tg3_flag(tp, ROBOSWITCH)) {
4974                         current_link_up = true;
4975                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4976                         current_speed = SPEED_1000;
4977                         current_duplex = DUPLEX_FULL;
4978                         tp->link_config.active_speed = current_speed;
4979                         tp->link_config.active_duplex = current_duplex;
4980                 }
4981
4982                 tg3_readphy(tp, MII_BMSR, &bmsr);
4983                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4984                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4985                         current_link_up = true;
4986         }
4987
4988         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4989         if (current_link_up) {
4990                 if (tp->link_config.active_speed == SPEED_100 ||
4991                     tp->link_config.active_speed == SPEED_10)
4992                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993                 else
4994                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4996                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4997         else
4998                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4999
5000         /* For the 5750 core in the BCM4785 chip to work properly
5001          * in RGMII mode, the LED Control Register must be set up.
5002          */
5003         if (tg3_flag(tp, RGMII_MODE)) {
5004                 u32 led_ctrl = tr32(MAC_LED_CTRL);
5005                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5006
5007                 if (tp->link_config.active_speed == SPEED_10)
5008                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5009                 else if (tp->link_config.active_speed == SPEED_100)
5010                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5011                                      LED_CTRL_100MBPS_ON);
5012                 else if (tp->link_config.active_speed == SPEED_1000)
5013                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5014                                      LED_CTRL_1000MBPS_ON);
5015
5016                 tw32(MAC_LED_CTRL, led_ctrl);
5017                 udelay(40);
5018         }
5019
5020         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5021         if (tp->link_config.active_duplex == DUPLEX_HALF)
5022                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5023
5024         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5025                 if (current_link_up &&
5026                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5027                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5028                 else
5029                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5030         }
5031
5032         /* ??? Without this setting Netgear GA302T PHY does not
5033          * ??? send/receive packets...
5034          */
5035         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5036             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5037                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5038                 tw32_f(MAC_MI_MODE, tp->mi_mode);
5039                 udelay(80);
5040         }
5041
5042         tw32_f(MAC_MODE, tp->mac_mode);
5043         udelay(40);
5044
5045         tg3_phy_eee_adjust(tp, current_link_up);
5046
5047         if (tg3_flag(tp, USE_LINKCHG_REG)) {
5048                 /* Polled via timer. */
5049                 tw32_f(MAC_EVENT, 0);
5050         } else {
5051                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5052         }
5053         udelay(40);
5054
5055         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5056             current_link_up &&
5057             tp->link_config.active_speed == SPEED_1000 &&
5058             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5059                 udelay(120);
5060                 tw32_f(MAC_STATUS,
5061                      (MAC_STATUS_SYNC_CHANGED |
5062                       MAC_STATUS_CFG_CHANGED));
5063                 udelay(40);
5064                 tg3_write_mem(tp,
5065                               NIC_SRAM_FIRMWARE_MBOX,
5066                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5067         }
5068
5069         /* Prevent send BD corruption by keeping PCIe CLKREQ disabled at 10/100 speeds. */
5070         if (tg3_flag(tp, CLKREQ_BUG)) {
5071                 if (tp->link_config.active_speed == SPEED_100 ||
5072                     tp->link_config.active_speed == SPEED_10)
5073                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5074                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
5075                 else
5076                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5077                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
5078         }
5079
5080         tg3_test_and_report_link_chg(tp, current_link_up);
5081
5082         return 0;
5083 }
5084
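/* Software 1000BASE-X autonegotiation state, used when the MAC cannot
 * run autoneg in hardware.  The ANEG_STATE_* values and MR_* flags
 * essentially track the arbitration state diagram and management
 * variables of IEEE 802.3 Clause 37, and the ANEG_CFG_* bits below
 * mirror the fields of the transmitted /C/ config ordered sets.
 */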
5085 struct tg3_fiber_aneginfo {
5086         int state;
5087 #define ANEG_STATE_UNKNOWN              0
5088 #define ANEG_STATE_AN_ENABLE            1
5089 #define ANEG_STATE_RESTART_INIT         2
5090 #define ANEG_STATE_RESTART              3
5091 #define ANEG_STATE_DISABLE_LINK_OK      4
5092 #define ANEG_STATE_ABILITY_DETECT_INIT  5
5093 #define ANEG_STATE_ABILITY_DETECT       6
5094 #define ANEG_STATE_ACK_DETECT_INIT      7
5095 #define ANEG_STATE_ACK_DETECT           8
5096 #define ANEG_STATE_COMPLETE_ACK_INIT    9
5097 #define ANEG_STATE_COMPLETE_ACK         10
5098 #define ANEG_STATE_IDLE_DETECT_INIT     11
5099 #define ANEG_STATE_IDLE_DETECT          12
5100 #define ANEG_STATE_LINK_OK              13
5101 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5102 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5103
5104         u32 flags;
5105 #define MR_AN_ENABLE            0x00000001
5106 #define MR_RESTART_AN           0x00000002
5107 #define MR_AN_COMPLETE          0x00000004
5108 #define MR_PAGE_RX              0x00000008
5109 #define MR_NP_LOADED            0x00000010
5110 #define MR_TOGGLE_TX            0x00000020
5111 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5112 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5113 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5114 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5115 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5116 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5117 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5118 #define MR_TOGGLE_RX            0x00002000
5119 #define MR_NP_RX                0x00004000
5120
5121 #define MR_LINK_OK              0x80000000
5122
5123         unsigned long link_time, cur_time;
5124
5125         u32 ability_match_cfg;
5126         int ability_match_count;
5127
5128         char ability_match, idle_match, ack_match;
5129
5130         u32 txconfig, rxconfig;
5131 #define ANEG_CFG_NP             0x00000080
5132 #define ANEG_CFG_ACK            0x00000040
5133 #define ANEG_CFG_RF2            0x00000020
5134 #define ANEG_CFG_RF1            0x00000010
5135 #define ANEG_CFG_PS2            0x00000001
5136 #define ANEG_CFG_PS1            0x00008000
5137 #define ANEG_CFG_HD             0x00004000
5138 #define ANEG_CFG_FD             0x00002000
5139 #define ANEG_CFG_INVAL          0x00001f06
5140
5141 };
5142 #define ANEG_OK         0
5143 #define ANEG_DONE       1
5144 #define ANEG_TIMER_ENAB 2
5145 #define ANEG_FAILED     -1
5146
5147 #define ANEG_STATE_SETTLE_TIME  10000
5148
5149 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5150                                    struct tg3_fiber_aneginfo *ap)
5151 {
5152         u16 flowctrl;
5153         unsigned long delta;
5154         u32 rx_cfg_reg;
5155         int ret;
5156
5157         if (ap->state == ANEG_STATE_UNKNOWN) {
5158                 ap->rxconfig = 0;
5159                 ap->link_time = 0;
5160                 ap->cur_time = 0;
5161                 ap->ability_match_cfg = 0;
5162                 ap->ability_match_count = 0;
5163                 ap->ability_match = 0;
5164                 ap->idle_match = 0;
5165                 ap->ack_match = 0;
5166         }
5167         ap->cur_time++;
5168
5169         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5170                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5171
5172                 if (rx_cfg_reg != ap->ability_match_cfg) {
5173                         ap->ability_match_cfg = rx_cfg_reg;
5174                         ap->ability_match = 0;
5175                         ap->ability_match_count = 0;
5176                 } else {
5177                         if (++ap->ability_match_count > 1) {
5178                                 ap->ability_match = 1;
5179                                 ap->ability_match_cfg = rx_cfg_reg;
5180                         }
5181                 }
5182                 if (rx_cfg_reg & ANEG_CFG_ACK)
5183                         ap->ack_match = 1;
5184                 else
5185                         ap->ack_match = 0;
5186
5187                 ap->idle_match = 0;
5188         } else {
5189                 ap->idle_match = 1;
5190                 ap->ability_match_cfg = 0;
5191                 ap->ability_match_count = 0;
5192                 ap->ability_match = 0;
5193                 ap->ack_match = 0;
5194
5195                 rx_cfg_reg = 0;
5196         }
5197
5198         ap->rxconfig = rx_cfg_reg;
5199         ret = ANEG_OK;
5200
5201         switch (ap->state) {
5202         case ANEG_STATE_UNKNOWN:
5203                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5204                         ap->state = ANEG_STATE_AN_ENABLE;
5205
5206                 fallthrough;
5207         case ANEG_STATE_AN_ENABLE:
5208                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5209                 if (ap->flags & MR_AN_ENABLE) {
5210                         ap->link_time = 0;
5211                         ap->cur_time = 0;
5212                         ap->ability_match_cfg = 0;
5213                         ap->ability_match_count = 0;
5214                         ap->ability_match = 0;
5215                         ap->idle_match = 0;
5216                         ap->ack_match = 0;
5217
5218                         ap->state = ANEG_STATE_RESTART_INIT;
5219                 } else {
5220                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5221                 }
5222                 break;
5223
5224         case ANEG_STATE_RESTART_INIT:
5225                 ap->link_time = ap->cur_time;
5226                 ap->flags &= ~(MR_NP_LOADED);
5227                 ap->txconfig = 0;
5228                 tw32(MAC_TX_AUTO_NEG, 0);
5229                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5230                 tw32_f(MAC_MODE, tp->mac_mode);
5231                 udelay(40);
5232
5233                 ret = ANEG_TIMER_ENAB;
5234                 ap->state = ANEG_STATE_RESTART;
5235
5236                 fallthrough;
5237         case ANEG_STATE_RESTART:
5238                 delta = ap->cur_time - ap->link_time;
5239                 if (delta > ANEG_STATE_SETTLE_TIME)
5240                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5241                 else
5242                         ret = ANEG_TIMER_ENAB;
5243                 break;
5244
5245         case ANEG_STATE_DISABLE_LINK_OK:
5246                 ret = ANEG_DONE;
5247                 break;
5248
5249         case ANEG_STATE_ABILITY_DETECT_INIT:
5250                 ap->flags &= ~(MR_TOGGLE_TX);
5251                 ap->txconfig = ANEG_CFG_FD;
5252                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5253                 if (flowctrl & ADVERTISE_1000XPAUSE)
5254                         ap->txconfig |= ANEG_CFG_PS1;
5255                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5256                         ap->txconfig |= ANEG_CFG_PS2;
5257                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5258                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5259                 tw32_f(MAC_MODE, tp->mac_mode);
5260                 udelay(40);
5261
5262                 ap->state = ANEG_STATE_ABILITY_DETECT;
5263                 break;
5264
5265         case ANEG_STATE_ABILITY_DETECT:
5266                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5267                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5268                 break;
5269
5270         case ANEG_STATE_ACK_DETECT_INIT:
5271                 ap->txconfig |= ANEG_CFG_ACK;
5272                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5273                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5274                 tw32_f(MAC_MODE, tp->mac_mode);
5275                 udelay(40);
5276
5277                 ap->state = ANEG_STATE_ACK_DETECT;
5278
5279                 fallthrough;
5280         case ANEG_STATE_ACK_DETECT:
5281                 if (ap->ack_match != 0) {
5282                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5283                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5284                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5285                         } else {
5286                                 ap->state = ANEG_STATE_AN_ENABLE;
5287                         }
5288                 } else if (ap->ability_match != 0 &&
5289                            ap->rxconfig == 0) {
5290                         ap->state = ANEG_STATE_AN_ENABLE;
5291                 }
5292                 break;
5293
5294         case ANEG_STATE_COMPLETE_ACK_INIT:
5295                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5296                         ret = ANEG_FAILED;
5297                         break;
5298                 }
5299                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5300                                MR_LP_ADV_HALF_DUPLEX |
5301                                MR_LP_ADV_SYM_PAUSE |
5302                                MR_LP_ADV_ASYM_PAUSE |
5303                                MR_LP_ADV_REMOTE_FAULT1 |
5304                                MR_LP_ADV_REMOTE_FAULT2 |
5305                                MR_LP_ADV_NEXT_PAGE |
5306                                MR_TOGGLE_RX |
5307                                MR_NP_RX);
5308                 if (ap->rxconfig & ANEG_CFG_FD)
5309                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5310                 if (ap->rxconfig & ANEG_CFG_HD)
5311                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5312                 if (ap->rxconfig & ANEG_CFG_PS1)
5313                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5314                 if (ap->rxconfig & ANEG_CFG_PS2)
5315                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5316                 if (ap->rxconfig & ANEG_CFG_RF1)
5317                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5318                 if (ap->rxconfig & ANEG_CFG_RF2)
5319                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5320                 if (ap->rxconfig & ANEG_CFG_NP)
5321                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5322
5323                 ap->link_time = ap->cur_time;
5324
5325                 ap->flags ^= (MR_TOGGLE_TX);
5326                 if (ap->rxconfig & 0x0008)
5327                         ap->flags |= MR_TOGGLE_RX;
5328                 if (ap->rxconfig & ANEG_CFG_NP)
5329                         ap->flags |= MR_NP_RX;
5330                 ap->flags |= MR_PAGE_RX;
5331
5332                 ap->state = ANEG_STATE_COMPLETE_ACK;
5333                 ret = ANEG_TIMER_ENAB;
5334                 break;
5335
5336         case ANEG_STATE_COMPLETE_ACK:
5337                 if (ap->ability_match != 0 &&
5338                     ap->rxconfig == 0) {
5339                         ap->state = ANEG_STATE_AN_ENABLE;
5340                         break;
5341                 }
5342                 delta = ap->cur_time - ap->link_time;
5343                 if (delta > ANEG_STATE_SETTLE_TIME) {
5344                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5345                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346                         } else {
5347                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5348                                     !(ap->flags & MR_NP_RX)) {
5349                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5350                                 } else {
5351                                         ret = ANEG_FAILED;
5352                                 }
5353                         }
5354                 }
5355                 break;
5356
5357         case ANEG_STATE_IDLE_DETECT_INIT:
5358                 ap->link_time = ap->cur_time;
5359                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5360                 tw32_f(MAC_MODE, tp->mac_mode);
5361                 udelay(40);
5362
5363                 ap->state = ANEG_STATE_IDLE_DETECT;
5364                 ret = ANEG_TIMER_ENAB;
5365                 break;
5366
5367         case ANEG_STATE_IDLE_DETECT:
5368                 if (ap->ability_match != 0 &&
5369                     ap->rxconfig == 0) {
5370                         ap->state = ANEG_STATE_AN_ENABLE;
5371                         break;
5372                 }
5373                 delta = ap->cur_time - ap->link_time;
5374                 if (delta > ANEG_STATE_SETTLE_TIME) {
5375                         /* XXX another gem from the Broadcom driver :( */
5376                         ap->state = ANEG_STATE_LINK_OK;
5377                 }
5378                 break;
5379
5380         case ANEG_STATE_LINK_OK:
5381                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5382                 ret = ANEG_DONE;
5383                 break;
5384
5385         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5386                 /* ??? unimplemented */
5387                 break;
5388
5389         case ANEG_STATE_NEXT_PAGE_WAIT:
5390                 /* ??? unimplemented */
5391                 break;
5392
5393         default:
5394                 ret = ANEG_FAILED;
5395                 break;
5396         }
5397
5398         return ret;
5399 }
5400
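/* Run the software autoneg state machine to completion.  Each loop
 * iteration below is one udelay(1) tick, so the 195000-tick bound
 * caps the exchange at roughly 195 ms, and ANEG_STATE_SETTLE_TIME
 * (10000 ticks) corresponds to roughly 10 ms.
 */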
5401 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5402 {
5403         int res = 0;
5404         struct tg3_fiber_aneginfo aninfo;
5405         int status = ANEG_FAILED;
5406         unsigned int tick;
5407         u32 tmp;
5408
5409         tw32_f(MAC_TX_AUTO_NEG, 0);
5410
5411         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5412         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5413         udelay(40);
5414
5415         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5416         udelay(40);
5417
5418         memset(&aninfo, 0, sizeof(aninfo));
5419         aninfo.flags |= MR_AN_ENABLE;
5420         aninfo.state = ANEG_STATE_UNKNOWN;
5421         aninfo.cur_time = 0;
5422         tick = 0;
5423         while (++tick < 195000) {
5424                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5425                 if (status == ANEG_DONE || status == ANEG_FAILED)
5426                         break;
5427
5428                 udelay(1);
5429         }
5430
5431         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5432         tw32_f(MAC_MODE, tp->mac_mode);
5433         udelay(40);
5434
5435         *txflags = aninfo.txconfig;
5436         *rxflags = aninfo.flags;
5437
5438         if (status == ANEG_DONE &&
5439             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5440                              MR_LP_ADV_FULL_DUPLEX)))
5441                 res = 1;
5442
5443         return res;
5444 }
5445
5446 static void tg3_init_bcm8002(struct tg3 *tp)
5447 {
5448         u32 mac_status = tr32(MAC_STATUS);
5449         int i;
5450
5451         /* Reset on first-time init or when we already have a link. */
5452         if (tg3_flag(tp, INIT_COMPLETE) &&
5453             !(mac_status & MAC_STATUS_PCS_SYNCED))
5454                 return;
5455
5456         /* Set PLL lock range. */
5457         tg3_writephy(tp, 0x16, 0x8007);
5458
5459         /* SW reset */
5460         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5461
5462         /* Wait for reset to complete. */
5463         /* XXX schedule_timeout() ... */
5464         for (i = 0; i < 500; i++)
5465                 udelay(10);
5466
5467         /* Config mode; select PMA/Ch 1 regs. */
5468         tg3_writephy(tp, 0x10, 0x8411);
5469
5470         /* Enable auto-lock and comdet, select txclk for tx. */
5471         tg3_writephy(tp, 0x11, 0x0a10);
5472
5473         tg3_writephy(tp, 0x18, 0x00a0);
5474         tg3_writephy(tp, 0x16, 0x41ff);
5475
5476         /* Assert and deassert POR. */
5477         tg3_writephy(tp, 0x13, 0x0400);
5478         udelay(40);
5479         tg3_writephy(tp, 0x13, 0x0000);
5480
5481         tg3_writephy(tp, 0x11, 0x0a50);
5482         udelay(40);
5483         tg3_writephy(tp, 0x11, 0x0a10);
5484
5485         /* Wait for signal to stabilize */
5486         /* XXX schedule_timeout() ... */
5487         for (i = 0; i < 15000; i++)
5488                 udelay(10);
5489
5490         /* Deselect the channel register so we can read the PHYID
5491          * later.
5492          */
5493         tg3_writephy(tp, 0x10, 0x8011);
5494 }
5495
5496 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5497 {
5498         u16 flowctrl;
5499         bool current_link_up;
5500         u32 sg_dig_ctrl, sg_dig_status;
5501         u32 serdes_cfg, expected_sg_dig_ctrl;
5502         int workaround, port_a;
5503
5504         serdes_cfg = 0;
5505         expected_sg_dig_ctrl = 0;
5506         workaround = 0;
5507         port_a = 1;
5508         current_link_up = false;
5509
5510         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5511             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5512                 workaround = 1;
5513                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514                         port_a = 0;
5515
5516                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5517                 /* preserve bits 20-23 for voltage regulator */
5518                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519         }
5520
5521         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5522
5523         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5524                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5525                         if (workaround) {
5526                                 u32 val = serdes_cfg;
5527
5528                                 if (port_a)
5529                                         val |= 0xc010000;
5530                                 else
5531                                         val |= 0x4010000;
5532                                 tw32_f(MAC_SERDES_CFG, val);
5533                         }
5534
5535                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5536                 }
5537                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5538                         tg3_setup_flow_control(tp, 0, 0);
5539                         current_link_up = true;
5540                 }
5541                 goto out;
5542         }
5543
5544         /* Want auto-negotiation.  */
5545         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5546
5547         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5548         if (flowctrl & ADVERTISE_1000XPAUSE)
5549                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5550         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5551                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5552
5553         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5554                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5555                     tp->serdes_counter &&
5556                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5557                                     MAC_STATUS_RCVD_CFG)) ==
5558                      MAC_STATUS_PCS_SYNCED)) {
5559                         tp->serdes_counter--;
5560                         current_link_up = true;
5561                         goto out;
5562                 }
5563 restart_autoneg:
5564                 if (workaround)
5565                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5566                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5567                 udelay(5);
5568                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5569
5570                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5571                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5572         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5573                                  MAC_STATUS_SIGNAL_DET)) {
5574                 sg_dig_status = tr32(SG_DIG_STATUS);
5575                 mac_status = tr32(MAC_STATUS);
5576
5577                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5578                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5579                         u32 local_adv = 0, remote_adv = 0;
5580
5581                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5582                                 local_adv |= ADVERTISE_1000XPAUSE;
5583                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5584                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5585
5586                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5587                                 remote_adv |= LPA_1000XPAUSE;
5588                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5589                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5590
5591                         tp->link_config.rmt_adv =
5592                                            mii_adv_to_ethtool_adv_x(remote_adv);
5593
5594                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5595                         current_link_up = true;
5596                         tp->serdes_counter = 0;
5597                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5598                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5599                         if (tp->serdes_counter)
5600                                 tp->serdes_counter--;
5601                         else {
5602                                 if (workaround) {
5603                                         u32 val = serdes_cfg;
5604
5605                                         if (port_a)
5606                                                 val |= 0xc010000;
5607                                         else
5608                                                 val |= 0x4010000;
5609
5610                                         tw32_f(MAC_SERDES_CFG, val);
5611                                 }
5612
5613                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614                                 udelay(40);
5615
5616                                 /* Parallel detection: the link is up only
5617                                  * if we have PCS sync and are not
5618                                  * receiving config code words. */
5619                                 mac_status = tr32(MAC_STATUS);
5620                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5621                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5622                                         tg3_setup_flow_control(tp, 0, 0);
5623                                         current_link_up = true;
5624                                         tp->phy_flags |=
5625                                                 TG3_PHYFLG_PARALLEL_DETECT;
5626                                         tp->serdes_counter =
5627                                                 SERDES_PARALLEL_DET_TIMEOUT;
5628                                 } else
5629                                         goto restart_autoneg;
5630                         }
5631                 }
5632         } else {
5633                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5634                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5635         }
5636
5637 out:
5638         return current_link_up;
5639 }
5640
5641 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5642 {
5643         bool current_link_up = false;
5644
5645         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646                 goto out;
5647
5648         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5649                 u32 txflags, rxflags;
5650                 int i;
5651
5652                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5653                         u32 local_adv = 0, remote_adv = 0;
5654
5655                         if (txflags & ANEG_CFG_PS1)
5656                                 local_adv |= ADVERTISE_1000XPAUSE;
5657                         if (txflags & ANEG_CFG_PS2)
5658                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5659
5660                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5661                                 remote_adv |= LPA_1000XPAUSE;
5662                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5663                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5664
5665                         tp->link_config.rmt_adv =
5666                                            mii_adv_to_ethtool_adv_x(remote_adv);
5667
5668                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5669
5670                         current_link_up = true;
5671                 }
5672                 for (i = 0; i < 30; i++) {
5673                         udelay(20);
5674                         tw32_f(MAC_STATUS,
5675                                (MAC_STATUS_SYNC_CHANGED |
5676                                 MAC_STATUS_CFG_CHANGED));
5677                         udelay(40);
5678                         if ((tr32(MAC_STATUS) &
5679                              (MAC_STATUS_SYNC_CHANGED |
5680                               MAC_STATUS_CFG_CHANGED)) == 0)
5681                                 break;
5682                 }
5683
5684                 mac_status = tr32(MAC_STATUS);
5685                 if (!current_link_up &&
5686                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5687                     !(mac_status & MAC_STATUS_RCVD_CFG))
5688                         current_link_up = true;
5689         } else {
5690                 tg3_setup_flow_control(tp, 0, 0);
5691
5692                 /* Forcing 1000FD link up. */
5693                 current_link_up = true;
5694
5695                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696                 udelay(40);
5697
5698                 tw32_f(MAC_MODE, tp->mac_mode);
5699                 udelay(40);
5700         }
5701
5702 out:
5703         return current_link_up;
5704 }
5705
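/* Top-level link setup for TBI (fiber) ports: put the MAC in TBI
 * mode, run autoneg in hardware or by hand, then reconcile MAC
 * status, LEDs, and flow control with the result.
 */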
5706 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 {
5708         u32 orig_pause_cfg;
5709         u32 orig_active_speed;
5710         u8 orig_active_duplex;
5711         u32 mac_status;
5712         bool current_link_up;
5713         int i;
5714
5715         orig_pause_cfg = tp->link_config.active_flowctrl;
5716         orig_active_speed = tp->link_config.active_speed;
5717         orig_active_duplex = tp->link_config.active_duplex;
5718
5719         if (!tg3_flag(tp, HW_AUTONEG) &&
5720             tp->link_up &&
5721             tg3_flag(tp, INIT_COMPLETE)) {
5722                 mac_status = tr32(MAC_STATUS);
5723                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5724                                MAC_STATUS_SIGNAL_DET |
5725                                MAC_STATUS_CFG_CHANGED |
5726                                MAC_STATUS_RCVD_CFG);
5727                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5728                                    MAC_STATUS_SIGNAL_DET)) {
5729                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5730                                             MAC_STATUS_CFG_CHANGED));
5731                         return 0;
5732                 }
5733         }
5734
5735         tw32_f(MAC_TX_AUTO_NEG, 0);
5736
5737         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5738         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5739         tw32_f(MAC_MODE, tp->mac_mode);
5740         udelay(40);
5741
5742         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5743                 tg3_init_bcm8002(tp);
5744
5745         /* Enable link change events even when serdes polling is active.  */
5746         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747         udelay(40);
5748
5749         current_link_up = false;
5750         tp->link_config.rmt_adv = 0;
5751         mac_status = tr32(MAC_STATUS);
5752
5753         if (tg3_flag(tp, HW_AUTONEG))
5754                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5755         else
5756                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5757
5758         tp->napi[0].hw_status->status =
5759                 (SD_STATUS_UPDATED |
5760                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5761
5762         for (i = 0; i < 100; i++) {
5763                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5764                                     MAC_STATUS_CFG_CHANGED));
5765                 udelay(5);
5766                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5767                                          MAC_STATUS_CFG_CHANGED |
5768                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5769                         break;
5770         }
5771
5772         mac_status = tr32(MAC_STATUS);
5773         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5774                 current_link_up = false;
5775                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5776                     tp->serdes_counter == 0) {
5777                         tw32_f(MAC_MODE, (tp->mac_mode |
5778                                           MAC_MODE_SEND_CONFIGS));
5779                         udelay(1);
5780                         tw32_f(MAC_MODE, tp->mac_mode);
5781                 }
5782         }
5783
5784         if (current_link_up) {
5785                 tp->link_config.active_speed = SPEED_1000;
5786                 tp->link_config.active_duplex = DUPLEX_FULL;
5787                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5788                                     LED_CTRL_LNKLED_OVERRIDE |
5789                                     LED_CTRL_1000MBPS_ON));
5790         } else {
5791                 tp->link_config.active_speed = SPEED_UNKNOWN;
5792                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5793                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5794                                     LED_CTRL_LNKLED_OVERRIDE |
5795                                     LED_CTRL_TRAFFIC_OVERRIDE));
5796         }
5797
5798         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5799                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5800                 if (orig_pause_cfg != now_pause_cfg ||
5801                     orig_active_speed != tp->link_config.active_speed ||
5802                     orig_active_duplex != tp->link_config.active_duplex)
5803                         tg3_link_report(tp);
5804         }
5805
5806         return 0;
5807 }
5808
5809 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5810 {
5811         int err = 0;
5812         u32 bmsr, bmcr;
5813         u32 current_speed = SPEED_UNKNOWN;
5814         u8 current_duplex = DUPLEX_UNKNOWN;
5815         bool current_link_up = false;
5816         u32 local_adv, remote_adv, sgsr;
5817
5818         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5819              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5820              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5821              (sgsr & SERDES_TG3_SGMII_MODE)) {
5822
5823                 if (force_reset)
5824                         tg3_phy_reset(tp);
5825
5826                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5827
5828                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5829                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5830                 } else {
5831                         current_link_up = true;
5832                         if (sgsr & SERDES_TG3_SPEED_1000) {
5833                                 current_speed = SPEED_1000;
5834                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5835                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5836                                 current_speed = SPEED_100;
5837                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5838                         } else {
5839                                 current_speed = SPEED_10;
5840                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841                         }
5842
5843                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5844                                 current_duplex = DUPLEX_FULL;
5845                         else
5846                                 current_duplex = DUPLEX_HALF;
5847                 }
5848
5849                 tw32_f(MAC_MODE, tp->mac_mode);
5850                 udelay(40);
5851
5852                 tg3_clear_mac_status(tp);
5853
5854                 goto fiber_setup_done;
5855         }
5856
5857         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5858         tw32_f(MAC_MODE, tp->mac_mode);
5859         udelay(40);
5860
5861         tg3_clear_mac_status(tp);
5862
5863         if (force_reset)
5864                 tg3_phy_reset(tp);
5865
5866         tp->link_config.rmt_adv = 0;
5867
5868         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5869         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5870         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5871                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5872                         bmsr |= BMSR_LSTATUS;
5873                 else
5874                         bmsr &= ~BMSR_LSTATUS;
5875         }
5876
5877         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5878
5879         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5880             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5881                 /* do nothing, just check for link up at the end */
5882         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883                 u32 adv, newadv;
5884
5885                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5886                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5887                                  ADVERTISE_1000XPAUSE |
5888                                  ADVERTISE_1000XPSE_ASYM |
5889                                  ADVERTISE_SLCT);
5890
5891                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5892                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5893
5894                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5895                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5896                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5897                         tg3_writephy(tp, MII_BMCR, bmcr);
5898
5899                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5900                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5901                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5902
5903                         return err;
5904                 }
5905         } else {
5906                 u32 new_bmcr;
5907
5908                 bmcr &= ~BMCR_SPEED1000;
5909                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5910
5911                 if (tp->link_config.duplex == DUPLEX_FULL)
5912                         new_bmcr |= BMCR_FULLDPLX;
5913
5914                 if (new_bmcr != bmcr) {
5915                         /* BMCR_SPEED1000 is a reserved bit that needs
5916                          * to be set on write.
5917                          */
5918                         new_bmcr |= BMCR_SPEED1000;
5919
5920                         /* Force a link down */
5921                         if (tp->link_up) {
5922                                 u32 adv;
5923
5924                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5925                                 adv &= ~(ADVERTISE_1000XFULL |
5926                                          ADVERTISE_1000XHALF |
5927                                          ADVERTISE_SLCT);
5928                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5929                                 tg3_writephy(tp, MII_BMCR, bmcr |
5930                                                            BMCR_ANRESTART |
5931                                                            BMCR_ANENABLE);
5932                                 udelay(10);
5933                                 tg3_carrier_off(tp);
5934                         }
5935                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5936                         bmcr = new_bmcr;
5937                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5938                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5939                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5940                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5941                                         bmsr |= BMSR_LSTATUS;
5942                                 else
5943                                         bmsr &= ~BMSR_LSTATUS;
5944                         }
5945                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5946                 }
5947         }
5948
5949         if (bmsr & BMSR_LSTATUS) {
5950                 current_speed = SPEED_1000;
5951                 current_link_up = true;
5952                 if (bmcr & BMCR_FULLDPLX)
5953                         current_duplex = DUPLEX_FULL;
5954                 else
5955                         current_duplex = DUPLEX_HALF;
5956
5957                 local_adv = 0;
5958                 remote_adv = 0;
5959
5960                 if (bmcr & BMCR_ANENABLE) {
5961                         u32 common;
5962
5963                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5964                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5965                         common = local_adv & remote_adv;
5966                         if (common & (ADVERTISE_1000XHALF |
5967                                       ADVERTISE_1000XFULL)) {
5968                                 if (common & ADVERTISE_1000XFULL)
5969                                         current_duplex = DUPLEX_FULL;
5970                                 else
5971                                         current_duplex = DUPLEX_HALF;
5972
5973                                 tp->link_config.rmt_adv =
5974                                            mii_adv_to_ethtool_adv_x(remote_adv);
5975                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5976                                 /* Link is up via parallel detect */
5977                         } else {
5978                                 current_link_up = false;
5979                         }
5980                 }
5981         }
5982
5983 fiber_setup_done:
5984         if (current_link_up && current_duplex == DUPLEX_FULL)
5985                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5986
5987         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5988         if (tp->link_config.active_duplex == DUPLEX_HALF)
5989                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5990
5991         tw32_f(MAC_MODE, tp->mac_mode);
5992         udelay(40);
5993
5994         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5995
5996         tp->link_config.active_speed = current_speed;
5997         tp->link_config.active_duplex = current_duplex;
5998
5999         tg3_test_and_report_link_chg(tp, current_link_up);
6000         return err;
6001 }
6002
6003 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6004 {
6005         if (tp->serdes_counter) {
6006                 /* Give autoneg time to complete. */
6007                 tp->serdes_counter--;
6008                 return;
6009         }
6010
6011         if (!tp->link_up &&
6012             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013                 u32 bmcr;
6014
6015                 tg3_readphy(tp, MII_BMCR, &bmcr);
6016                 if (bmcr & BMCR_ANENABLE) {
6017                         u32 phy1, phy2;
6018
6019                         /* Select shadow register 0x1f */
6020                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6021                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6022
6023                         /* Select expansion interrupt status register */
6024                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6025                                          MII_TG3_DSP_EXP1_INT_STAT);
6026                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6028
6029                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6030                                 /* We have signal detect and are not receiving
6031                                  * config code words, so the link is up via
6032                                  * parallel detection.
6033                                  */
6034
6035                                 bmcr &= ~BMCR_ANENABLE;
6036                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6037                                 tg3_writephy(tp, MII_BMCR, bmcr);
6038                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039                         }
6040                 }
6041         } else if (tp->link_up &&
6042                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6043                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044                 u32 phy2;
6045
6046                 /* Select expansion interrupt status register */
6047                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6048                                  MII_TG3_DSP_EXP1_INT_STAT);
6049                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6050                 if (phy2 & 0x20) {
6051                         u32 bmcr;
6052
6053                         /* Config code words received, turn on autoneg. */
6054                         tg3_readphy(tp, MII_BMCR, &bmcr);
6055                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6056
6057                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6058
6059                 }
6060         }
6061 }
6062
6063 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6064 {
6065         u32 val;
6066         int err;
6067
6068         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6069                 err = tg3_setup_fiber_phy(tp, force_reset);
6070         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6071                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6072         else
6073                 err = tg3_setup_copper_phy(tp, force_reset);
6074
6075         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076                 u32 scale;
6077
6078                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6079                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6080                         scale = 65;
6081                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6082                         scale = 6;
6083                 else
6084                         scale = 12;
6085
6086                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6087                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6088                 tw32(GRC_MISC_CFG, val);
6089         }
6090
6091         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6092               (6 << TX_LENGTHS_IPG_SHIFT);
6093         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6094             tg3_asic_rev(tp) == ASIC_REV_5762)
6095                 val |= tr32(MAC_TX_LENGTHS) &
6096                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
6097                         TX_LENGTHS_CNT_DWN_VAL_MSK);
6098
6099         if (tp->link_config.active_speed == SPEED_1000 &&
6100             tp->link_config.active_duplex == DUPLEX_HALF)
6101                 tw32(MAC_TX_LENGTHS, val |
6102                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6103         else
6104                 tw32(MAC_TX_LENGTHS, val |
6105                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6106
6107         if (!tg3_flag(tp, 5705_PLUS)) {
6108                 if (tp->link_up) {
6109                         tw32(HOSTCC_STAT_COAL_TICKS,
6110                              tp->coal.stats_block_coalesce_usecs);
6111                 } else {
6112                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6113                 }
6114         }
6115
6116         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6117                 val = tr32(PCIE_PWR_MGMT_THRESH);
6118                 if (!tp->link_up)
6119                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120                               tp->pwrmgmt_thresh;
6121                 else
6122                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6123                 tw32(PCIE_PWR_MGMT_THRESH, val);
6124         }
6125
6126         return err;
6127 }
6128
6129 /* tp->lock must be held */
6130 static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
6131 {
6132         u64 stamp;
6133
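        /* Per the gettimex64 convention, only the device read that samples
         * the counter is bracketed by the system-time snapshots; the MSB
         * half is assumed to be latched by the LSB read.
         */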
6134         ptp_read_system_prets(sts);
6135         stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6136         ptp_read_system_postts(sts);
6137         stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6138
6139         return stamp;
6140 }
6141
6142 /* tp->lock must be held */
6143 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6144 {
6145         u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6146
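        /* Stop the counter while the two 32-bit halves are written,
         * presumably so that the 64-bit value takes effect atomically,
         * then resume.
         */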
6147         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6148         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6149         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6150         tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6151 }
6152
6153 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6154 static inline void tg3_full_unlock(struct tg3 *tp);
6155 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6156 {
6157         struct tg3 *tp = netdev_priv(dev);
6158
6159         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6160                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6161                                 SOF_TIMESTAMPING_SOFTWARE;
6162
6163         if (tg3_flag(tp, PTP_CAPABLE)) {
6164                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6165                                         SOF_TIMESTAMPING_RX_HARDWARE |
6166                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6167         }
6168
6169         if (tp->ptp_clock)
6170                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6171         else
6172                 info->phc_index = -1;
6173
6174         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6175
6176         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6177                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6178                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6179                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6180         return 0;
6181 }
6182
6183 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6184 {
6185         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6186         bool neg_adj = false;
6187         u32 correction = 0;
6188
6189         if (ppb < 0) {
6190                 neg_adj = true;
6191                 ppb = -ppb;
6192         }
6193
6194         /* Frequency adjustment is performed using hardware with a 24-bit
6195          * accumulator and a programmable correction value. On each clock
6196          * cycle, the correction value is added to the accumulator and, when
6197          * it overflows, the time counter is incremented/decremented.
6198          *
6199          * So the conversion from ppb to the correction value is
6200          *              ppb * (1 << 24) / 1000000000
6201          */
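        /* Worked example (illustrative): ppb = 1000000 (1000 ppm) gives
         * correction = 1000000 * (1 << 24) / 1000000000 = 16777, and
         * 16777 / (1 << 24) is ~0.001 of a count per clock, i.e. 1000 ppm.
         */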
6202         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6203                      TG3_EAV_REF_CLK_CORRECT_MASK;
6204
6205         tg3_full_lock(tp, 0);
6206
6207         if (correction)
6208                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6209                      TG3_EAV_REF_CLK_CORRECT_EN |
6210                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6211         else
6212                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6213
6214         tg3_full_unlock(tp);
6215
6216         return 0;
6217 }
6218
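/* adjtime requests accumulate in software (tp->ptp_adjust) on top of the
 * free-running hardware counter; a settime request rewrites the counter
 * itself and clears the accumulated offset.
 */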
6219 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6220 {
6221         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6222
6223         tg3_full_lock(tp, 0);
6224         tp->ptp_adjust += delta;
6225         tg3_full_unlock(tp);
6226
6227         return 0;
6228 }
6229
6230 static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
6231                             struct ptp_system_timestamp *sts)
6232 {
6233         u64 ns;
6234         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6235
6236         tg3_full_lock(tp, 0);
6237         ns = tg3_refclk_read(tp, sts);
6238         ns += tp->ptp_adjust;
6239         tg3_full_unlock(tp);
6240
6241         *ts = ns_to_timespec64(ns);
6242
6243         return 0;
6244 }
6245
6246 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6247                            const struct timespec64 *ts)
6248 {
6249         u64 ns;
6250         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6251
6252         ns = timespec64_to_ns(ts);
6253
6254         tg3_full_lock(tp, 0);
6255         tg3_refclk_write(tp, ns);
6256         tp->ptp_adjust = 0;
6257         tg3_full_unlock(tp);
6258
6259         return 0;
6260 }
6261
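/* Only a single one-shot timesync output (perout index 0) is supported.
 * It is implemented by programming the EAV watchdog 0 registers with the
 * absolute start time.
 */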
6262 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6263                           struct ptp_clock_request *rq, int on)
6264 {
6265         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6266         u32 clock_ctl;
6267         int rval = 0;
6268
6269         switch (rq->type) {
6270         case PTP_CLK_REQ_PEROUT:
6271                 /* Reject requests with unsupported flags */
6272                 if (rq->perout.flags)
6273                         return -EOPNOTSUPP;
6274
6275                 if (rq->perout.index != 0)
6276                         return -EINVAL;
6277
6278                 tg3_full_lock(tp, 0);
6279                 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6280                 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6281
6282                 if (on) {
6283                         u64 nsec;
6284
6285                         nsec = rq->perout.start.sec * 1000000000ULL +
6286                                rq->perout.start.nsec;
6287
6288                         if (rq->perout.period.sec || rq->perout.period.nsec) {
6289                                 netdev_warn(tp->dev,
6290                                             "Device supports only a one-shot timesync output; period must be 0\n");
6291                                 rval = -EINVAL;
6292                                 goto err_out;
6293                         }
6294
6295                         if (nsec & (1ULL << 63)) {
6296                                 netdev_warn(tp->dev,
6297                                             "Start value (nsec) is over the limit; start must fit in 63 bits\n");
6298                                 rval = -EINVAL;
6299                                 goto err_out;
6300                         }
6301
6302                         tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6303                         tw32(TG3_EAV_WATCHDOG0_MSB,
6304                              TG3_EAV_WATCHDOG0_EN |
6305                              ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6306
6307                         tw32(TG3_EAV_REF_CLCK_CTL,
6308                              clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6309                 } else {
6310                         tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6311                         tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6312                 }
6313
6314 err_out:
6315                 tg3_full_unlock(tp);
6316                 return rval;
6317
6318         default:
6319                 break;
6320         }
6321
6322         return -EOPNOTSUPP;
6323 }
6324
6325 static const struct ptp_clock_info tg3_ptp_caps = {
6326         .owner          = THIS_MODULE,
6327         .name           = "tg3 clock",
6328         .max_adj        = 250000000,
6329         .n_alarm        = 0,
6330         .n_ext_ts       = 0,
6331         .n_per_out      = 1,
6332         .n_pins         = 0,
6333         .pps            = 0,
6334         .adjfreq        = tg3_ptp_adjfreq,
6335         .adjtime        = tg3_ptp_adjtime,
6336         .gettimex64     = tg3_ptp_gettimex,
6337         .settime64      = tg3_ptp_settime,
6338         .enable         = tg3_ptp_enable,
6339 };
6340
6341 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6342                                      struct skb_shared_hwtstamps *timestamp)
6343 {
6344         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6345         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6346                                            tp->ptp_adjust);
6347 }
6348
6349 /* tp->lock must be held */
6350 static void tg3_ptp_init(struct tg3 *tp)
6351 {
6352         if (!tg3_flag(tp, PTP_CAPABLE))
6353                 return;
6354
6355         /* Initialize the hardware clock to the system time. */
6356         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6357         tp->ptp_adjust = 0;
6358         tp->ptp_info = tg3_ptp_caps;
6359 }
6360
6361 /* tp->lock must be held */
6362 static void tg3_ptp_resume(struct tg3 *tp)
6363 {
6364         if (!tg3_flag(tp, PTP_CAPABLE))
6365                 return;
6366
6367         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6368         tp->ptp_adjust = 0;
6369 }
6370
6371 static void tg3_ptp_fini(struct tg3 *tp)
6372 {
6373         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6374                 return;
6375
6376         ptp_clock_unregister(tp->ptp_clock);
6377         tp->ptp_clock = NULL;
6378         tp->ptp_adjust = 0;
6379 }
6380
6381 static inline int tg3_irq_sync(struct tg3 *tp)
6382 {
6383         return tp->irq_sync;
6384 }
6385
6386 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6387 {
6388         int i;
6389
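        /* Advance dst by the register offset so that each block lands at
         * the same offset within the dump buffer as it occupies in the
         * device's register space.
         */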
6390         dst = (u32 *)((u8 *)dst + off);
6391         for (i = 0; i < len; i += sizeof(u32))
6392                 *dst++ = tr32(off + i);
6393 }
6394
6395 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6396 {
6397         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6398         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6399         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6400         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6401         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6402         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6403         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6404         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6405         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6406         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6407         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6408         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6409         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6410         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6411         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6412         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6413         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6414         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6415         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6416
6417         if (tg3_flag(tp, SUPPORT_MSIX))
6418                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6419
6420         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6421         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6422         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6423         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6424         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6425         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6426         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6427         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6428
6429         if (!tg3_flag(tp, 5705_PLUS)) {
6430                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6431                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6432                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6433         }
6434
6435         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6436         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6437         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6438         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6439         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6440
6441         if (tg3_flag(tp, NVRAM))
6442                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6443 }
6444
6445 static void tg3_dump_state(struct tg3 *tp)
6446 {
6447         int i;
6448         u32 *regs;
6449
6450         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6451         if (!regs)
6452                 return;
6453
6454         if (tg3_flag(tp, PCI_EXPRESS)) {
6455                 /* Read up to but not including private PCI registers */
6456                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6457                         regs[i / sizeof(u32)] = tr32(i);
6458         } else
6459                 tg3_dump_legacy_regs(tp, regs);
6460
6461         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6462                 if (!regs[i + 0] && !regs[i + 1] &&
6463                     !regs[i + 2] && !regs[i + 3])
6464                         continue;
6465
6466                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6467                            i * 4,
6468                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6469         }
6470
6471         kfree(regs);
6472
6473         for (i = 0; i < tp->irq_cnt; i++) {
6474                 struct tg3_napi *tnapi = &tp->napi[i];
6475
6476                 /* SW status block */
6477                 netdev_err(tp->dev,
6478                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6479                            i,
6480                            tnapi->hw_status->status,
6481                            tnapi->hw_status->status_tag,
6482                            tnapi->hw_status->rx_jumbo_consumer,
6483                            tnapi->hw_status->rx_consumer,
6484                            tnapi->hw_status->rx_mini_consumer,
6485                            tnapi->hw_status->idx[0].rx_producer,
6486                            tnapi->hw_status->idx[0].tx_consumer);
6487
6488                 netdev_err(tp->dev,
6489                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6490                            i,
6491                            tnapi->last_tag, tnapi->last_irq_tag,
6492                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6493                            tnapi->rx_rcb_ptr,
6494                            tnapi->prodring.rx_std_prod_idx,
6495                            tnapi->prodring.rx_std_cons_idx,
6496                            tnapi->prodring.rx_jmb_prod_idx,
6497                            tnapi->prodring.rx_jmb_cons_idx);
6498         }
6499 }
6500
6501 /* This is called whenever we suspect that the system chipset is re-
6502  * ordering the sequence of MMIO to the tx send mailbox. The symptom
6503  * is bogus tx completions. We try to recover by setting the
6504  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6505  * in the workqueue.
6506  */
6507 static void tg3_tx_recover(struct tg3 *tp)
6508 {
6509         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6510                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6511
6512         netdev_warn(tp->dev,
6513                     "The system may be re-ordering memory-mapped I/O "
6514                     "cycles to the network device, attempting to recover. "
6515                     "Please report the problem to the driver maintainer "
6516                     "and include system chipset information.\n");
6517
6518         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6519 }
6520
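/* Example (illustrative, with TG3_TX_RING_SIZE being 512 here): tx_prod = 5
 * and tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in flight, so
 * tx_pending - 7 remain available; the mask handles producer wrap-around.
 */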
6521 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6522 {
6523         /* Tell compiler to fetch tx indices from memory. */
6524         barrier();
6525         return tnapi->tx_pending -
6526                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6527 }
6528
6529 /* Tigon3 never reports partial packet sends.  So we do not
6530  * need special logic to handle SKBs that have not had all
6531  * of their frags sent yet, like SunGEM does.
6532  */
6533 static void tg3_tx(struct tg3_napi *tnapi)
6534 {
6535         struct tg3 *tp = tnapi->tp;
6536         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6537         u32 sw_idx = tnapi->tx_cons;
6538         struct netdev_queue *txq;
6539         int index = tnapi - tp->napi;
6540         unsigned int pkts_compl = 0, bytes_compl = 0;
6541
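        /* With TSS, vector 0 carries no TX ring, so the vector index is
         * shifted down by one to find the matching tx queue (an assumption
         * based on how the MSI-X vectors are laid out).
         */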
6542         if (tg3_flag(tp, ENABLE_TSS))
6543                 index--;
6544
6545         txq = netdev_get_tx_queue(tp->dev, index);
6546
6547         while (sw_idx != hw_idx) {
6548                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6549                 struct sk_buff *skb = ri->skb;
6550                 int i, tx_bug = 0;
6551
6552                 if (unlikely(skb == NULL)) {
6553                         tg3_tx_recover(tp);
6554                         return;
6555                 }
6556
6557                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6558                         struct skb_shared_hwtstamps timestamp;
6559                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6560                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6561
6562                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6563
6564                         skb_tstamp_tx(skb, &timestamp);
6565                 }
6566
6567                 dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
6568                                  skb_headlen(skb), DMA_TO_DEVICE);
6569
6570                 ri->skb = NULL;
6571
6572                 while (ri->fragmented) {
6573                         ri->fragmented = false;
6574                         sw_idx = NEXT_TX(sw_idx);
6575                         ri = &tnapi->tx_buffers[sw_idx];
6576                 }
6577
6578                 sw_idx = NEXT_TX(sw_idx);
6579
6580                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6581                         ri = &tnapi->tx_buffers[sw_idx];
6582                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6583                                 tx_bug = 1;
6584
6585                         dma_unmap_page(&tp->pdev->dev,
6586                                        dma_unmap_addr(ri, mapping),
6587                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6588                                        DMA_TO_DEVICE);
6589
6590                         while (ri->fragmented) {
6591                                 ri->fragmented = false;
6592                                 sw_idx = NEXT_TX(sw_idx);
6593                                 ri = &tnapi->tx_buffers[sw_idx];
6594                         }
6595
6596                         sw_idx = NEXT_TX(sw_idx);
6597                 }
6598
6599                 pkts_compl++;
6600                 bytes_compl += skb->len;
6601
6602                 dev_consume_skb_any(skb);
6603
6604                 if (unlikely(tx_bug)) {
6605                         tg3_tx_recover(tp);
6606                         return;
6607                 }
6608         }
6609
6610         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6611
6612         tnapi->tx_cons = sw_idx;
6613
6614         /* Need to make the tx_cons update visible to tg3_start_xmit()
6615          * before checking for netif_queue_stopped().  Without the
6616          * memory barrier, there is a small possibility that tg3_start_xmit()
6617          * will miss it and cause the queue to be stopped forever.
6618          */
6619         smp_mb();
6620
6621         if (unlikely(netif_tx_queue_stopped(txq) &&
6622                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6623                 __netif_tx_lock(txq, smp_processor_id());
6624                 if (netif_tx_queue_stopped(txq) &&
6625                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6626                         netif_tx_wake_queue(txq);
6627                 __netif_tx_unlock(txq);
6628         }
6629 }
6630
6631 static void tg3_frag_free(bool is_frag, void *data)
6632 {
6633         if (is_frag)
6634                 skb_free_frag(data);
6635         else
6636                 kfree(data);
6637 }
6638
6639 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6640 {
6641         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6642                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6643
6644         if (!ri->data)
6645                 return;
6646
6647         dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz,
6648                          DMA_FROM_DEVICE);
6649         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6650         ri->data = NULL;
6651 }
6652
6653
6655  *
6656  * We only need to fill in the address because the other members
6657  * of the RX descriptor are invariant, see tg3_init_rings.
6658  *
6659  * Note the purposeful assymetry of cpu vs. chip accesses.  For
6660  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6661  * posting buffers we only dirty the first cache line of the RX
6662  * descriptor (containing the address), whereas for the RX status
6663  * buffers the cpu only reads the last cache line of the RX descriptor
6664  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6665 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6666                              u32 opaque_key, u32 dest_idx_unmasked,
6667                              unsigned int *frag_size)
6668 {
6669         struct tg3_rx_buffer_desc *desc;
6670         struct ring_info *map;
6671         u8 *data;
6672         dma_addr_t mapping;
6673         int skb_size, data_size, dest_idx;
6674
6675         switch (opaque_key) {
6676         case RXD_OPAQUE_RING_STD:
6677                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6678                 desc = &tpr->rx_std[dest_idx];
6679                 map = &tpr->rx_std_buffers[dest_idx];
6680                 data_size = tp->rx_pkt_map_sz;
6681                 break;
6682
6683         case RXD_OPAQUE_RING_JUMBO:
6684                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6685                 desc = &tpr->rx_jmb[dest_idx].std;
6686                 map = &tpr->rx_jmb_buffers[dest_idx];
6687                 data_size = TG3_RX_JMB_MAP_SZ;
6688                 break;
6689
6690         default:
6691                 return -EINVAL;
6692         }
6693
6694         /* Do not overwrite any of the map or rp information
6695          * until we are sure we can commit to a new buffer.
6696          *
6697          * Callers depend upon this behavior and assume that
6698          * we leave everything unchanged if we fail.
6699          */
6700         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6701                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
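        /* Prefer a page-fragment allocation when everything fits in one
         * page so that build_skb() can adopt the buffer cheaply; fall back
         * to kmalloc() (signalled by *frag_size = 0) for larger buffers.
         */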
6702         if (skb_size <= PAGE_SIZE) {
6703                 data = napi_alloc_frag(skb_size);
6704                 *frag_size = skb_size;
6705         } else {
6706                 data = kmalloc(skb_size, GFP_ATOMIC);
6707                 *frag_size = 0;
6708         }
6709         if (!data)
6710                 return -ENOMEM;
6711
6712         mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp),
6713                                  data_size, DMA_FROM_DEVICE);
6714         if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) {
6715                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6716                 return -EIO;
6717         }
6718
6719         map->data = data;
6720         dma_unmap_addr_set(map, mapping, mapping);
6721
6722         desc->addr_hi = ((u64)mapping >> 32);
6723         desc->addr_lo = ((u64)mapping & 0xffffffff);
6724
6725         return data_size;
6726 }
6727
6728 /* We only need to copy over the address because the other
6729  * members of the RX descriptor are invariant.  See notes above
6730  * tg3_alloc_rx_data for full details.
6731  */
6732 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6733                            struct tg3_rx_prodring_set *dpr,
6734                            u32 opaque_key, int src_idx,
6735                            u32 dest_idx_unmasked)
6736 {
6737         struct tg3 *tp = tnapi->tp;
6738         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6739         struct ring_info *src_map, *dest_map;
6740         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6741         int dest_idx;
6742
6743         switch (opaque_key) {
6744         case RXD_OPAQUE_RING_STD:
6745                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6746                 dest_desc = &dpr->rx_std[dest_idx];
6747                 dest_map = &dpr->rx_std_buffers[dest_idx];
6748                 src_desc = &spr->rx_std[src_idx];
6749                 src_map = &spr->rx_std_buffers[src_idx];
6750                 break;
6751
6752         case RXD_OPAQUE_RING_JUMBO:
6753                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6754                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6755                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6756                 src_desc = &spr->rx_jmb[src_idx].std;
6757                 src_map = &spr->rx_jmb_buffers[src_idx];
6758                 break;
6759
6760         default:
6761                 return;
6762         }
6763
6764         dest_map->data = src_map->data;
6765         dma_unmap_addr_set(dest_map, mapping,
6766                            dma_unmap_addr(src_map, mapping));
6767         dest_desc->addr_hi = src_desc->addr_hi;
6768         dest_desc->addr_lo = src_desc->addr_lo;
6769
6770         /* Ensure that the update to the skb happens after the physical
6771          * addresses have been transferred to the new BD location.
6772          */
6773         smp_wmb();
6774
6775         src_map->data = NULL;
6776 }
6777
6778 /* The RX ring scheme is composed of multiple rings which post fresh
6779  * buffers to the chip, and one special ring the chip uses to report
6780  * status back to the host.
6781  *
6782  * The special ring reports the status of received packets to the
6783  * host.  The chip does not write into the original descriptor the
6784  * RX buffer was obtained from.  The chip simply takes the original
6785  * descriptor as provided by the host, updates the status and length
6786  * fields, then writes this into the next status ring entry.
6787  *
6788  * Each ring the host uses to post buffers to the chip is described
6789  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6790  * it is first placed into the on-chip RAM.  When the packet's length
6791  * is known, the chip walks down the TG3_BDINFO entries to select the
6792  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
6793  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
6794  *
6795  * The "separate ring for rx status" scheme may sound odd, but it makes
6796  * sense from a cache coherency perspective.  If only the host writes
6797  * to the buffer post rings, and only the chip writes to the rx status
6798  * rings, then cache lines never move beyond shared-modified state.
6799  * If both the host and chip were to write into the same ring, cache line
6800  * eviction could occur since both entities want it in an exclusive state.
6801  */
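/* For example (illustrative sizes): with the standard ring's MAXLEN sized
 * for ~1.5K buffers and the jumbo ring's for ~9K, a 1400-byte frame is
 * reported against a standard ring buffer while a 7000-byte frame falls
 * through to the jumbo ring.
 */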
6802 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6803 {
6804         struct tg3 *tp = tnapi->tp;
6805         u32 work_mask, rx_std_posted = 0;
6806         u32 std_prod_idx, jmb_prod_idx;
6807         u32 sw_idx = tnapi->rx_rcb_ptr;
6808         u16 hw_idx;
6809         int received;
6810         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6811
6812         hw_idx = *(tnapi->rx_rcb_prod_idx);
6813         /*
6814          * We need to order the read of hw_idx and the read of
6815          * the opaque cookie.
6816          */
6817         rmb();
6818         work_mask = 0;
6819         received = 0;
6820         std_prod_idx = tpr->rx_std_prod_idx;
6821         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6822         while (sw_idx != hw_idx && budget > 0) {
6823                 struct ring_info *ri;
6824                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6825                 unsigned int len;
6826                 struct sk_buff *skb;
6827                 dma_addr_t dma_addr;
6828                 u32 opaque_key, desc_idx, *post_ptr;
6829                 u8 *data;
6830                 u64 tstamp = 0;
6831
6832                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6833                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6834                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6835                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6836                         dma_addr = dma_unmap_addr(ri, mapping);
6837                         data = ri->data;
6838                         post_ptr = &std_prod_idx;
6839                         rx_std_posted++;
6840                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6841                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6842                         dma_addr = dma_unmap_addr(ri, mapping);
6843                         data = ri->data;
6844                         post_ptr = &jmb_prod_idx;
6845                 } else
6846                         goto next_pkt_nopost;
6847
6848                 work_mask |= opaque_key;
6849
6850                 if (desc->err_vlan & RXD_ERR_MASK) {
6851                 drop_it:
6852                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6853                                        desc_idx, *post_ptr);
6854                 drop_it_no_recycle:
6855                         /* Other statistics are tracked by the card. */
6856                         tp->rx_dropped++;
6857                         goto next_pkt;
6858                 }
6859
6860                 prefetch(data + TG3_RX_OFFSET(tp));
6861                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6862                       ETH_FCS_LEN;
6863
6864                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6865                      RXD_FLAG_PTPSTAT_PTPV1 ||
6866                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6867                      RXD_FLAG_PTPSTAT_PTPV2) {
6868                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6869                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6870                 }
6871
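                /* Two receive strategies: for large frames, hand the DMA
                 * buffer itself to the stack via build_skb() and post a
                 * fresh buffer to the ring; for small frames, copy into a
                 * newly allocated skb and recycle the DMA buffer in place.
                 */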
6872                 if (len > TG3_RX_COPY_THRESH(tp)) {
6873                         int skb_size;
6874                         unsigned int frag_size;
6875
6876                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6877                                                     *post_ptr, &frag_size);
6878                         if (skb_size < 0)
6879                                 goto drop_it;
6880
6881                         dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size,
6882                                          DMA_FROM_DEVICE);
6883
6884                         /* Ensure that the update to the data happens
6885                          * after the usage of the old DMA mapping.
6886                          */
6887                         smp_wmb();
6888
6889                         ri->data = NULL;
6890
6891                         skb = build_skb(data, frag_size);
6892                         if (!skb) {
6893                                 tg3_frag_free(frag_size != 0, data);
6894                                 goto drop_it_no_recycle;
6895                         }
6896                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6897                 } else {
6898                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6899                                        desc_idx, *post_ptr);
6900
6901                         skb = netdev_alloc_skb(tp->dev,
6902                                                len + TG3_RAW_IP_ALIGN);
6903                         if (skb == NULL)
6904                                 goto drop_it_no_recycle;
6905
6906                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6907                         dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len,
6908                                                 DMA_FROM_DEVICE);
6909                         memcpy(skb->data,
6910                                data + TG3_RX_OFFSET(tp),
6911                                len);
6912                         dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
6913                                                    len, DMA_FROM_DEVICE);
6914                 }
6915
6916                 skb_put(skb, len);
6917                 if (tstamp)
6918                         tg3_hwclock_to_timestamp(tp, tstamp,
6919                                                  skb_hwtstamps(skb));
6920
6921                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6922                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6923                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6924                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6925                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6926                 else
6927                         skb_checksum_none_assert(skb);
6928
6929                 skb->protocol = eth_type_trans(skb, tp->dev);
6930
6931                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6932                     skb->protocol != htons(ETH_P_8021Q) &&
6933                     skb->protocol != htons(ETH_P_8021AD)) {
6934                         dev_kfree_skb_any(skb);
6935                         goto drop_it_no_recycle;
6936                 }
6937
6938                 if (desc->type_flags & RXD_FLAG_VLAN &&
6939                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6940                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6941                                                desc->err_vlan & RXD_VLAN_MASK);
6942
6943                 napi_gro_receive(&tnapi->napi, skb);
6944
6945                 received++;
6946                 budget--;
6947
6948 next_pkt:
6949                 (*post_ptr)++;
6950
6951                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6952                         tpr->rx_std_prod_idx = std_prod_idx &
6953                                                tp->rx_std_ring_mask;
6954                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6955                                      tpr->rx_std_prod_idx);
6956                         work_mask &= ~RXD_OPAQUE_RING_STD;
6957                         rx_std_posted = 0;
6958                 }
6959 next_pkt_nopost:
6960                 sw_idx++;
6961                 sw_idx &= tp->rx_ret_ring_mask;
6962
6963                 /* Refresh hw_idx to see if there is new work */
6964                 if (sw_idx == hw_idx) {
6965                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6966                         rmb();
6967                 }
6968         }
6969
6970         /* ACK the status ring. */
6971         tnapi->rx_rcb_ptr = sw_idx;
6972         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6973
6974         /* Refill RX ring(s). */
6975         if (!tg3_flag(tp, ENABLE_RSS)) {
6976                 /* Sync BD data before updating mailbox */
6977                 wmb();
6978
6979                 if (work_mask & RXD_OPAQUE_RING_STD) {
6980                         tpr->rx_std_prod_idx = std_prod_idx &
6981                                                tp->rx_std_ring_mask;
6982                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6983                                      tpr->rx_std_prod_idx);
6984                 }
6985                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6986                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6987                                                tp->rx_jmb_ring_mask;
6988                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6989                                      tpr->rx_jmb_prod_idx);
6990                 }
6991         } else if (work_mask) {
6992                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6993                  * updated before the producer indices can be updated.
6994                  */
6995                 smp_wmb();
6996
6997                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6998                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6999
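                /* With RSS, buffers recycled on this vector only update its
                 * shadow producer indices; napi[1] is kicked so the buffers
                 * can be migrated back to the ring that actually feeds the
                 * hardware (see tg3_rx_prodring_xfer()).
                 */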
7000                 if (tnapi != &tp->napi[1]) {
7001                         tp->rx_refill = true;
7002                         napi_schedule(&tp->napi[1].napi);
7003                 }
7004         }
7005
7006         return received;
7007 }
7008
7009 static void tg3_poll_link(struct tg3 *tp)
7010 {
7011         /* handle link change and other phy events */
7012         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7013                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7014
7015                 if (sblk->status & SD_STATUS_LINK_CHG) {
7016                         sblk->status = SD_STATUS_UPDATED |
7017                                        (sblk->status & ~SD_STATUS_LINK_CHG);
7018                         spin_lock(&tp->lock);
7019                         if (tg3_flag(tp, USE_PHYLIB)) {
7020                                 tw32_f(MAC_STATUS,
7021                                      (MAC_STATUS_SYNC_CHANGED |
7022                                       MAC_STATUS_CFG_CHANGED |
7023                                       MAC_STATUS_MI_COMPLETION |
7024                                       MAC_STATUS_LNKSTATE_CHANGED));
7025                                 udelay(40);
7026                         } else
7027                                 tg3_setup_phy(tp, false);
7028                         spin_unlock(&tp->lock);
7029                 }
7030         }
7031 }
7032
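/* Move freshly recycled std/jumbo buffers from the source producer ring set
 * @spr to the destination set @dpr, copying as many contiguous entries as
 * possible per pass and bailing out with -ENOSPC when a destination slot is
 * still occupied.
 */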
7033 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7034                                 struct tg3_rx_prodring_set *dpr,
7035                                 struct tg3_rx_prodring_set *spr)
7036 {
7037         u32 si, di, cpycnt, src_prod_idx;
7038         int i, err = 0;
7039
7040         while (1) {
7041                 src_prod_idx = spr->rx_std_prod_idx;
7042
7043                 /* Make sure updates to the rx_std_buffers[] entries and the
7044                  * standard producer index are seen in the correct order.
7045                  */
7046                 smp_rmb();
7047
7048                 if (spr->rx_std_cons_idx == src_prod_idx)
7049                         break;
7050
7051                 if (spr->rx_std_cons_idx < src_prod_idx)
7052                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7053                 else
7054                         cpycnt = tp->rx_std_ring_mask + 1 -
7055                                  spr->rx_std_cons_idx;
7056
7057                 cpycnt = min(cpycnt,
7058                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7059
7060                 si = spr->rx_std_cons_idx;
7061                 di = dpr->rx_std_prod_idx;
7062
7063                 for (i = di; i < di + cpycnt; i++) {
7064                         if (dpr->rx_std_buffers[i].data) {
7065                                 cpycnt = i - di;
7066                                 err = -ENOSPC;
7067                                 break;
7068                         }
7069                 }
7070
7071                 if (!cpycnt)
7072                         break;
7073
7074                 /* Ensure that updates to the rx_std_buffers ring and the
7075                  * shadowed hardware producer ring from tg3_recycle_skb() are
7076                  * ordered correctly WRT the skb check above.
7077                  */
7078                 smp_rmb();
7079
7080                 memcpy(&dpr->rx_std_buffers[di],
7081                        &spr->rx_std_buffers[si],
7082                        cpycnt * sizeof(struct ring_info));
7083
7084                 for (i = 0; i < cpycnt; i++, di++, si++) {
7085                         struct tg3_rx_buffer_desc *sbd, *dbd;
7086                         sbd = &spr->rx_std[si];
7087                         dbd = &dpr->rx_std[di];
7088                         dbd->addr_hi = sbd->addr_hi;
7089                         dbd->addr_lo = sbd->addr_lo;
7090                 }
7091
7092                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7093                                        tp->rx_std_ring_mask;
7094                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7095                                        tp->rx_std_ring_mask;
7096         }
7097
7098         while (1) {
7099                 src_prod_idx = spr->rx_jmb_prod_idx;
7100
7101                 /* Make sure updates to the rx_jmb_buffers[] entries and
7102                  * the jumbo producer index are seen in the correct order.
7103                  */
7104                 smp_rmb();
7105
7106                 if (spr->rx_jmb_cons_idx == src_prod_idx)
7107                         break;
7108
7109                 if (spr->rx_jmb_cons_idx < src_prod_idx)
7110                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7111                 else
7112                         cpycnt = tp->rx_jmb_ring_mask + 1 -
7113                                  spr->rx_jmb_cons_idx;
7114
7115                 cpycnt = min(cpycnt,
7116                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7117
7118                 si = spr->rx_jmb_cons_idx;
7119                 di = dpr->rx_jmb_prod_idx;
7120
7121                 for (i = di; i < di + cpycnt; i++) {
7122                         if (dpr->rx_jmb_buffers[i].data) {
7123                                 cpycnt = i - di;
7124                                 err = -ENOSPC;
7125                                 break;
7126                         }
7127                 }
7128
7129                 if (!cpycnt)
7130                         break;
7131
7132                 /* Ensure that updates to the rx_jmb_buffers ring and the
7133                  * shadowed hardware producer ring from tg3_recycle_skb() are
7134                  * ordered correctly WRT the skb check above.
7135                  */
7136                 smp_rmb();
7137
7138                 memcpy(&dpr->rx_jmb_buffers[di],
7139                        &spr->rx_jmb_buffers[si],
7140                        cpycnt * sizeof(struct ring_info));
7141
7142                 for (i = 0; i < cpycnt; i++, di++, si++) {
7143                         struct tg3_rx_buffer_desc *sbd, *dbd;
7144                         sbd = &spr->rx_jmb[si].std;
7145                         dbd = &dpr->rx_jmb[di].std;
7146                         dbd->addr_hi = sbd->addr_hi;
7147                         dbd->addr_lo = sbd->addr_lo;
7148                 }
7149
7150                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7151                                        tp->rx_jmb_ring_mask;
7152                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7153                                        tp->rx_jmb_ring_mask;
7154         }
7155
7156         return err;
7157 }
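
/* Minimal sketch, not used by the driver: the contiguous-copy count
 * computed by the loops in tg3_rx_prodring_xfer() above.  The ring
 * size is mask + 1 (a power of two), so indices wrap with "& mask";
 * when the consumer sits past the producer, only the tail up to the
 * end of the ring can be moved with one memcpy(), and the next loop
 * iteration picks up the wrapped remainder.
 */
static inline u32 tg3_example_contig_count(u32 cons, u32 prod, u32 mask)
{
        if (cons == prod)
                return 0;               /* nothing to transfer */
        if (cons < prod)
                return prod - cons;     /* one contiguous run */
        return mask + 1 - cons;         /* run up to the ring end */
}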
7158
7159 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7160 {
7161         struct tg3 *tp = tnapi->tp;
7162
7163         /* run TX completion thread */
7164         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7165                 tg3_tx(tnapi);
7166                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7167                         return work_done;
7168         }
7169
7170         if (!tnapi->rx_rcb_prod_idx)
7171                 return work_done;
7172
7173         /* run RX thread, within the bounds set by NAPI.
7174          * All RX "locking" is done by ensuring outside
7175          * code synchronizes with tg3->napi.poll()
7176          */
7177         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7178                 work_done += tg3_rx(tnapi, budget - work_done);
7179
7180         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7181                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7182                 int i, err = 0;
7183                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7184                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7185
7186                 tp->rx_refill = false;
7187                 for (i = 1; i <= tp->rxq_cnt; i++)
7188                         err |= tg3_rx_prodring_xfer(tp, dpr,
7189                                                     &tp->napi[i].prodring);
7190
7191                 wmb();
7192
7193                 if (std_prod_idx != dpr->rx_std_prod_idx)
7194                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7195                                      dpr->rx_std_prod_idx);
7196
7197                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7198                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7199                                      dpr->rx_jmb_prod_idx);
7200
7201                 if (err)
7202                         tw32_f(HOSTCC_MODE, tp->coal_now);
7203         }
7204
7205         return work_done;
7206 }
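
/* Usage sketch (mirroring the poll loops below): tg3_poll_work() keeps
 * a running total so RX processing never exceeds the NAPI budget,
 * while TX completion work is not budgeted:
 *
 *      work_done = tg3_poll_work(tnapi, work_done, budget);
 *      if (work_done >= budget)
 *              ...stay in polling mode, do not re-enable interrupts...
 */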
7207
7208 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7209 {
7210         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7211                 schedule_work(&tp->reset_task);
7212 }
7213
7214 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7215 {
7216         if (test_and_clear_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7217                 cancel_work_sync(&tp->reset_task);
7218         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7219 }
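
/* Usage sketch (assumed caller pattern): test_and_set_bit() makes
 * scheduling idempotent, so a burst of error paths collapses into a
 * single queued reset_task:
 *
 *      tg3_reset_task_schedule(tp);    schedules the work
 *      tg3_reset_task_schedule(tp);    no-op, bit already set
 *      ...
 *      tg3_reset_task_cancel(tp);      clears the bit, syncs the work
 */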
7220
7221 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7222 {
7223         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7224         struct tg3 *tp = tnapi->tp;
7225         int work_done = 0;
7226         struct tg3_hw_status *sblk = tnapi->hw_status;
7227
7228         while (1) {
7229                 work_done = tg3_poll_work(tnapi, work_done, budget);
7230
7231                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7232                         goto tx_recovery;
7233
7234                 if (unlikely(work_done >= budget))
7235                         break;
7236
7237                 /* tnapi->last_tag is used in the mailbox write below
7238                  * to tell the hw how much work has been processed,
7239                  * so we must read it before checking for more work.
7240                  */
7241                 tnapi->last_tag = sblk->status_tag;
7242                 tnapi->last_irq_tag = tnapi->last_tag;
7243                 rmb();
7244
7245                 /* check for RX/TX work to do */
7246                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7247                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7248
7249                         /* This test is not race free, but looping
7250                          * again here reduces the number of interrupts.
7251                          */
7252                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7253                                 continue;
7254
7255                         napi_complete_done(napi, work_done);
7256                         /* Reenable interrupts. */
7257                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7258
7259                         /* This test here is synchronized by napi_schedule()
7260                          * and napi_complete() to close the race condition.
7261                          */
7262                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7263                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7264                                                   HOSTCC_MODE_ENABLE |
7265                                                   tnapi->coal_now);
7266                         }
7267                         break;
7268                 }
7269         }
7270
7271         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7272         return work_done;
7273
7274 tx_recovery:
7275         /* work_done is guaranteed to be less than budget. */
7276         napi_complete(napi);
7277         tg3_reset_task_schedule(tp);
7278         return work_done;
7279 }
7280
7281 static void tg3_process_error(struct tg3 *tp)
7282 {
7283         u32 val;
7284         bool real_error = false;
7285
7286         if (tg3_flag(tp, ERROR_PROCESSED))
7287                 return;
7288
7289         /* Check Flow Attention register */
7290         val = tr32(HOSTCC_FLOW_ATTN);
7291         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7292                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7293                 real_error = true;
7294         }
7295
7296         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7297                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7298                 real_error = true;
7299         }
7300
7301         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7302                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7303                 real_error = true;
7304         }
7305
7306         if (!real_error)
7307                 return;
7308
7309         tg3_dump_state(tp);
7310
7311         tg3_flag_set(tp, ERROR_PROCESSED);
7312         tg3_reset_task_schedule(tp);
7313 }
7314
7315 static int tg3_poll(struct napi_struct *napi, int budget)
7316 {
7317         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7318         struct tg3 *tp = tnapi->tp;
7319         int work_done = 0;
7320         struct tg3_hw_status *sblk = tnapi->hw_status;
7321
7322         while (1) {
7323                 if (sblk->status & SD_STATUS_ERROR)
7324                         tg3_process_error(tp);
7325
7326                 tg3_poll_link(tp);
7327
7328                 work_done = tg3_poll_work(tnapi, work_done, budget);
7329
7330                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7331                         goto tx_recovery;
7332
7333                 if (unlikely(work_done >= budget))
7334                         break;
7335
7336                 if (tg3_flag(tp, TAGGED_STATUS)) {
7337                         /* tp->last_tag is used in tg3_int_reenable() below
7338                          * to tell the hw how much work has been processed,
7339                          * so we must read it before checking for more work.
7340                          */
7341                         tnapi->last_tag = sblk->status_tag;
7342                         tnapi->last_irq_tag = tnapi->last_tag;
7343                         rmb();
7344                 } else
7345                         sblk->status &= ~SD_STATUS_UPDATED;
7346
7347                 if (likely(!tg3_has_work(tnapi))) {
7348                         napi_complete_done(napi, work_done);
7349                         tg3_int_reenable(tnapi);
7350                         break;
7351                 }
7352         }
7353
7354         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
7355         return work_done;
7356
7357 tx_recovery:
7358         /* work_done is guaranteed to be less than budget. */
7359         napi_complete(napi);
7360         tg3_reset_task_schedule(tp);
7361         return work_done;
7362 }
7363
7364 static void tg3_napi_disable(struct tg3 *tp)
7365 {
7366         int i;
7367
7368         for (i = tp->irq_cnt - 1; i >= 0; i--)
7369                 napi_disable(&tp->napi[i].napi);
7370 }
7371
7372 static void tg3_napi_enable(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         for (i = 0; i < tp->irq_cnt; i++)
7377                 napi_enable(&tp->napi[i].napi);
7378 }
7379
7380 static void tg3_napi_init(struct tg3 *tp)
7381 {
7382         int i;
7383
7384         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7385         for (i = 1; i < tp->irq_cnt; i++)
7386                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7387 }
7388
7389 static void tg3_napi_fini(struct tg3 *tp)
7390 {
7391         int i;
7392
7393         for (i = 0; i < tp->irq_cnt; i++)
7394                 netif_napi_del(&tp->napi[i].napi);
7395 }
7396
7397 static inline void tg3_netif_stop(struct tg3 *tp)
7398 {
7399         netif_trans_update(tp->dev);    /* prevent tx timeout */
7400         tg3_napi_disable(tp);
7401         netif_carrier_off(tp->dev);
7402         netif_tx_disable(tp->dev);
7403 }
7404
7405 /* tp->lock must be held */
7406 static inline void tg3_netif_start(struct tg3 *tp)
7407 {
7408         tg3_ptp_resume(tp);
7409
7410         /* NOTE: unconditional netif_tx_wake_all_queues is only
7411          * appropriate so long as all callers are assured to
7412          * have free tx slots (such as after tg3_init_hw)
7413          */
7414         netif_tx_wake_all_queues(tp->dev);
7415
7416         if (tp->link_up)
7417                 netif_carrier_on(tp->dev);
7418
7419         tg3_napi_enable(tp);
7420         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7421         tg3_enable_ints(tp);
7422 }
7423
7424 static void tg3_irq_quiesce(struct tg3 *tp)
7425         __releases(tp->lock)
7426         __acquires(tp->lock)
7427 {
7428         int i;
7429
7430         BUG_ON(tp->irq_sync);
7431
7432         tp->irq_sync = 1;
7433         smp_mb();
7434
7435         spin_unlock_bh(&tp->lock);
7436
7437         for (i = 0; i < tp->irq_cnt; i++)
7438                 synchronize_irq(tp->napi[i].irq_vec);
7439
7440         spin_lock_bh(&tp->lock);
7441 }
7442
7443 /* Fully shut down all tg3 driver activity elsewhere in the system.
7444  * If irq_sync is non-zero, the IRQ handlers are synchronized with
7445  * as well.  Most of the time this is not necessary, except when
7446  * shutting down the device.
7447  */
7448 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7449 {
7450         spin_lock_bh(&tp->lock);
7451         if (irq_sync)
7452                 tg3_irq_quiesce(tp);
7453 }
7454
7455 static inline void tg3_full_unlock(struct tg3 *tp)
7456 {
7457         spin_unlock_bh(&tp->lock);
7458 }
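
/* Usage sketch (the pattern used throughout this driver): slow-path
 * reconfiguration brackets its work with this pair; irq_sync == 1
 * additionally waits for in-flight interrupt handlers to finish:
 *
 *      tg3_full_lock(tp, 1);
 *      ...reprogram the chip...
 *      tg3_full_unlock(tp);
 */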
7459
7460 /* One-shot MSI handler - Chip automatically disables interrupt
7461  * after sending MSI so driver doesn't have to do it.
7462  */
7463 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7464 {
7465         struct tg3_napi *tnapi = dev_id;
7466         struct tg3 *tp = tnapi->tp;
7467
7468         prefetch(tnapi->hw_status);
7469         if (tnapi->rx_rcb)
7470                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7471
7472         if (likely(!tg3_irq_sync(tp)))
7473                 napi_schedule(&tnapi->napi);
7474
7475         return IRQ_HANDLED;
7476 }
7477
7478 /* MSI ISR - No need to check for interrupt sharing and no need to
7479  * flush status block and interrupt mailbox. PCI ordering rules
7480  * guarantee that MSI will arrive after the status block.
7481  */
7482 static irqreturn_t tg3_msi(int irq, void *dev_id)
7483 {
7484         struct tg3_napi *tnapi = dev_id;
7485         struct tg3 *tp = tnapi->tp;
7486
7487         prefetch(tnapi->hw_status);
7488         if (tnapi->rx_rcb)
7489                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7490         /*
7491          * Writing any value to intr-mbox-0 clears PCI INTA# and
7492          * chip-internal interrupt pending events.
7493          * Writing non-zero to intr-mbox-0 additionally tells the
7494          * NIC to stop sending us irqs, engaging "in-intr-handler"
7495          * event coalescing.
7496          */
7497         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7498         if (likely(!tg3_irq_sync(tp)))
7499                 napi_schedule(&tnapi->napi);
7500
7501         return IRQ_RETVAL(1);
7502 }
7503
7504 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7505 {
7506         struct tg3_napi *tnapi = dev_id;
7507         struct tg3 *tp = tnapi->tp;
7508         struct tg3_hw_status *sblk = tnapi->hw_status;
7509         unsigned int handled = 1;
7510
7511         /* In INTx mode, it is possible for the interrupt to arrive at
7512          * the CPU before the status block, which is posted prior to it.
7513          * Reading the PCI State register will confirm whether the
7514          * interrupt is ours and will flush the status block.
7515          */
7516         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7517                 if (tg3_flag(tp, CHIP_RESETTING) ||
7518                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7519                         handled = 0;
7520                         goto out;
7521                 }
7522         }
7523
7524         /*
7525          * Writing any value to intr-mbox-0 clears PCI INTA# and
7526          * chip-internal interrupt pending events.
7527          * Writing non-zero to intr-mbox-0 additionally tells the
7528          * NIC to stop sending us irqs, engaging "in-intr-handler"
7529          * event coalescing.
7530          *
7531          * Flush the mailbox to de-assert the IRQ immediately to prevent
7532          * spurious interrupts.  The flush impacts performance but
7533          * excessive spurious interrupts can be worse in some cases.
7534          */
7535         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7536         if (tg3_irq_sync(tp))
7537                 goto out;
7538         sblk->status &= ~SD_STATUS_UPDATED;
7539         if (likely(tg3_has_work(tnapi))) {
7540                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7541                 napi_schedule(&tnapi->napi);
7542         } else {
7543                 /* No work, shared interrupt perhaps?  Re-enable
7544                  * interrupts, and flush that PCI write
7545                  */
7546                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7547                                0x00000000);
7548         }
7549 out:
7550         return IRQ_RETVAL(handled);
7551 }
7552
7553 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7554 {
7555         struct tg3_napi *tnapi = dev_id;
7556         struct tg3 *tp = tnapi->tp;
7557         struct tg3_hw_status *sblk = tnapi->hw_status;
7558         unsigned int handled = 1;
7559
7560         /* In INTx mode, it is possible for the interrupt to arrive at
7561          * the CPU before the status block, which is posted prior to it.
7562          * Reading the PCI State register will confirm whether the
7563          * interrupt is ours and will flush the status block.
7564          */
7565         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7566                 if (tg3_flag(tp, CHIP_RESETTING) ||
7567                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7568                         handled = 0;
7569                         goto out;
7570                 }
7571         }
7572
7573         /*
7574          * Writing any value to intr-mbox-0 clears PCI INTA# and
7575          * chip-internal interrupt pending events.
7576          * Writing non-zero to intr-mbox-0 additionally tells the
7577          * NIC to stop sending us irqs, engaging "in-intr-handler"
7578          * event coalescing.
7579          *
7580          * Flush the mailbox to de-assert the IRQ immediately to prevent
7581          * spurious interrupts.  The flush impacts performance but
7582          * excessive spurious interrupts can be worse in some cases.
7583          */
7584         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7585
7586         /*
7587          * In a shared interrupt configuration, sometimes other devices'
7588          * interrupts will scream.  We record the current status tag here
7589          * so that the above check can report that the screaming interrupts
7590          * are unhandled.  Eventually they will be silenced.
7591          */
7592         tnapi->last_irq_tag = sblk->status_tag;
7593
7594         if (tg3_irq_sync(tp))
7595                 goto out;
7596
7597         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7598
7599         napi_schedule(&tnapi->napi);
7600
7601 out:
7602         return IRQ_RETVAL(handled);
7603 }
7604
7605 /* ISR for interrupt test */
7606 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7607 {
7608         struct tg3_napi *tnapi = dev_id;
7609         struct tg3 *tp = tnapi->tp;
7610         struct tg3_hw_status *sblk = tnapi->hw_status;
7611
7612         if ((sblk->status & SD_STATUS_UPDATED) ||
7613             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7614                 tg3_disable_ints(tp);
7615                 return IRQ_RETVAL(1);
7616         }
7617         return IRQ_RETVAL(0);
7618 }
7619
7620 #ifdef CONFIG_NET_POLL_CONTROLLER
7621 static void tg3_poll_controller(struct net_device *dev)
7622 {
7623         int i;
7624         struct tg3 *tp = netdev_priv(dev);
7625
7626         if (tg3_irq_sync(tp))
7627                 return;
7628
7629         for (i = 0; i < tp->irq_cnt; i++)
7630                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7631 }
7632 #endif
7633
7634 static void tg3_tx_timeout(struct net_device *dev, unsigned int txqueue)
7635 {
7636         struct tg3 *tp = netdev_priv(dev);
7637
7638         if (netif_msg_tx_err(tp)) {
7639                 netdev_err(dev, "transmit timed out, resetting\n");
7640                 tg3_dump_state(tp);
7641         }
7642
7643         tg3_reset_task_schedule(tp);
7644 }
7645
7646 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7647 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7648 {
7649         u32 base = (u32) mapping & 0xffffffff;
7650
7651         return base + len + 8 < base;
7652 }
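
/* Minimal sketch, not used by the driver: tg3_4g_overflow_test() above
 * relies on 32-bit unsigned wraparound, and the "+ 8" also rejects
 * buffers ending within 8 bytes of a boundary.
 */
static inline bool tg3_example_4g_crossing(void)
{
        u32 base = 0xfffff000;  /* hypothetical low 32 bits of mapping */
        u32 len = 0x2000;

        /* 0xfffff000 + 0x2000 + 8 wraps to 0x1008 < base: crosses 4GB */
        return base + len + 8 < base;   /* true */
}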
7653
7654 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7655  * of any 4GB boundaries: 4G, 8G, etc
7656  */
7657 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658                                            u32 len, u32 mss)
7659 {
7660         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7661                 u32 base = (u32) mapping & 0xffffffff;
7662
7663                 return ((base + len + (mss & 0x3fff)) < base);
7664         }
7665         return 0;
7666 }
7667
7668 /* Test for DMA addresses > 40-bit */
7669 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7670                                           int len)
7671 {
7672 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7673         if (tg3_flag(tp, 40BIT_DMA_BUG))
7674                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7675         return 0;
7676 #else
7677         return 0;
7678 #endif
7679 }
7680
7681 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7682                                  dma_addr_t mapping, u32 len, u32 flags,
7683                                  u32 mss, u32 vlan)
7684 {
7685         txbd->addr_hi = ((u64) mapping >> 32);
7686         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7687         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7688         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7689 }
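
/* Layout note (sketch): each TX BD holds a 64-bit DMA address split
 * across two 32-bit words, with len sharing a word with the 16-bit
 * flags and mss sharing a word with the VLAN tag, which is why flags
 * are masked to 16 bits above.
 */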
7690
7691 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7692                             dma_addr_t map, u32 len, u32 flags,
7693                             u32 mss, u32 vlan)
7694 {
7695         struct tg3 *tp = tnapi->tp;
7696         bool hwbug = false;
7697
7698         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7699                 hwbug = true;
7700
7701         if (tg3_4g_overflow_test(map, len))
7702                 hwbug = true;
7703
7704         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7705                 hwbug = true;
7706
7707         if (tg3_40bit_overflow_test(tp, map, len))
7708                 hwbug = true;
7709
7710         if (tp->dma_limit) {
7711                 u32 prvidx = *entry;
7712                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7713                 while (len > tp->dma_limit && *budget) {
7714                         u32 frag_len = tp->dma_limit;
7715                         len -= tp->dma_limit;
7716
7717                         /* Avoid the 8-byte DMA problem */
7718                         if (len <= 8) {
7719                                 len += tp->dma_limit / 2;
7720                                 frag_len = tp->dma_limit / 2;
7721                         }
7722
7723                         tnapi->tx_buffers[*entry].fragmented = true;
7724
7725                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726                                       frag_len, tmp_flag, mss, vlan);
7727                         *budget -= 1;
7728                         prvidx = *entry;
7729                         *entry = NEXT_TX(*entry);
7730
7731                         map += frag_len;
7732                 }
7733
7734                 if (len) {
7735                         if (*budget) {
7736                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7737                                               len, flags, mss, vlan);
7738                                 *budget -= 1;
7739                                 *entry = NEXT_TX(*entry);
7740                         } else {
7741                                 hwbug = true;
7742                                 tnapi->tx_buffers[prvidx].fragmented = false;
7743                         }
7744                 }
7745         } else {
7746                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7747                               len, flags, mss, vlan);
7748                 *entry = NEXT_TX(*entry);
7749         }
7750
7751         return hwbug;
7752 }
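
/* Minimal sketch, not used by the driver: the remainder rebalancing in
 * tg3_tx_frag_set() above.  If splitting at dma_limit would leave a
 * tail of 8 bytes or less (the SHORT_DMA_BUG case), the chunk emitted
 * now shrinks to dma_limit / 2 so the tail grows past 8 bytes.
 * Assumes len > dma_limit, as in the loop above.
 */
static inline u32 tg3_example_frag_len(u32 len, u32 dma_limit)
{
        u32 frag_len = dma_limit;

        /* e.g. len = 0x1004, dma_limit = 0x1000: the tail would be
         * 4 bytes, so emit 0x800 now and leave 0x804 for later.
         */
        if (len - dma_limit <= 8)
                frag_len = dma_limit / 2;
        return frag_len;
}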
7753
7754 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7755 {
7756         int i;
7757         struct sk_buff *skb;
7758         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7759
7760         skb = txb->skb;
7761         txb->skb = NULL;
7762
7763         dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping),
7764                          skb_headlen(skb), DMA_TO_DEVICE);
7765
7766         while (txb->fragmented) {
7767                 txb->fragmented = false;
7768                 entry = NEXT_TX(entry);
7769                 txb = &tnapi->tx_buffers[entry];
7770         }
7771
7772         for (i = 0; i <= last; i++) {
7773                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7774
7775                 entry = NEXT_TX(entry);
7776                 txb = &tnapi->tx_buffers[entry];
7777
7778                 dma_unmap_page(&tnapi->tp->pdev->dev,
7779                                dma_unmap_addr(txb, mapping),
7780                                skb_frag_size(frag), DMA_TO_DEVICE);
7781
7782                 while (txb->fragmented) {
7783                         txb->fragmented = false;
7784                         entry = NEXT_TX(entry);
7785                         txb = &tnapi->tx_buffers[entry];
7786                 }
7787         }
7788 }
7789
7790 /* Work around 4GB and 40-bit hardware DMA bugs. */
7791 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7792                                        struct sk_buff **pskb,
7793                                        u32 *entry, u32 *budget,
7794                                        u32 base_flags, u32 mss, u32 vlan)
7795 {
7796         struct tg3 *tp = tnapi->tp;
7797         struct sk_buff *new_skb, *skb = *pskb;
7798         dma_addr_t new_addr = 0;
7799         int ret = 0;
7800
7801         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7802                 new_skb = skb_copy(skb, GFP_ATOMIC);
7803         else {
7804                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7805
7806                 new_skb = skb_copy_expand(skb,
7807                                           skb_headroom(skb) + more_headroom,
7808                                           skb_tailroom(skb), GFP_ATOMIC);
7809         }
7810
7811         if (!new_skb) {
7812                 ret = -1;
7813         } else {
7814                 /* New SKB is guaranteed to be linear. */
7815                 new_addr = dma_map_single(&tp->pdev->dev, new_skb->data,
7816                                           new_skb->len, DMA_TO_DEVICE);
7817                 /* Make sure the mapping succeeded */
7818                 if (dma_mapping_error(&tp->pdev->dev, new_addr)) {
7819                         dev_kfree_skb_any(new_skb);
7820                         ret = -1;
7821                 } else {
7822                         u32 save_entry = *entry;
7823
7824                         base_flags |= TXD_FLAG_END;
7825
7826                         tnapi->tx_buffers[*entry].skb = new_skb;
7827                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7828                                            mapping, new_addr);
7829
7830                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7831                                             new_skb->len, base_flags,
7832                                             mss, vlan)) {
7833                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7834                                 dev_kfree_skb_any(new_skb);
7835                                 ret = -1;
7836                         }
7837                 }
7838         }
7839
7840         dev_consume_skb_any(skb);
7841         *pskb = new_skb;
7842         return ret;
7843 }
7844
7845 static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7846 {
7847         /* Check whether the GSO workaround can ever get enough
7848          * descriptors; gso_segs can exceed the current ring size.
7849          */
7850         return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7851 }
7852
7853 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7854
7855 /* Use GSO to workaround all TSO packets that meet HW bug conditions
7856  * indicated in tg3_tx_frag_set()
7857  */
7858 static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7859                        struct netdev_queue *txq, struct sk_buff *skb)
7860 {
7861         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7862         struct sk_buff *segs, *seg, *next;
7863
7864         /* Estimate the number of fragments in the worst case */
7865         if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7866                 netif_tx_stop_queue(txq);
7867
7868                 /* netif_tx_stop_queue() must be done before checking
7869                  * the tx index in tg3_tx_avail() below, because in
7870                  * tg3_tx(), we update tx index before checking for
7871                  * netif_tx_queue_stopped().
7872                  */
7873                 smp_mb();
7874                 if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7875                         return NETDEV_TX_BUSY;
7876
7877                 netif_tx_wake_queue(txq);
7878         }
7879
7880         segs = skb_gso_segment(skb, tp->dev->features &
7881                                     ~(NETIF_F_TSO | NETIF_F_TSO6));
7882         if (IS_ERR(segs) || !segs)
7883                 goto tg3_tso_bug_end;
7884
7885         skb_list_walk_safe(segs, seg, next) {
7886                 skb_mark_not_on_list(seg);
7887                 tg3_start_xmit(seg, tp->dev);
7888         }
7889
7890 tg3_tso_bug_end:
7891         dev_consume_skb_any(skb);
7892
7893         return NETDEV_TX_OK;
7894 }
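
/* Usage sketch: the GSO fallback above feeds each software-built
 * segment back through the normal xmit path, hence the estimate of
 * roughly three descriptors per segment before committing to it:
 *
 *      if (tg3_tso_bug_gso_check(tnapi, skb))
 *              return tg3_tso_bug(tp, tnapi, txq, skb);
 *      ...otherwise the packet must be dropped...
 */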
7895
7896 /* hard_start_xmit for all devices */
7897 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7898 {
7899         struct tg3 *tp = netdev_priv(dev);
7900         u32 len, entry, base_flags, mss, vlan = 0;
7901         u32 budget;
7902         int i = -1, would_hit_hwbug;
7903         dma_addr_t mapping;
7904         struct tg3_napi *tnapi;
7905         struct netdev_queue *txq;
7906         unsigned int last;
7907         struct iphdr *iph = NULL;
7908         struct tcphdr *tcph = NULL;
7909         __sum16 tcp_csum = 0, ip_csum = 0;
7910         __be16 ip_tot_len = 0;
7911
7912         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7913         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7914         if (tg3_flag(tp, ENABLE_TSS))
7915                 tnapi++;
7916
7917         budget = tg3_tx_avail(tnapi);
7918
7919         /* We are running in BH disabled context with netif_tx_lock
7920          * and TX reclaim runs via tp->napi.poll inside of a software
7921          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7922          * no IRQ context deadlocks to worry about either.  Rejoice!
7923          */
7924         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7925                 if (!netif_tx_queue_stopped(txq)) {
7926                         netif_tx_stop_queue(txq);
7927
7928                         /* This is a hard error, log it. */
7929                         netdev_err(dev,
7930                                    "BUG! Tx Ring full when queue awake!\n");
7931                 }
7932                 return NETDEV_TX_BUSY;
7933         }
7934
7935         entry = tnapi->tx_prod;
7936         base_flags = 0;
7937
7938         mss = skb_shinfo(skb)->gso_size;
7939         if (mss) {
7940                 u32 tcp_opt_len, hdr_len;
7941
7942                 if (skb_cow_head(skb, 0))
7943                         goto drop;
7944
7945                 iph = ip_hdr(skb);
7946                 tcp_opt_len = tcp_optlen(skb);
7947
7948                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7949
7950                 /* HW/FW can not correctly segment packets that have been
7951                  * vlan encapsulated.
7952                  */
7953                 if (skb->protocol == htons(ETH_P_8021Q) ||
7954                     skb->protocol == htons(ETH_P_8021AD)) {
7955                         if (tg3_tso_bug_gso_check(tnapi, skb))
7956                                 return tg3_tso_bug(tp, tnapi, txq, skb);
7957                         goto drop;
7958                 }
7959
7960                 if (!skb_is_gso_v6(skb)) {
7961                         if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7962                             tg3_flag(tp, TSO_BUG)) {
7963                                 if (tg3_tso_bug_gso_check(tnapi, skb))
7964                                         return tg3_tso_bug(tp, tnapi, txq, skb);
7965                                 goto drop;
7966                         }
7967                         ip_csum = iph->check;
7968                         ip_tot_len = iph->tot_len;
7969                         iph->check = 0;
7970                         iph->tot_len = htons(mss + hdr_len);
7971                 }
7972
7973                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7974                                TXD_FLAG_CPU_POST_DMA);
7975
7976                 tcph = tcp_hdr(skb);
7977                 tcp_csum = tcph->check;
7978
7979                 if (tg3_flag(tp, HW_TSO_1) ||
7980                     tg3_flag(tp, HW_TSO_2) ||
7981                     tg3_flag(tp, HW_TSO_3)) {
7982                         tcph->check = 0;
7983                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7984                 } else {
7985                         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7986                                                          0, IPPROTO_TCP, 0);
7987                 }
7988
7989                 if (tg3_flag(tp, HW_TSO_3)) {
7990                         mss |= (hdr_len & 0xc) << 12;
7991                         if (hdr_len & 0x10)
7992                                 base_flags |= 0x00000010;
7993                         base_flags |= (hdr_len & 0x3e0) << 5;
7994                 } else if (tg3_flag(tp, HW_TSO_2))
7995                         mss |= hdr_len << 9;
7996                 else if (tg3_flag(tp, HW_TSO_1) ||
7997                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7998                         if (tcp_opt_len || iph->ihl > 5) {
7999                                 int tsflags;
8000
8001                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002                                 mss |= (tsflags << 11);
8003                         }
8004                 } else {
8005                         if (tcp_opt_len || iph->ihl > 5) {
8006                                 int tsflags;
8007
8008                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8009                                 base_flags |= tsflags << 12;
8010                         }
8011                 }
8012         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8013                 /* HW/FW can not correctly checksum packets that have been
8014                  * vlan encapsulated.
8015                  */
8016                 if (skb->protocol == htons(ETH_P_8021Q) ||
8017                     skb->protocol == htons(ETH_P_8021AD)) {
8018                         if (skb_checksum_help(skb))
8019                                 goto drop;
8020                 } else  {
8021                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
8022                 }
8023         }
8024
8025         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8026             !mss && skb->len > VLAN_ETH_FRAME_LEN)
8027                 base_flags |= TXD_FLAG_JMB_PKT;
8028
8029         if (skb_vlan_tag_present(skb)) {
8030                 base_flags |= TXD_FLAG_VLAN;
8031                 vlan = skb_vlan_tag_get(skb);
8032         }
8033
8034         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8035             tg3_flag(tp, TX_TSTAMP_EN)) {
8036                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8037                 base_flags |= TXD_FLAG_HWTSTAMP;
8038         }
8039
8040         len = skb_headlen(skb);
8041
8042         mapping = dma_map_single(&tp->pdev->dev, skb->data, len,
8043                                  DMA_TO_DEVICE);
8044         if (dma_mapping_error(&tp->pdev->dev, mapping))
8045                 goto drop;
8046
8048         tnapi->tx_buffers[entry].skb = skb;
8049         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8050
8051         would_hit_hwbug = 0;
8052
8053         if (tg3_flag(tp, 5701_DMA_BUG))
8054                 would_hit_hwbug = 1;
8055
8056         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8057                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8058                             mss, vlan)) {
8059                 would_hit_hwbug = 1;
8060         } else if (skb_shinfo(skb)->nr_frags > 0) {
8061                 u32 tmp_mss = mss;
8062
8063                 if (!tg3_flag(tp, HW_TSO_1) &&
8064                     !tg3_flag(tp, HW_TSO_2) &&
8065                     !tg3_flag(tp, HW_TSO_3))
8066                         tmp_mss = 0;
8067
8068                 /* Now loop through additional data
8069                  * fragments, and queue them.
8070                  */
8071                 last = skb_shinfo(skb)->nr_frags - 1;
8072                 for (i = 0; i <= last; i++) {
8073                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8074
8075                         len = skb_frag_size(frag);
8076                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8077                                                    len, DMA_TO_DEVICE);
8078
8079                         tnapi->tx_buffers[entry].skb = NULL;
8080                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8081                                            mapping);
8082                         if (dma_mapping_error(&tp->pdev->dev, mapping))
8083                                 goto dma_error;
8084
8085                         if (!budget ||
8086                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8087                                             len, base_flags |
8088                                             ((i == last) ? TXD_FLAG_END : 0),
8089                                             tmp_mss, vlan)) {
8090                                 would_hit_hwbug = 1;
8091                                 break;
8092                         }
8093                 }
8094         }
8095
8096         if (would_hit_hwbug) {
8097                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8098
8099                 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8100                         /* If it's a TSO packet, do GSO instead of
8101                          * allocating and copying to a large linear SKB
8102                          */
8103                         if (ip_tot_len) {
8104                                 iph->check = ip_csum;
8105                                 iph->tot_len = ip_tot_len;
8106                         }
8107                         tcph->check = tcp_csum;
8108                         return tg3_tso_bug(tp, tnapi, txq, skb);
8109                 }
8110
8111                 /* If the workaround fails due to memory/mapping
8112                  * failure, silently drop this packet.
8113                  */
8114                 entry = tnapi->tx_prod;
8115                 budget = tg3_tx_avail(tnapi);
8116                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8117                                                 base_flags, mss, vlan))
8118                         goto drop_nofree;
8119         }
8120
8121         skb_tx_timestamp(skb);
8122         netdev_tx_sent_queue(txq, skb->len);
8123
8124         /* Sync BD data before updating mailbox */
8125         wmb();
8126
8127         tnapi->tx_prod = entry;
8128         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8129                 netif_tx_stop_queue(txq);
8130
8131                 /* netif_tx_stop_queue() must be done before checking
8132                  * the tx index in tg3_tx_avail() below, because in
8133                  * tg3_tx(), we update tx index before checking for
8134                  * netif_tx_queue_stopped().
8135                  */
8136                 smp_mb();
8137                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8138                         netif_tx_wake_queue(txq);
8139         }
8140
8141         if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
8142                 /* Packets are ready, update Tx producer idx on card. */
8143                 tw32_tx_mbox(tnapi->prodmbox, entry);
8144         }
8145
8146         return NETDEV_TX_OK;
8147
8148 dma_error:
8149         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8150         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8151 drop:
8152         dev_kfree_skb_any(skb);
8153 drop_nofree:
8154         tp->tx_dropped++;
8155         return NETDEV_TX_OK;
8156 }
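
/* Minimal sketch, not used by the driver: the stop/wake protocol from
 * tg3_start_xmit() above.  The smp_mb() pairs with the tx index update
 * in tg3_tx(), so either this side sees newly freed slots or the
 * completion side sees the stopped queue; the queue cannot stall in
 * the stopped state.
 */
static inline bool tg3_example_stop_or_race(struct tg3_napi *tnapi,
                                            struct netdev_queue *txq)
{
        netif_tx_stop_queue(txq);
        smp_mb();       /* order the stop vs. re-reading the tx index */
        if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) {
                netif_tx_wake_queue(txq);
                return false;   /* raced with a completion */
        }
        return true;            /* genuinely out of descriptors */
}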
8157
8158 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8159 {
8160         if (enable) {
8161                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8162                                   MAC_MODE_PORT_MODE_MASK);
8163
8164                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8165
8166                 if (!tg3_flag(tp, 5705_PLUS))
8167                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8168
8169                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8170                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8171                 else
8172                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8173         } else {
8174                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8175
8176                 if (tg3_flag(tp, 5705_PLUS) ||
8177                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8178                     tg3_asic_rev(tp) == ASIC_REV_5700)
8179                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8180         }
8181
8182         tw32(MAC_MODE, tp->mac_mode);
8183         udelay(40);
8184 }
8185
8186 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8187 {
8188         u32 val, bmcr, mac_mode, ptest = 0;
8189
8190         tg3_phy_toggle_apd(tp, false);
8191         tg3_phy_toggle_automdix(tp, false);
8192
8193         if (extlpbk && tg3_phy_set_extloopbk(tp))
8194                 return -EIO;
8195
8196         bmcr = BMCR_FULLDPLX;
8197         switch (speed) {
8198         case SPEED_10:
8199                 break;
8200         case SPEED_100:
8201                 bmcr |= BMCR_SPEED100;
8202                 break;
8203         case SPEED_1000:
8204         default:
8205                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8206                         speed = SPEED_100;
8207                         bmcr |= BMCR_SPEED100;
8208                 } else {
8209                         speed = SPEED_1000;
8210                         bmcr |= BMCR_SPEED1000;
8211                 }
8212         }
8213
8214         if (extlpbk) {
8215                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8216                         tg3_readphy(tp, MII_CTRL1000, &val);
8217                         val |= CTL1000_AS_MASTER |
8218                                CTL1000_ENABLE_MASTER;
8219                         tg3_writephy(tp, MII_CTRL1000, val);
8220                 } else {
8221                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8222                                 MII_TG3_FET_PTEST_TRIM_2;
8223                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8224                 }
8225         } else
8226                 bmcr |= BMCR_LOOPBACK;
8227
8228         tg3_writephy(tp, MII_BMCR, bmcr);
8229
8230         /* The write needs to be flushed for the FETs */
8231         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8232                 tg3_readphy(tp, MII_BMCR, &bmcr);
8233
8234         udelay(40);
8235
8236         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8237             tg3_asic_rev(tp) == ASIC_REV_5785) {
8238                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8239                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8240                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8241
8242                 /* The write needs to be flushed for the AC131 */
8243                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8244         }
8245
8246         /* Reset to prevent intermittently losing the first rx packet */
8247         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8248             tg3_flag(tp, 5780_CLASS)) {
8249                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8250                 udelay(10);
8251                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8252         }
8253
8254         mac_mode = tp->mac_mode &
8255                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8256         if (speed == SPEED_1000)
8257                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8258         else
8259                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8260
8261         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8262                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8263
8264                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8265                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8266                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8267                         mac_mode |= MAC_MODE_LINK_POLARITY;
8268
8269                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8270                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8271         }
8272
8273         tw32(MAC_MODE, mac_mode);
8274         udelay(40);
8275
8276         return 0;
8277 }
8278
8279 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8280 {
8281         struct tg3 *tp = netdev_priv(dev);
8282
8283         if (features & NETIF_F_LOOPBACK) {
8284                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8285                         return;
8286
8287                 spin_lock_bh(&tp->lock);
8288                 tg3_mac_loopback(tp, true);
8289                 netif_carrier_on(tp->dev);
8290                 spin_unlock_bh(&tp->lock);
8291                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8292         } else {
8293                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8294                         return;
8295
8296                 spin_lock_bh(&tp->lock);
8297                 tg3_mac_loopback(tp, false);
8298                 /* Force link status check */
8299                 tg3_setup_phy(tp, true);
8300                 spin_unlock_bh(&tp->lock);
8301                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8302         }
8303 }
8304
8305 static netdev_features_t tg3_fix_features(struct net_device *dev,
8306         netdev_features_t features)
8307 {
8308         struct tg3 *tp = netdev_priv(dev);
8309
8310         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8311                 features &= ~NETIF_F_ALL_TSO;
8312
8313         return features;
8314 }
8315
8316 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8317 {
8318         netdev_features_t changed = dev->features ^ features;
8319
8320         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8321                 tg3_set_loopback(dev, features);
8322
8323         return 0;
8324 }
8325
8326 static void tg3_rx_prodring_free(struct tg3 *tp,
8327                                  struct tg3_rx_prodring_set *tpr)
8328 {
8329         int i;
8330
8331         if (tpr != &tp->napi[0].prodring) {
8332                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8333                      i = (i + 1) & tp->rx_std_ring_mask)
8334                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8335                                         tp->rx_pkt_map_sz);
8336
8337                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8338                         for (i = tpr->rx_jmb_cons_idx;
8339                              i != tpr->rx_jmb_prod_idx;
8340                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8341                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8342                                                 TG3_RX_JMB_MAP_SZ);
8343                         }
8344                 }
8345
8346                 return;
8347         }
8348
8349         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8350                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8351                                 tp->rx_pkt_map_sz);
8352
8353         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8354                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8355                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8356                                         TG3_RX_JMB_MAP_SZ);
8357         }
8358 }
8359
8360 /* Initialize rx rings for packet processing.
8361  *
8362  * The chip has been shut down and the driver detached from
8363  * the networking stack, so no interrupts or new tx packets will
8364  * end up in the driver.  tp->{tx,}lock are held and thus
8365  * we may not sleep.
8366  */
8367 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8368                                  struct tg3_rx_prodring_set *tpr)
8369 {
8370         u32 i, rx_pkt_dma_sz;
8371
8372         tpr->rx_std_cons_idx = 0;
8373         tpr->rx_std_prod_idx = 0;
8374         tpr->rx_jmb_cons_idx = 0;
8375         tpr->rx_jmb_prod_idx = 0;
8376
8377         if (tpr != &tp->napi[0].prodring) {
8378                 memset(&tpr->rx_std_buffers[0], 0,
8379                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8380                 if (tpr->rx_jmb_buffers)
8381                         memset(&tpr->rx_jmb_buffers[0], 0,
8382                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8383                 goto done;
8384         }
8385
8386         /* Zero out all descriptors. */
8387         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8388
8389         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8390         if (tg3_flag(tp, 5780_CLASS) &&
8391             tp->dev->mtu > ETH_DATA_LEN)
8392                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8393         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8394
8395         /* Initialize invariants of the rings, we only set this
8396          * stuff once.  This works because the card does not
8397          * write into the rx buffer posting rings.
8398          */
8399         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8400                 struct tg3_rx_buffer_desc *rxd;
8401
8402                 rxd = &tpr->rx_std[i];
8403                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8404                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8405                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8406                                (i << RXD_OPAQUE_INDEX_SHIFT));
8407         }
8408
8409         /* Now allocate fresh SKBs for each rx ring. */
8410         for (i = 0; i < tp->rx_pending; i++) {
8411                 unsigned int frag_size;
8412
8413                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8414                                       &frag_size) < 0) {
8415                         netdev_warn(tp->dev,
8416                                     "Using a smaller RX standard ring. Only "
8417                                     "%d out of %d buffers were allocated "
8418                                     "successfully\n", i, tp->rx_pending);
8419                         if (i == 0)
8420                                 goto initfail;
8421                         tp->rx_pending = i;
8422                         break;
8423                 }
8424         }
8425
8426         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8427                 goto done;
8428
8429         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8430
8431         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8432                 goto done;
8433
8434         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8435                 struct tg3_rx_buffer_desc *rxd;
8436
8437                 rxd = &tpr->rx_jmb[i].std;
8438                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8439                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8440                                   RXD_FLAG_JUMBO;
8441                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8442                        (i << RXD_OPAQUE_INDEX_SHIFT));
8443         }
8444
8445         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8446                 unsigned int frag_size;
8447
8448                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8449                                       &frag_size) < 0) {
8450                         netdev_warn(tp->dev,
8451                                     "Using a smaller RX jumbo ring. Only %d "
8452                                     "out of %d buffers were allocated "
8453                                     "successfully\n", i, tp->rx_jumbo_pending);
8454                         if (i == 0)
8455                                 goto initfail;
8456                         tp->rx_jumbo_pending = i;
8457                         break;
8458                 }
8459         }
8460
8461 done:
8462         return 0;
8463
8464 initfail:
8465         tg3_rx_prodring_free(tp, tpr);
8466         return -ENOMEM;
8467 }
8468
8469 static void tg3_rx_prodring_fini(struct tg3 *tp,
8470                                  struct tg3_rx_prodring_set *tpr)
8471 {
8472         kfree(tpr->rx_std_buffers);
8473         tpr->rx_std_buffers = NULL;
8474         kfree(tpr->rx_jmb_buffers);
8475         tpr->rx_jmb_buffers = NULL;
8476         if (tpr->rx_std) {
8477                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8478                                   tpr->rx_std, tpr->rx_std_mapping);
8479                 tpr->rx_std = NULL;
8480         }
8481         if (tpr->rx_jmb) {
8482                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8483                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8484                 tpr->rx_jmb = NULL;
8485         }
8486 }
8487
8488 static int tg3_rx_prodring_init(struct tg3 *tp,
8489                                 struct tg3_rx_prodring_set *tpr)
8490 {
8491         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8492                                       GFP_KERNEL);
8493         if (!tpr->rx_std_buffers)
8494                 return -ENOMEM;
8495
8496         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8497                                          TG3_RX_STD_RING_BYTES(tp),
8498                                          &tpr->rx_std_mapping,
8499                                          GFP_KERNEL);
8500         if (!tpr->rx_std)
8501                 goto err_out;
8502
8503         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8504                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8505                                               GFP_KERNEL);
8506                 if (!tpr->rx_jmb_buffers)
8507                         goto err_out;
8508
8509                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8510                                                  TG3_RX_JMB_RING_BYTES(tp),
8511                                                  &tpr->rx_jmb_mapping,
8512                                                  GFP_KERNEL);
8513                 if (!tpr->rx_jmb)
8514                         goto err_out;
8515         }
8516
8517         return 0;
8518
8519 err_out:
8520         tg3_rx_prodring_fini(tp, tpr);
8521         return -ENOMEM;
8522 }
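/* Note the split between these helpers: tg3_rx_prodring_init() and
 * tg3_rx_prodring_fini() allocate and free the ring memory itself
 * (coherent DMA descriptor rings plus the host-side buffer tracking
 * arrays), while tg3_rx_prodring_alloc() and tg3_rx_prodring_free()
 * above populate and drain the descriptors.  The init step can thus
 * happen once, with the alloc step repeated across chip resets.
 */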
8523
8524 /* Free up pending packets in all rx/tx rings.
8525  *
8526  * The chip has been shut down and the driver detached from
8527  * the networking stack, so no interrupts or new tx packets will
8528  * end up in the driver.  tp->{tx,}lock is not held and we are not
8529  * in an interrupt context and thus may sleep.
8530  */
8531 static void tg3_free_rings(struct tg3 *tp)
8532 {
8533         int i, j;
8534
8535         for (j = 0; j < tp->irq_cnt; j++) {
8536                 struct tg3_napi *tnapi = &tp->napi[j];
8537
8538                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8539
8540                 if (!tnapi->tx_buffers)
8541                         continue;
8542
8543                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8544                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8545
8546                         if (!skb)
8547                                 continue;
8548
8549                         tg3_tx_skb_unmap(tnapi, i,
8550                                          skb_shinfo(skb)->nr_frags - 1);
8551
8552                         dev_consume_skb_any(skb);
8553                 }
8554                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8555         }
8556 }
8557
8558 /* Initialize tx/rx rings for packet processing.
8559  *
8560  * The chip has been shut down and the driver detached from
8561  * the networking stack, so no interrupts or new tx packets will
8562  * end up in the driver.  tp->{tx,}lock are held and thus
8563  * we may not sleep.
8564  */
8565 static int tg3_init_rings(struct tg3 *tp)
8566 {
8567         int i;
8568
8569         /* Free up all the SKBs. */
8570         tg3_free_rings(tp);
8571
8572         for (i = 0; i < tp->irq_cnt; i++) {
8573                 struct tg3_napi *tnapi = &tp->napi[i];
8574
8575                 tnapi->last_tag = 0;
8576                 tnapi->last_irq_tag = 0;
8577                 tnapi->hw_status->status = 0;
8578                 tnapi->hw_status->status_tag = 0;
8579                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8580
8581                 tnapi->tx_prod = 0;
8582                 tnapi->tx_cons = 0;
8583                 if (tnapi->tx_ring)
8584                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8585
8586                 tnapi->rx_rcb_ptr = 0;
8587                 if (tnapi->rx_rcb)
8588                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8589
8590                 if (tnapi->prodring.rx_std &&
8591                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8592                         tg3_free_rings(tp);
8593                         return -ENOMEM;
8594                 }
8595         }
8596
8597         return 0;
8598 }
8599
8600 static void tg3_mem_tx_release(struct tg3 *tp)
8601 {
8602         int i;
8603
8604         for (i = 0; i < tp->irq_max; i++) {
8605                 struct tg3_napi *tnapi = &tp->napi[i];
8606
8607                 if (tnapi->tx_ring) {
8608                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8609                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8610                         tnapi->tx_ring = NULL;
8611                 }
8612
8613                 kfree(tnapi->tx_buffers);
8614                 tnapi->tx_buffers = NULL;
8615         }
8616 }
8617
8618 static int tg3_mem_tx_acquire(struct tg3 *tp)
8619 {
8620         int i;
8621         struct tg3_napi *tnapi = &tp->napi[0];
8622
8623         /* If multivector TSS is enabled, vector 0 does not handle
8624          * tx interrupts.  Don't allocate any resources for it.
8625          */
8626         if (tg3_flag(tp, ENABLE_TSS))
8627                 tnapi++;
8628
8629         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8630                 tnapi->tx_buffers = kcalloc(TG3_TX_RING_SIZE,
8631                                             sizeof(struct tg3_tx_ring_info),
8632                                             GFP_KERNEL);
8633                 if (!tnapi->tx_buffers)
8634                         goto err_out;
8635
8636                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8637                                                     TG3_TX_RING_BYTES,
8638                                                     &tnapi->tx_desc_mapping,
8639                                                     GFP_KERNEL);
8640                 if (!tnapi->tx_ring)
8641                         goto err_out;
8642         }
8643
8644         return 0;
8645
8646 err_out:
8647         tg3_mem_tx_release(tp);
8648         return -ENOMEM;
8649 }
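/* E.g. with TSS enabled and txq_cnt == 4, the tx rings land on
 * tp->napi[1] through tp->napi[4], leaving vector 0 without a tx ring;
 * without TSS the single tx ring sits on vector 0.  (Illustrative
 * values; txq_cnt is configured elsewhere.)
 */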
8650
8651 static void tg3_mem_rx_release(struct tg3 *tp)
8652 {
8653         int i;
8654
8655         for (i = 0; i < tp->irq_max; i++) {
8656                 struct tg3_napi *tnapi = &tp->napi[i];
8657
8658                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8659
8660                 if (!tnapi->rx_rcb)
8661                         continue;
8662
8663                 dma_free_coherent(&tp->pdev->dev,
8664                                   TG3_RX_RCB_RING_BYTES(tp),
8665                                   tnapi->rx_rcb,
8666                                   tnapi->rx_rcb_mapping);
8667                 tnapi->rx_rcb = NULL;
8668         }
8669 }
8670
8671 static int tg3_mem_rx_acquire(struct tg3 *tp)
8672 {
8673         unsigned int i, limit;
8674
8675         limit = tp->rxq_cnt;
8676
8677         /* If RSS is enabled, we need a (dummy) producer ring
8678          * set on vector zero.  This is the true hw prodring.
8679          */
8680         if (tg3_flag(tp, ENABLE_RSS))
8681                 limit++;
8682
8683         for (i = 0; i < limit; i++) {
8684                 struct tg3_napi *tnapi = &tp->napi[i];
8685
8686                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8687                         goto err_out;
8688
8689                 /* If multivector RSS is enabled, vector 0
8690                  * does not handle rx or tx interrupts.
8691                  * Don't allocate any resources for it.
8692                  */
8693                 if (!i && tg3_flag(tp, ENABLE_RSS))
8694                         continue;
8695
8696                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8697                                                    TG3_RX_RCB_RING_BYTES(tp),
8698                                                    &tnapi->rx_rcb_mapping,
8699                                                    GFP_KERNEL);
8700                 if (!tnapi->rx_rcb)
8701                         goto err_out;
8702         }
8703
8704         return 0;
8705
8706 err_out:
8707         tg3_mem_rx_release(tp);
8708         return -ENOMEM;
8709 }
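/* Mirror image of the TSS case above: with RSS, limit is rxq_cnt + 1
 * because vector 0 carries only the hardware producer ring and gets no
 * rx return ring, while the return rings occupy vectors 1..rxq_cnt.
 */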
8710
8711 /*
8712  * Must not be invoked with interrupt sources disabled and
8713  * the hardware shut down.
8714  */
8715 static void tg3_free_consistent(struct tg3 *tp)
8716 {
8717         int i;
8718
8719         for (i = 0; i < tp->irq_cnt; i++) {
8720                 struct tg3_napi *tnapi = &tp->napi[i];
8721
8722                 if (tnapi->hw_status) {
8723                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8724                                           tnapi->hw_status,
8725                                           tnapi->status_mapping);
8726                         tnapi->hw_status = NULL;
8727                 }
8728         }
8729
8730         tg3_mem_rx_release(tp);
8731         tg3_mem_tx_release(tp);
8732
8733         /* tp->hw_stats can be referenced safely:
8734          *     1. under rtnl_lock
8735          *     2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
8736          */
8737         if (tp->hw_stats) {
8738                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8739                                   tp->hw_stats, tp->stats_mapping);
8740                 tp->hw_stats = NULL;
8741         }
8742 }
8743
8744 /*
8745  * Must not be invoked with interrupt sources disabled and
8746  * the hardware shut down.  Can sleep.
8747  */
8748 static int tg3_alloc_consistent(struct tg3 *tp)
8749 {
8750         int i;
8751
8752         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8753                                           sizeof(struct tg3_hw_stats),
8754                                           &tp->stats_mapping, GFP_KERNEL);
8755         if (!tp->hw_stats)
8756                 goto err_out;
8757
8758         for (i = 0; i < tp->irq_cnt; i++) {
8759                 struct tg3_napi *tnapi = &tp->napi[i];
8760                 struct tg3_hw_status *sblk;
8761
8762                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8763                                                       TG3_HW_STATUS_SIZE,
8764                                                       &tnapi->status_mapping,
8765                                                       GFP_KERNEL);
8766                 if (!tnapi->hw_status)
8767                         goto err_out;
8768
8769                 sblk = tnapi->hw_status;
8770
8771                 if (tg3_flag(tp, ENABLE_RSS)) {
8772                         u16 *prodptr = NULL;
8773
8774                         /*
8775                          * When RSS is enabled, the status block format changes
8776                          * slightly.  The "rx_jumbo_consumer", "reserved",
8777                          * and "rx_mini_consumer" members get mapped to the
8778                          * other three rx return ring producer indexes.
8779                          */
8780                         switch (i) {
8781                         case 1:
8782                                 prodptr = &sblk->idx[0].rx_producer;
8783                                 break;
8784                         case 2:
8785                                 prodptr = &sblk->rx_jumbo_consumer;
8786                                 break;
8787                         case 3:
8788                                 prodptr = &sblk->reserved;
8789                                 break;
8790                         case 4:
8791                                 prodptr = &sblk->rx_mini_consumer;
8792                                 break;
8793                         }
8794                         tnapi->rx_rcb_prod_idx = prodptr;
8795                 } else {
8796                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8797                 }
8798         }
8799
8800         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8801                 goto err_out;
8802
8803         return 0;
8804
8805 err_out:
8806         tg3_free_consistent(tp);
8807         return -ENOMEM;
8808 }
8809
8810 #define MAX_WAIT_CNT 1000
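/* The quiesce loops below pair MAX_WAIT_CNT iterations with a
 * udelay(100), giving each block roughly 1000 * 100 usec = 100 msec
 * to stop before we give up and return -ENODEV.
 */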
8811
8812 /* To stop a block, clear the enable bit and poll until it
8813  * clears.  tp->lock is held.
8814  */
8815 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8816 {
8817         unsigned int i;
8818         u32 val;
8819
8820         if (tg3_flag(tp, 5705_PLUS)) {
8821                 switch (ofs) {
8822                 case RCVLSC_MODE:
8823                 case DMAC_MODE:
8824                 case MBFREE_MODE:
8825                 case BUFMGR_MODE:
8826                 case MEMARB_MODE:
8827                         /* We can't enable/disable these bits on the
8828                          * 5705/5750, so just report success.
8829                          */
8830                         return 0;
8831
8832                 default:
8833                         break;
8834                 }
8835         }
8836
8837         val = tr32(ofs);
8838         val &= ~enable_bit;
8839         tw32_f(ofs, val);
8840
8841         for (i = 0; i < MAX_WAIT_CNT; i++) {
8842                 if (pci_channel_offline(tp->pdev)) {
8843                         dev_err(&tp->pdev->dev,
8844                                 "tg3_stop_block device offline, "
8845                                 "ofs=%lx enable_bit=%x\n",
8846                                 ofs, enable_bit);
8847                         return -ENODEV;
8848                 }
8849
8850                 udelay(100);
8851                 val = tr32(ofs);
8852                 if ((val & enable_bit) == 0)
8853                         break;
8854         }
8855
8856         if (i == MAX_WAIT_CNT && !silent) {
8857                 dev_err(&tp->pdev->dev,
8858                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8859                         ofs, enable_bit);
8860                 return -ENODEV;
8861         }
8862
8863         return 0;
8864 }
8865
8866 /* tp->lock is held. */
8867 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8868 {
8869         int i, err;
8870
8871         tg3_disable_ints(tp);
8872
8873         if (pci_channel_offline(tp->pdev)) {
8874                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8875                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8876                 err = -ENODEV;
8877                 goto err_no_dev;
8878         }
8879
8880         tp->rx_mode &= ~RX_MODE_ENABLE;
8881         tw32_f(MAC_RX_MODE, tp->rx_mode);
8882         udelay(10);
8883
8884         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8885         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8886         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8887         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8888         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8889         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8890
8891         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8892         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8893         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8894         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8895         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8896         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8897         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8898
8899         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8900         tw32_f(MAC_MODE, tp->mac_mode);
8901         udelay(40);
8902
8903         tp->tx_mode &= ~TX_MODE_ENABLE;
8904         tw32_f(MAC_TX_MODE, tp->tx_mode);
8905
8906         for (i = 0; i < MAX_WAIT_CNT; i++) {
8907                 udelay(100);
8908                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8909                         break;
8910         }
8911         if (i >= MAX_WAIT_CNT) {
8912                 dev_err(&tp->pdev->dev,
8913                         "%s timed out, TX_MODE_ENABLE will not clear "
8914                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8915                 err |= -ENODEV;
8916         }
8917
8918         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8919         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8920         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8921
8922         tw32(FTQ_RESET, 0xffffffff);
8923         tw32(FTQ_RESET, 0x00000000);
8924
8925         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8926         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8927
8928 err_no_dev:
8929         for (i = 0; i < tp->irq_cnt; i++) {
8930                 struct tg3_napi *tnapi = &tp->napi[i];
8931                 if (tnapi->hw_status)
8932                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8933         }
8934
8935         return err;
8936 }
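/* Roughly, the teardown order above is: receive path first (MAC RX
 * mode, then the RCV* blocks), then the send path and DMA engines,
 * and finally host coalescing, the FTQs, the buffer manager and the
 * memory arbiter that everything else depends on.  Individual
 * tg3_stop_block() results are OR'ed together, so one stuck block is
 * enough to make the whole abort report failure.
 */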
8937
8938 /* Save PCI command register before chip reset */
8939 static void tg3_save_pci_state(struct tg3 *tp)
8940 {
8941         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8942 }
8943
8944 /* Restore PCI state after chip reset */
8945 static void tg3_restore_pci_state(struct tg3 *tp)
8946 {
8947         u32 val;
8948
8949         /* Re-enable indirect register accesses. */
8950         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8951                                tp->misc_host_ctrl);
8952
8953         /* Set MAX PCI retry to zero. */
8954         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8955         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8956             tg3_flag(tp, PCIX_MODE))
8957                 val |= PCISTATE_RETRY_SAME_DMA;
8958         /* Allow reads and writes to the APE register and memory space. */
8959         if (tg3_flag(tp, ENABLE_APE))
8960                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8961                        PCISTATE_ALLOW_APE_SHMEM_WR |
8962                        PCISTATE_ALLOW_APE_PSPACE_WR;
8963         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8964
8965         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8966
8967         if (!tg3_flag(tp, PCI_EXPRESS)) {
8968                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8969                                       tp->pci_cacheline_sz);
8970                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8971                                       tp->pci_lat_timer);
8972         }
8973
8974         /* Make sure PCI-X relaxed ordering bit is clear. */
8975         if (tg3_flag(tp, PCIX_MODE)) {
8976                 u16 pcix_cmd;
8977
8978                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8979                                      &pcix_cmd);
8980                 pcix_cmd &= ~PCI_X_CMD_ERO;
8981                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8982                                       pcix_cmd);
8983         }
8984
8985         if (tg3_flag(tp, 5780_CLASS)) {
8986
8987                 /* Chip reset on 5780 will reset MSI enable bit,
8988                  * so we need to restore it.
8989                  */
8990                 if (tg3_flag(tp, USING_MSI)) {
8991                         u16 ctrl;
8992
8993                         pci_read_config_word(tp->pdev,
8994                                              tp->msi_cap + PCI_MSI_FLAGS,
8995                                              &ctrl);
8996                         pci_write_config_word(tp->pdev,
8997                                               tp->msi_cap + PCI_MSI_FLAGS,
8998                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8999                         val = tr32(MSGINT_MODE);
9000                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
9001                 }
9002         }
9003 }
9004
9005 static void tg3_override_clk(struct tg3 *tp)
9006 {
9007         u32 val;
9008
9009         switch (tg3_asic_rev(tp)) {
9010         case ASIC_REV_5717:
9011                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9012                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9013                      TG3_CPMU_MAC_ORIDE_ENABLE);
9014                 break;
9015
9016         case ASIC_REV_5719:
9017         case ASIC_REV_5720:
9018                 tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9019                 break;
9020
9021         default:
9022                 return;
9023         }
9024 }
9025
9026 static void tg3_restore_clk(struct tg3 *tp)
9027 {
9028         u32 val;
9029
9030         switch (tg3_asic_rev(tp)) {
9031         case ASIC_REV_5717:
9032                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9033                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9034                      val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9035                 break;
9036
9037         case ASIC_REV_5719:
9038         case ASIC_REV_5720:
9039                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9040                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9041                 break;
9042
9043         default:
9044                 return;
9045         }
9046 }
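/* tg3_override_clk() and tg3_restore_clk() bracket the chip reset
 * below: the MAC clock override is raised before the core-clock reset
 * so the bootcode runs at full speed, then dropped again once
 * tg3_poll_fw() has seen the firmware come back up.
 */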
9047
9048 /* tp->lock is held. */
9049 static int tg3_chip_reset(struct tg3 *tp)
9050         __releases(tp->lock)
9051         __acquires(tp->lock)
9052 {
9053         u32 val;
9054         void (*write_op)(struct tg3 *, u32, u32);
9055         int i, err;
9056
9057         if (!pci_device_is_present(tp->pdev))
9058                 return -ENODEV;
9059
9060         tg3_nvram_lock(tp);
9061
9062         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9063
9064         /* No matching tg3_nvram_unlock() after this because
9065          * the chip reset below will undo the nvram lock.
9066          */
9067         tp->nvram_lock_cnt = 0;
9068
9069         /* GRC_MISC_CFG core clock reset will clear the memory
9070          * enable bit in PCI register 4 and the MSI enable bit
9071          * on some chips, so we save relevant registers here.
9072          */
9073         tg3_save_pci_state(tp);
9074
9075         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9076             tg3_flag(tp, 5755_PLUS))
9077                 tw32(GRC_FASTBOOT_PC, 0);
9078
9079         /*
9080          * We must avoid the readl() that normally takes place.
9081          * It locks up machines, causes machine checks, and other
9082          * fun things.  So we temporarily disable the 5701
9083          * hardware workaround while we do the reset.
9084          */
9085         write_op = tp->write32;
9086         if (write_op == tg3_write_flush_reg32)
9087                 tp->write32 = tg3_write32;
9088
9089         /* Prevent the irq handler from reading or writing PCI registers
9090          * during chip reset when the memory enable bit in the PCI command
9091          * register may be cleared.  The chip does not generate interrupts
9092          * at this time, but the irq handler may still be called due to irq
9093          * sharing or irqpoll.
9094          */
9095         tg3_flag_set(tp, CHIP_RESETTING);
9096         for (i = 0; i < tp->irq_cnt; i++) {
9097                 struct tg3_napi *tnapi = &tp->napi[i];
9098                 if (tnapi->hw_status) {
9099                         tnapi->hw_status->status = 0;
9100                         tnapi->hw_status->status_tag = 0;
9101                 }
9102                 tnapi->last_tag = 0;
9103                 tnapi->last_irq_tag = 0;
9104         }
9105         smp_mb();
9106
9107         tg3_full_unlock(tp);
9108
9109         for (i = 0; i < tp->irq_cnt; i++)
9110                 synchronize_irq(tp->napi[i].irq_vec);
9111
9112         tg3_full_lock(tp, 0);
9113
9114         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9115                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9116                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9117         }
9118
9119         /* do the reset */
9120         val = GRC_MISC_CFG_CORECLK_RESET;
9121
9122         if (tg3_flag(tp, PCI_EXPRESS)) {
9123                 /* Force PCIe 1.0a mode */
9124                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9125                     !tg3_flag(tp, 57765_PLUS) &&
9126                     tr32(TG3_PCIE_PHY_TSTCTL) ==
9127                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9128                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9129
9130                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9131                         tw32(GRC_MISC_CFG, (1 << 29));
9132                         val |= (1 << 29);
9133                 }
9134         }
9135
9136         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9137                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9138                 tw32(GRC_VCPU_EXT_CTRL,
9139                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9140         }
9141
9142         /* Set the clock to the highest frequency to avoid timeouts. With link
9143          * aware mode, the clock speed could be slow and the bootcode does not
9144          * complete within the expected time. Override the clock to allow the
9145          * bootcode to finish sooner and then restore it.
9146          */
9147         tg3_override_clk(tp);
9148
9149         /* Manage gphy power for all CPMU-absent PCIe devices. */
9150         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9151                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9152
9153         tw32(GRC_MISC_CFG, val);
9154
9155         /* restore 5701 hardware bug workaround write method */
9156         tp->write32 = write_op;
9157
9158         /* Unfortunately, we have to delay before the PCI read back.
9159          * Some 575X chips will not even respond to a PCI cfg access
9160          * when the reset command is given to the chip.
9161          *
9162          * How do these hardware designers expect things to work
9163          * properly if the PCI write is posted for a long period
9164          * of time?  It is always necessary to have some method by
9165          * which a register read back can occur to push out the
9166          * write that does the reset.
9167          *
9168          * For most tg3 variants the trick below has worked.
9169          * Ho hum...
9170          */
9171         udelay(120);
9172
9173         /* Flush PCI posted writes.  The normal MMIO registers
9174          * are inaccessible at this time so this is the only
9175          * way to do this reliably (actually, this is no longer
9176          * the case, see above).  I tried to use indirect
9177          * register read/write but this upset some 5701 variants.
9178          */
9179         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9180
9181         udelay(120);
9182
9183         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9184                 u16 val16;
9185
9186                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9187                         int j;
9188                         u32 cfg_val;
9189
9190                         /* Wait for link training to complete.  */
9191                         for (j = 0; j < 5000; j++)
9192                                 udelay(100);
9193
9194                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9195                         pci_write_config_dword(tp->pdev, 0xc4,
9196                                                cfg_val | (1 << 15));
9197                 }
9198
9199                 /* Clear the "no snoop" and "relaxed ordering" bits. */
9200                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9201                 /*
9202                  * Older PCIe devices only support the 128 byte
9203                  * MPS setting.  Enforce the restriction.
9204                  */
9205                 if (!tg3_flag(tp, CPMU_PRESENT))
9206                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9207                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9208
9209                 /* Clear error status */
9210                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9211                                       PCI_EXP_DEVSTA_CED |
9212                                       PCI_EXP_DEVSTA_NFED |
9213                                       PCI_EXP_DEVSTA_FED |
9214                                       PCI_EXP_DEVSTA_URD);
9215         }
9216
9217         tg3_restore_pci_state(tp);
9218
9219         tg3_flag_clear(tp, CHIP_RESETTING);
9220         tg3_flag_clear(tp, ERROR_PROCESSED);
9221
9222         val = 0;
9223         if (tg3_flag(tp, 5780_CLASS))
9224                 val = tr32(MEMARB_MODE);
9225         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9226
9227         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9228                 tg3_stop_fw(tp);
9229                 tw32(0x5000, 0x400);
9230         }
9231
9232         if (tg3_flag(tp, IS_SSB_CORE)) {
9233                 /*
9234                  * BCM4785: to avoid repercussions from using the
9235                  * potentially defective internal ROM, stop the Rx RISC CPU,
9236                  * which is not required here anyway.
9237                  */
9238                 tg3_stop_fw(tp);
9239                 tg3_halt_cpu(tp, RX_CPU_BASE);
9240         }
9241
9242         err = tg3_poll_fw(tp);
9243         if (err)
9244                 return err;
9245
9246         tw32(GRC_MODE, tp->grc_mode);
9247
9248         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9249                 val = tr32(0xc4);
9250
9251                 tw32(0xc4, val | (1 << 15));
9252         }
9253
9254         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9255             tg3_asic_rev(tp) == ASIC_REV_5705) {
9256                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9257                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9258                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9259                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9260         }
9261
9262         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9263                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9264                 val = tp->mac_mode;
9265         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9266                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9267                 val = tp->mac_mode;
9268         } else
9269                 val = 0;
9270
9271         tw32_f(MAC_MODE, val);
9272         udelay(40);
9273
9274         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9275
9276         tg3_mdio_start(tp);
9277
9278         if (tg3_flag(tp, PCI_EXPRESS) &&
9279             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9280             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9281             !tg3_flag(tp, 57765_PLUS)) {
9282                 val = tr32(0x7c00);
9283
9284                 tw32(0x7c00, val | (1 << 25));
9285         }
9286
9287         tg3_restore_clk(tp);
9288
9289         /* Increase the core clock speed to fix the tx timeout issue seen on
9290          * the 5762 at 100Mbps link speed.
9291          */
9292         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9293                 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9294                 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9295                      TG3_CPMU_MAC_ORIDE_ENABLE);
9296         }
9297
9298         /* Reprobe ASF enable state.  */
9299         tg3_flag_clear(tp, ENABLE_ASF);
9300         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9301                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9302
9303         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9304         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9305         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9306                 u32 nic_cfg;
9307
9308                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9309                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9310                         tg3_flag_set(tp, ENABLE_ASF);
9311                         tp->last_event_jiffies = jiffies;
9312                         if (tg3_flag(tp, 5750_PLUS))
9313                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9314
9315                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9316                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9317                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9318                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9319                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9320                 }
9321         }
9322
9323         return 0;
9324 }
9325
9326 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9327 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9328 static void __tg3_set_rx_mode(struct net_device *);
9329
9330 /* tp->lock is held. */
9331 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9332 {
9333         int err;
9334
9335         tg3_stop_fw(tp);
9336
9337         tg3_write_sig_pre_reset(tp, kind);
9338
9339         tg3_abort_hw(tp, silent);
9340         err = tg3_chip_reset(tp);
9341
9342         __tg3_set_mac_addr(tp, false);
9343
9344         tg3_write_sig_legacy(tp, kind);
9345         tg3_write_sig_post_reset(tp, kind);
9346
9347         if (tp->hw_stats) {
9348                 /* Save the stats across chip resets... */
9349                 tg3_get_nstats(tp, &tp->net_stats_prev);
9350                 tg3_get_estats(tp, &tp->estats_prev);
9351
9352                 /* And make sure the next sample is new data */
9353                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9354         }
9355
9356         return err;
9357 }
9358
9359 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9360 {
9361         struct tg3 *tp = netdev_priv(dev);
9362         struct sockaddr *addr = p;
9363         int err = 0;
9364         bool skip_mac_1 = false;
9365
9366         if (!is_valid_ether_addr(addr->sa_data))
9367                 return -EADDRNOTAVAIL;
9368
9369         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9370
9371         if (!netif_running(dev))
9372                 return 0;
9373
9374         if (tg3_flag(tp, ENABLE_ASF)) {
9375                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9376
9377                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9378                 addr0_low = tr32(MAC_ADDR_0_LOW);
9379                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9380                 addr1_low = tr32(MAC_ADDR_1_LOW);
9381
9382                 /* Skip MAC addr 1 if ASF is using it. */
9383                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9384                     !(addr1_high == 0 && addr1_low == 0))
9385                         skip_mac_1 = true;
9386         }
9387         spin_lock_bh(&tp->lock);
9388         __tg3_set_mac_addr(tp, skip_mac_1);
9389         __tg3_set_rx_mode(dev);
9390         spin_unlock_bh(&tp->lock);
9391
9392         return err;
9393 }
9394
9395 /* tp->lock is held. */
9396 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9397                            dma_addr_t mapping, u32 maxlen_flags,
9398                            u32 nic_addr)
9399 {
9400         tg3_write_mem(tp,
9401                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9402                       ((u64) mapping >> 32));
9403         tg3_write_mem(tp,
9404                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9405                       ((u64) mapping & 0xffffffff));
9406         tg3_write_mem(tp,
9407                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9408                        maxlen_flags);
9409
9410         if (!tg3_flag(tp, 5705_PLUS))
9411                 tg3_write_mem(tp,
9412                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9413                               nic_addr);
9414 }
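/* Each BDINFO block in NIC SRAM written above holds, in order: the
 * 64-bit host DMA address of the ring (high word first), a combined
 * maxlen/flags word, and, on pre-5705 chips only, a NIC-side ring
 * address.
 */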
9415
9416
9417 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9418 {
9419         int i = 0;
9420
9421         if (!tg3_flag(tp, ENABLE_TSS)) {
9422                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9423                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9424                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9425         } else {
9426                 tw32(HOSTCC_TXCOL_TICKS, 0);
9427                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9428                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9429
9430                 for (; i < tp->txq_cnt; i++) {
9431                         u32 reg;
9432
9433                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9434                         tw32(reg, ec->tx_coalesce_usecs);
9435                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9436                         tw32(reg, ec->tx_max_coalesced_frames);
9437                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9438                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9439                 }
9440         }
9441
9442         for (; i < tp->irq_max - 1; i++) {
9443                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9444                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9445                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9446         }
9447 }
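/* The per-vector coalescing registers sit at a 0x18-byte stride from
 * the *_VEC1 offsets, hence the "i * 0x18" addressing here and in
 * tg3_coal_rx_init() below; vectors beyond the configured queue count
 * are simply written with zeroes.
 */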
9448
9449 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9450 {
9451         int i = 0;
9452         u32 limit = tp->rxq_cnt;
9453
9454         if (!tg3_flag(tp, ENABLE_RSS)) {
9455                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9456                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9457                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9458                 limit--;
9459         } else {
9460                 tw32(HOSTCC_RXCOL_TICKS, 0);
9461                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9462                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9463         }
9464
9465         for (; i < limit; i++) {
9466                 u32 reg;
9467
9468                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9469                 tw32(reg, ec->rx_coalesce_usecs);
9470                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9471                 tw32(reg, ec->rx_max_coalesced_frames);
9472                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9473                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9474         }
9475
9476         for (; i < tp->irq_max - 1; i++) {
9477                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9478                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9479                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9480         }
9481 }
9482
9483 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9484 {
9485         tg3_coal_tx_init(tp, ec);
9486         tg3_coal_rx_init(tp, ec);
9487
9488         if (!tg3_flag(tp, 5705_PLUS)) {
9489                 u32 val = ec->stats_block_coalesce_usecs;
9490
9491                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9492                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9493
9494                 if (!tp->link_up)
9495                         val = 0;
9496
9497                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9498         }
9499 }
9500
9501 /* tp->lock is held. */
9502 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9503 {
9504         u32 txrcb, limit;
9505
9506         /* Disable all transmit rings but the first. */
9507         if (!tg3_flag(tp, 5705_PLUS))
9508                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9509         else if (tg3_flag(tp, 5717_PLUS))
9510                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9511         else if (tg3_flag(tp, 57765_CLASS) ||
9512                  tg3_asic_rev(tp) == ASIC_REV_5762)
9513                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9514         else
9515                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9516
9517         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9518              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9519                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9520                               BDINFO_FLAGS_DISABLED);
9521 }
9522
9523 /* tp->lock is held. */
9524 static void tg3_tx_rcbs_init(struct tg3 *tp)
9525 {
9526         int i = 0;
9527         u32 txrcb = NIC_SRAM_SEND_RCB;
9528
9529         if (tg3_flag(tp, ENABLE_TSS))
9530                 i++;
9531
9532         for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9533                 struct tg3_napi *tnapi = &tp->napi[i];
9534
9535                 if (!tnapi->tx_ring)
9536                         continue;
9537
9538                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9539                                (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9540                                NIC_SRAM_TX_BUFFER_DESC);
9541         }
9542 }
9543
9544 /* tp->lock is held. */
9545 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9546 {
9547         u32 rxrcb, limit;
9548
9549         /* Disable all receive return rings but the first. */
9550         if (tg3_flag(tp, 5717_PLUS))
9551                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9552         else if (!tg3_flag(tp, 5705_PLUS))
9553                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9554         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9555                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9556                  tg3_flag(tp, 57765_CLASS))
9557                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9558         else
9559                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9560
9561         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9562              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9563                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9564                               BDINFO_FLAGS_DISABLED);
9565 }
9566
9567 /* tp->lock is held. */
9568 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9569 {
9570         int i = 0;
9571         u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9572
9573         if (tg3_flag(tp, ENABLE_RSS))
9574                 i++;
9575
9576         for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9577                 struct tg3_napi *tnapi = &tp->napi[i];
9578
9579                 if (!tnapi->rx_rcb)
9580                         continue;
9581
9582                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9583                                (tp->rx_ret_ring_mask + 1) <<
9584                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9585         }
9586 }
9587
9588 /* tp->lock is held. */
9589 static void tg3_rings_reset(struct tg3 *tp)
9590 {
9591         int i;
9592         u32 stblk;
9593         struct tg3_napi *tnapi = &tp->napi[0];
9594
9595         tg3_tx_rcbs_disable(tp);
9596
9597         tg3_rx_ret_rcbs_disable(tp);
9598
9599         /* Disable interrupts */
9600         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9601         tp->napi[0].chk_msi_cnt = 0;
9602         tp->napi[0].last_rx_cons = 0;
9603         tp->napi[0].last_tx_cons = 0;
9604
9605         /* Zero mailbox registers. */
9606         if (tg3_flag(tp, SUPPORT_MSIX)) {
9607                 for (i = 1; i < tp->irq_max; i++) {
9608                         tp->napi[i].tx_prod = 0;
9609                         tp->napi[i].tx_cons = 0;
9610                         if (tg3_flag(tp, ENABLE_TSS))
9611                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9612                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9613                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9614                         tp->napi[i].chk_msi_cnt = 0;
9615                         tp->napi[i].last_rx_cons = 0;
9616                         tp->napi[i].last_tx_cons = 0;
9617                 }
9618                 if (!tg3_flag(tp, ENABLE_TSS))
9619                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9620         } else {
9621                 tp->napi[0].tx_prod = 0;
9622                 tp->napi[0].tx_cons = 0;
9623                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9624                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9625         }
9626
9627         /* Make sure the NIC-based send BD rings are disabled. */
9628         if (!tg3_flag(tp, 5705_PLUS)) {
9629                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9630                 for (i = 0; i < 16; i++)
9631                         tw32_tx_mbox(mbox + i * 8, 0);
9632         }
9633
9634         /* Clear status block in ram. */
9635         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9636
9637         /* Set status block DMA address */
9638         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9639              ((u64) tnapi->status_mapping >> 32));
9640         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9641              ((u64) tnapi->status_mapping & 0xffffffff));
9642
9643         stblk = HOSTCC_STATBLCK_RING1;
9644
9645         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9646                 u64 mapping = (u64)tnapi->status_mapping;
9647                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9648                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9649                 stblk += 8;
9650
9651                 /* Clear status block in ram. */
9652                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9653         }
9654
9655         tg3_tx_rcbs_init(tp);
9656         tg3_rx_ret_rcbs_init(tp);
9657 }
9658
9659 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9660 {
9661         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9662
9663         if (!tg3_flag(tp, 5750_PLUS) ||
9664             tg3_flag(tp, 5780_CLASS) ||
9665             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9666             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9667             tg3_flag(tp, 57765_PLUS))
9668                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9669         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9670                  tg3_asic_rev(tp) == ASIC_REV_5787)
9671                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9672         else
9673                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9674
9675         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9676         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9677
9678         val = min(nic_rep_thresh, host_rep_thresh);
9679         tw32(RCVBDI_STD_THRESH, val);
9680
9681         if (tg3_flag(tp, 57765_PLUS))
9682                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9683
9684         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9685                 return;
9686
9687         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9688
9689         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9690
9691         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9692         tw32(RCVBDI_JUMBO_THRESH, val);
9693
9694         if (tg3_flag(tp, 57765_PLUS))
9695                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9696 }
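/* The replenish threshold is, roughly, the buffer-depletion point at
 * which the chip asks the host for more rx buffers: the lower of the
 * NIC-side BD cache limit and rx_pending / 8 (floored at 1).  E.g. an
 * rx_pending of 200 yields a host-side threshold of 25 descriptors.
 */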
9697
9698 static inline u32 calc_crc(unsigned char *buf, int len)
9699 {
9700         u32 reg;
9701         u32 tmp;
9702         int j, k;
9703
9704         reg = 0xffffffff;
9705
9706         for (j = 0; j < len; j++) {
9707                 reg ^= buf[j];
9708
9709                 for (k = 0; k < 8; k++) {
9710                         tmp = reg & 0x01;
9711
9712                         reg >>= 1;
9713
9714                         if (tmp)
9715                                 reg ^= CRC32_POLY_LE;
9716                 }
9717         }
9718
9719         return ~reg;
9720 }
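/* calc_crc() is the bit-serial, reflected form of CRC-32 over
 * CRC32_POLY_LE, i.e. the Ethernet FCS polynomial.  __tg3_set_rx_mode()
 * below runs each multicast address through it and uses ~crc & 0x7f as
 * a 128-bit hash index: bits 6:5 pick one of MAC_HASH_REG_0..3 and
 * bits 4:0 the bit within that register.  E.g. a crc of 0x0000007f
 * selects bit 0 of MAC_HASH_REG_0.
 */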
9721
9722 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9723 {
9724         /* accept or reject all multicast frames */
9725         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9726         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9727         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9728         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9729 }
9730
9731 static void __tg3_set_rx_mode(struct net_device *dev)
9732 {
9733         struct tg3 *tp = netdev_priv(dev);
9734         u32 rx_mode;
9735
9736         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9737                                   RX_MODE_KEEP_VLAN_TAG);
9738
9739 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9740         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9741          * flag clear.
9742          */
9743         if (!tg3_flag(tp, ENABLE_ASF))
9744                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9745 #endif
9746
9747         if (dev->flags & IFF_PROMISC) {
9748                 /* Promiscuous mode. */
9749                 rx_mode |= RX_MODE_PROMISC;
9750         } else if (dev->flags & IFF_ALLMULTI) {
9751                 /* Accept all multicast. */
9752                 tg3_set_multi(tp, 1);
9753         } else if (netdev_mc_empty(dev)) {
9754                 /* Reject all multicast. */
9755                 tg3_set_multi(tp, 0);
9756         } else {
9757                 /* Accept one or more multicast addresses. */
9758                 struct netdev_hw_addr *ha;
9759                 u32 mc_filter[4] = { 0, };
9760                 u32 regidx;
9761                 u32 bit;
9762                 u32 crc;
9763
9764                 netdev_for_each_mc_addr(ha, dev) {
9765                         crc = calc_crc(ha->addr, ETH_ALEN);
9766                         bit = ~crc & 0x7f;
9767                         regidx = (bit & 0x60) >> 5;
9768                         bit &= 0x1f;
9769                         mc_filter[regidx] |= (1 << bit);
9770                 }
9771
9772                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9773                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9774                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9775                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9776         }
9777
9778         if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9779                 rx_mode |= RX_MODE_PROMISC;
9780         } else if (!(dev->flags & IFF_PROMISC)) {
9781                 /* Add all entries to the mac addr filter list */
9782                 int i = 0;
9783                 struct netdev_hw_addr *ha;
9784
9785                 netdev_for_each_uc_addr(ha, dev) {
9786                         __tg3_set_one_mac_addr(tp, ha->addr,
9787                                                i + TG3_UCAST_ADDR_IDX(tp));
9788                         i++;
9789                 }
9790         }
9791
9792         if (rx_mode != tp->rx_mode) {
9793                 tp->rx_mode = rx_mode;
9794                 tw32_f(MAC_RX_MODE, rx_mode);
9795                 udelay(10);
9796         }
9797 }
9798
9799 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9800 {
9801         int i;
9802
9803         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9804                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9805 }
9806
9807 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9808 {
9809         int i;
9810
9811         if (!tg3_flag(tp, SUPPORT_MSIX))
9812                 return;
9813
9814         if (tp->rxq_cnt == 1) {
9815                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9816                 return;
9817         }
9818
9819         /* Validate table against current IRQ count */
9820         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9821                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9822                         break;
9823         }
9824
9825         if (i != TG3_RSS_INDIR_TBL_SIZE)
9826                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9827 }
9828
9829 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9830 {
9831         int i = 0;
9832         u32 reg = MAC_RSS_INDIR_TBL_0;
9833
9834         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9835                 u32 val = tp->rss_ind_tbl[i];
9836                 i++;
9837                 for (; i % 8; i++) {
9838                         val <<= 4;
9839                         val |= tp->rss_ind_tbl[i];
9840                 }
9841                 tw32(reg, val);
9842                 reg += 4;
9843         }
9844 }
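/* Each 32-bit MAC_RSS_INDIR_TBL register packs eight 4-bit queue
 * indices, first entry in the most significant nibble, so e.g. the
 * repeating pattern 1, 2, 3, 0 packs to 0x12301230 and the whole
 * table occupies TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers.
 */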
9845
9846 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9847 {
9848         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9849                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9850         else
9851                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9852 }
9853
9854 /* tp->lock is held. */
9855 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9856 {
9857         u32 val, rdmac_mode;
9858         int i, err, limit;
9859         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9860
9861         tg3_disable_ints(tp);
9862
9863         tg3_stop_fw(tp);
9864
9865         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9866
9867         if (tg3_flag(tp, INIT_COMPLETE))
9868                 tg3_abort_hw(tp, 1);
9869
9870         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9871             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9872                 tg3_phy_pull_config(tp);
9873                 tg3_eee_pull_config(tp, NULL);
9874                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9875         }
9876
9877         /* Enable MAC control of LPI */
9878         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9879                 tg3_setup_eee(tp);
9880
9881         if (reset_phy)
9882                 tg3_phy_reset(tp);
9883
9884         err = tg3_chip_reset(tp);
9885         if (err)
9886                 return err;
9887
9888         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9889
9890         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9891                 val = tr32(TG3_CPMU_CTRL);
9892                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9893                 tw32(TG3_CPMU_CTRL, val);
9894
9895                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9896                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9897                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9898                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9899
9900                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9901                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9902                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9903                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9904
9905                 val = tr32(TG3_CPMU_HST_ACC);
9906                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9907                 val |= CPMU_HST_ACC_MACCLK_6_25;
9908                 tw32(TG3_CPMU_HST_ACC, val);
9909         }
9910
9911         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9912                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9913                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9914                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9915                 tw32(PCIE_PWR_MGMT_THRESH, val);
9916
9917                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9918                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9919
9920                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9921
9922                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9923                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9924         }
9925
9926         if (tg3_flag(tp, L1PLLPD_EN)) {
9927                 u32 grc_mode = tr32(GRC_MODE);
9928
9929                 /* Access the lower 1K of PL PCIE block registers. */
9930                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9931                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9932
9933                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9934                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9935                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9936
9937                 tw32(GRC_MODE, grc_mode);
9938         }
9939
9940         if (tg3_flag(tp, 57765_CLASS)) {
9941                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9942                         u32 grc_mode = tr32(GRC_MODE);
9943
9944                         /* Access the lower 1K of PL PCIE block registers. */
9945                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9946                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9947
9948                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9949                                    TG3_PCIE_PL_LO_PHYCTL5);
9950                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9951                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9952
9953                         tw32(GRC_MODE, grc_mode);
9954                 }
9955
9956                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9957                         u32 grc_mode;
9958
9959                         /* Fix transmit hangs */
9960                         val = tr32(TG3_CPMU_PADRNG_CTL);
9961                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9962                         tw32(TG3_CPMU_PADRNG_CTL, val);
9963
9964                         grc_mode = tr32(GRC_MODE);
9965
9966                         /* Access the lower 1K of DL PCIE block registers. */
9967                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9968                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9969
9970                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9971                                    TG3_PCIE_DL_LO_FTSMAX);
9972                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9973                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9974                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9975
9976                         tw32(GRC_MODE, grc_mode);
9977                 }
9978
9979                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9980                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9981                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9982                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9983         }
9984
9985         /* This works around an issue with Athlon chipsets on
9986          * B3 tigon3 silicon.  This bit has no effect on any
9987          * other revision.  But do not set this on PCI Express
9988          * chips and don't even touch the clocks if the CPMU is present.
9989          */
9990         if (!tg3_flag(tp, CPMU_PRESENT)) {
9991                 if (!tg3_flag(tp, PCI_EXPRESS))
9992                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9993                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9994         }
9995
9996         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9997             tg3_flag(tp, PCIX_MODE)) {
9998                 val = tr32(TG3PCI_PCISTATE);
9999                 val |= PCISTATE_RETRY_SAME_DMA;
10000                 tw32(TG3PCI_PCISTATE, val);
10001         }
10002
10003         if (tg3_flag(tp, ENABLE_APE)) {
10004                 /* Allow reads and writes to the
10005                  * APE register and memory space.
10006                  */
10007                 val = tr32(TG3PCI_PCISTATE);
10008                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
10009                        PCISTATE_ALLOW_APE_SHMEM_WR |
10010                        PCISTATE_ALLOW_APE_PSPACE_WR;
10011                 tw32(TG3PCI_PCISTATE, val);
10012         }
10013
10014         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
10015                 /* Enable some hw fixes.  */
10016                 val = tr32(TG3PCI_MSI_DATA);
10017                 val |= (1 << 26) | (1 << 28) | (1 << 29);
10018                 tw32(TG3PCI_MSI_DATA, val);
10019         }
10020
10021         /* Descriptor ring init may make accesses to the
10022          * NIC SRAM area to set up the TX descriptors, so we
10023          * can only do this after the hardware has been
10024          * successfully reset.
10025          */
10026         err = tg3_init_rings(tp);
10027         if (err)
10028                 return err;
10029
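        /* Program the host DMA read/write control.  57765+ parts need
         * per-revision adjustments; most earlier parts use the value
         * measured by the probe-time DMA engine test.
         */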
10030         if (tg3_flag(tp, 57765_PLUS)) {
10031                 val = tr32(TG3PCI_DMA_RW_CTRL) &
10032                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10033                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10034                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10035                 if (!tg3_flag(tp, 57765_CLASS) &&
10036                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
10037                     tg3_asic_rev(tp) != ASIC_REV_5762)
10038                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
10039                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10040         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10041                    tg3_asic_rev(tp) != ASIC_REV_5761) {
10042                 /* This value is determined during the probe-time DMA
10043                  * engine test, tg3_test_dma().
10044                  */
10045                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10046         }
10047
10048         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10049                           GRC_MODE_4X_NIC_SEND_RINGS |
10050                           GRC_MODE_NO_TX_PHDR_CSUM |
10051                           GRC_MODE_NO_RX_PHDR_CSUM);
10052         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10053
10054         /* Pseudo-header checksum is done by hardware logic and not
10055          * the offload processors, so make the chip do the pseudo-
10056          * header checksums on receive.  For transmit it is more
10057          * convenient to do the pseudo-header checksum in software
10058          * as Linux does that on transmit for us in all cases.
10059          */
10060         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10061
10062         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10063         if (tp->rxptpctl)
10064                 tw32(TG3_RX_PTP_CTL,
10065                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10066
10067         if (tg3_flag(tp, PTP_CAPABLE))
10068                 val |= GRC_MODE_TIME_SYNC_ENABLE;
10069
10070         tw32(GRC_MODE, tp->grc_mode | val);
10071
10072         /* On at least one AMD platform, the MRRS is restricted to 4000
10073          * because of a south bridge limitation.  As a workaround, the
10074          * driver sets the MRRS to 2048 instead of the default 4096.
10075          */
10076         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10077             tp->pdev->subsystem_device == TG3PCI_SUBDEVICE_ID_DELL_5762) {
10078                 val = tr32(TG3PCI_DEV_STATUS_CTRL) & ~MAX_READ_REQ_MASK;
10079                 tw32(TG3PCI_DEV_STATUS_CTRL, val | MAX_READ_REQ_SIZE_2048);
10080         }
10081
10082         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
10083         val = tr32(GRC_MISC_CFG);
10084         val &= ~0xff;
10085         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10086         tw32(GRC_MISC_CFG, val);
10087
10088         /* Initialize MBUF/DESC pool. */
10089         if (tg3_flag(tp, 5750_PLUS)) {
10090                 /* Do nothing.  */
10091         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10092                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10093                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
10094                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10095                 else
10096                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10097                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10098                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10099         } else if (tg3_flag(tp, TSO_CAPABLE)) {
10100                 int fw_len;
10101
10102                 fw_len = tp->fw_len;
10103                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10104                 tw32(BUFMGR_MB_POOL_ADDR,
10105                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10106                 tw32(BUFMGR_MB_POOL_SIZE,
10107                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10108         }
10109
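        /* Buffer manager MBUF watermarks depend on whether the MTU is
         * standard or jumbo sized.
         */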
10110         if (tp->dev->mtu <= ETH_DATA_LEN) {
10111                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10112                      tp->bufmgr_config.mbuf_read_dma_low_water);
10113                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10114                      tp->bufmgr_config.mbuf_mac_rx_low_water);
10115                 tw32(BUFMGR_MB_HIGH_WATER,
10116                      tp->bufmgr_config.mbuf_high_water);
10117         } else {
10118                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
10119                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10120                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
10121                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10122                 tw32(BUFMGR_MB_HIGH_WATER,
10123                      tp->bufmgr_config.mbuf_high_water_jumbo);
10124         }
10125         tw32(BUFMGR_DMA_LOW_WATER,
10126              tp->bufmgr_config.dma_low_water);
10127         tw32(BUFMGR_DMA_HIGH_WATER,
10128              tp->bufmgr_config.dma_high_water);
10129
10130         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10131         if (tg3_asic_rev(tp) == ASIC_REV_5719)
10132                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10133         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10134             tg3_asic_rev(tp) == ASIC_REV_5762 ||
10135             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10136             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10137                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10138         tw32(BUFMGR_MODE, val);
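        /* Wait up to 20 ms (2000 * 10 us) for the buffer manager to
         * report that it is enabled.
         */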
10139         for (i = 0; i < 2000; i++) {
10140                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10141                         break;
10142                 udelay(10);
10143         }
10144         if (i >= 2000) {
10145                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10146                 return -ENODEV;
10147         }
10148
10149         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10150                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10151
10152         tg3_setup_rxbd_thresholds(tp);
10153
10154         /* Initialize TG3_BDINFO's at:
10155          *  RCVDBDI_STD_BD:     standard eth size rx ring
10156          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
10157          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
10158          *
10159          * like so:
10160          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
10161          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
10162          *                              ring attribute flags
10163          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
10164          *
10165          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10166          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10167          *
10168          * The size of each ring is fixed in the firmware, but the location is
10169          * configurable.
10170          */
10171         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10172              ((u64) tpr->rx_std_mapping >> 32));
10173         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10174              ((u64) tpr->rx_std_mapping & 0xffffffff));
10175         if (!tg3_flag(tp, 5717_PLUS))
10176                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10177                      NIC_SRAM_RX_BUFFER_DESC);
10178
10179         /* Disable the mini ring */
10180         if (!tg3_flag(tp, 5705_PLUS))
10181                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10182                      BDINFO_FLAGS_DISABLED);
10183
10184         /* Program the jumbo buffer descriptor ring control
10185          * blocks on those devices that have them.
10186          */
10187         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10188             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10189
10190                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10191                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10192                              ((u64) tpr->rx_jmb_mapping >> 32));
10193                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10194                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10195                         val = TG3_RX_JMB_RING_SIZE(tp) <<
10196                               BDINFO_FLAGS_MAXLEN_SHIFT;
10197                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10198                              val | BDINFO_FLAGS_USE_EXT_RECV);
10199                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10200                             tg3_flag(tp, 57765_CLASS) ||
10201                             tg3_asic_rev(tp) == ASIC_REV_5762)
10202                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10203                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10204                 } else {
10205                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10206                              BDINFO_FLAGS_DISABLED);
10207                 }
10208
10209                 if (tg3_flag(tp, 57765_PLUS)) {
10210                         val = TG3_RX_STD_RING_SIZE(tp);
10211                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10212                         val |= (TG3_RX_STD_DMA_SZ << 2);
10213                 } else
10214                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10215         } else
10216                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10217
10218         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10219
10220         tpr->rx_std_prod_idx = tp->rx_pending;
10221         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10222
10223         tpr->rx_jmb_prod_idx =
10224                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10225         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10226
10227         tg3_rings_reset(tp);
10228
10229         /* Initialize MAC address and backoff seed. */
10230         __tg3_set_mac_addr(tp, false);
10231
10232         /* MTU + Ethernet header + FCS + optional VLAN tag */
10233         tw32(MAC_RX_MTU_SIZE,
10234              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10235
10236         /* The slot time is changed by tg3_setup_phy if we
10237          * run at gigabit with half duplex.
10238          */
10239         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10240               (6 << TX_LENGTHS_IPG_SHIFT) |
10241               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10242
10243         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10244             tg3_asic_rev(tp) == ASIC_REV_5762)
10245                 val |= tr32(MAC_TX_LENGTHS) &
10246                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
10247                         TX_LENGTHS_CNT_DWN_VAL_MSK);
10248
10249         tw32(MAC_TX_LENGTHS, val);
10250
10251         /* Receive rules. */
10252         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10253         tw32(RCVLPC_CONFIG, 0x0181);
10254
10255         /* Calculate the RDMAC_MODE setting early; we need it to determine
10256          * the RCVLPC_STATS_ENABLE mask below.
10257          */
10258         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10259                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10260                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10261                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10262                       RDMAC_MODE_LNGREAD_ENAB);
10263
10264         if (tg3_asic_rev(tp) == ASIC_REV_5717)
10265                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10266
10267         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10268             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10269             tg3_asic_rev(tp) == ASIC_REV_57780)
10270                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10271                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10272                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10273
10274         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10275             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10276                 if (tg3_flag(tp, TSO_CAPABLE) &&
10277                     tg3_asic_rev(tp) == ASIC_REV_5705) {
10278                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10279                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10280                            !tg3_flag(tp, IS_5788)) {
10281                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10282                 }
10283         }
10284
10285         if (tg3_flag(tp, PCI_EXPRESS))
10286                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10287
10288         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10289                 tp->dma_limit = 0;
10290                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10291                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10292                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10293                 }
10294         }
10295
10296         if (tg3_flag(tp, HW_TSO_1) ||
10297             tg3_flag(tp, HW_TSO_2) ||
10298             tg3_flag(tp, HW_TSO_3))
10299                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10300
10301         if (tg3_flag(tp, 57765_PLUS) ||
10302             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10303             tg3_asic_rev(tp) == ASIC_REV_57780)
10304                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10305
10306         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10307             tg3_asic_rev(tp) == ASIC_REV_5762)
10308                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10309
10310         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10311             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10312             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10313             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10314             tg3_flag(tp, 57765_PLUS)) {
10315                 u32 tgtreg;
10316
10317                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10318                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10319                 else
10320                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10321
10322                 val = tr32(tgtreg);
10323                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10324                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10325                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10326                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10327                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10328                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10329                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10330                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10331                 }
10332                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10333         }
10334
10335         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10336             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10337             tg3_asic_rev(tp) == ASIC_REV_5762) {
10338                 u32 tgtreg;
10339
10340                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10341                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10342                 else
10343                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10344
10345                 val = tr32(tgtreg);
10346                 tw32(tgtreg, val |
10347                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10348                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10349         }
10350
10351         /* Receive/send statistics. */
10352         if (tg3_flag(tp, 5750_PLUS)) {
10353                 val = tr32(RCVLPC_STATS_ENABLE);
10354                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10355                 tw32(RCVLPC_STATS_ENABLE, val);
10356         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10357                    tg3_flag(tp, TSO_CAPABLE)) {
10358                 val = tr32(RCVLPC_STATS_ENABLE);
10359                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10360                 tw32(RCVLPC_STATS_ENABLE, val);
10361         } else {
10362                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10363         }
10364         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10365         tw32(SNDDATAI_STATSENAB, 0xffffff);
10366         tw32(SNDDATAI_STATSCTRL,
10367              (SNDDATAI_SCTRL_ENABLE |
10368               SNDDATAI_SCTRL_FASTUPD));
10369
10370         /* Set up the host coalescing engine. */
10371         tw32(HOSTCC_MODE, 0);
10372         for (i = 0; i < 2000; i++) {
10373                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10374                         break;
10375                 udelay(10);
10376         }
10377
10378         __tg3_set_coalesce(tp, &tp->coal);
10379
10380         if (!tg3_flag(tp, 5705_PLUS)) {
10381                 /* Status/statistics block address.  See tg3_timer,
10382                  * the tg3_periodic_fetch_stats call there, and
10383                  * tg3_get_stats to see how this works for 5705/5750 chips.
10384                  */
10385                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10386                      ((u64) tp->stats_mapping >> 32));
10387                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10388                      ((u64) tp->stats_mapping & 0xffffffff));
10389                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10390
10391                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10392
10393                 /* Clear statistics and status block memory areas */
10394                 for (i = NIC_SRAM_STATS_BLK;
10395                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10396                      i += sizeof(u32)) {
10397                         tg3_write_mem(tp, i, 0);
10398                         udelay(40);
10399                 }
10400         }
10401
10402         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10403
10404         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10405         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10406         if (!tg3_flag(tp, 5705_PLUS))
10407                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10408
10409         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10410                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10411                 /* Reset to prevent intermittently losing the first rx packet. */
10412                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10413                 udelay(10);
10414         }
10415
10416         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10417                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10418                         MAC_MODE_FHDE_ENABLE;
10419         if (tg3_flag(tp, ENABLE_APE))
10420                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10421         if (!tg3_flag(tp, 5705_PLUS) &&
10422             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10423             tg3_asic_rev(tp) != ASIC_REV_5700)
10424                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10425         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10426         udelay(40);
10427
10428         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10429          * If TG3_FLAG_IS_NIC is zero, we should read the
10430          * register to preserve the GPIO settings for LOMs. The GPIOs,
10431          * whether used as inputs or outputs, are set by boot code after
10432          * reset.
10433          */
10434         if (!tg3_flag(tp, IS_NIC)) {
10435                 u32 gpio_mask;
10436
10437                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10438                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10439                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10440
10441                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10442                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10443                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10444
10445                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10446                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10447
10448                 tp->grc_local_ctrl &= ~gpio_mask;
10449                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10450
10451                 /* GPIO1 must be driven high for EEPROM write protect. */
10452                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10453                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10454                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10455         }
10456         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10457         udelay(100);
10458
10459         if (tg3_flag(tp, USING_MSIX)) {
10460                 val = tr32(MSGINT_MODE);
10461                 val |= MSGINT_MODE_ENABLE;
10462                 if (tp->irq_cnt > 1)
10463                         val |= MSGINT_MODE_MULTIVEC_EN;
10464                 if (!tg3_flag(tp, 1SHOT_MSI))
10465                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10466                 tw32(MSGINT_MODE, val);
10467         }
10468
10469         if (!tg3_flag(tp, 5705_PLUS)) {
10470                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10471                 udelay(40);
10472         }
10473
10474         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10475                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10476                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10477                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10478                WDMAC_MODE_LNGREAD_ENAB);
10479
10480         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10481             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10482                 if (tg3_flag(tp, TSO_CAPABLE) &&
10483                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10484                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10485                         /* nothing */
10486                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10487                            !tg3_flag(tp, IS_5788)) {
10488                         val |= WDMAC_MODE_RX_ACCEL;
10489                 }
10490         }
10491
10492         /* Enable host coalescing bug fix */
10493         if (tg3_flag(tp, 5755_PLUS))
10494                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10495
10496         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10497                 val |= WDMAC_MODE_BURST_ALL_DATA;
10498
10499         tw32_f(WDMAC_MODE, val);
10500         udelay(40);
10501
10502         if (tg3_flag(tp, PCIX_MODE)) {
10503                 u16 pcix_cmd;
10504
10505                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10506                                      &pcix_cmd);
10507                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10508                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10509                         pcix_cmd |= PCI_X_CMD_READ_2K;
10510                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10511                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10512                         pcix_cmd |= PCI_X_CMD_READ_2K;
10513                 }
10514                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10515                                       pcix_cmd);
10516         }
10517
10518         tw32_f(RDMAC_MODE, rdmac_mode);
10519         udelay(40);
10520
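        /* 5719/5720: if any RDMA length register exceeds the MTU, arm the
         * LSO read-DMA workaround bit.  tg3_periodic_fetch_stats() clears
         * it again once enough frames have been transmitted.
         */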
10521         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10522             tg3_asic_rev(tp) == ASIC_REV_5720) {
10523                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10524                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10525                                 break;
10526                 }
10527                 if (i < TG3_NUM_RDMA_CHANNELS) {
10528                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10529                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10530                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10531                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10532                 }
10533         }
10534
10535         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10536         if (!tg3_flag(tp, 5705_PLUS))
10537                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10538
10539         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10540                 tw32(SNDDATAC_MODE,
10541                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10542         else
10543                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10544
10545         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10546         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10547         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10548         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10549                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10550         tw32(RCVDBDI_MODE, val);
10551         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10552         if (tg3_flag(tp, HW_TSO_1) ||
10553             tg3_flag(tp, HW_TSO_2) ||
10554             tg3_flag(tp, HW_TSO_3))
10555                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10556         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10557         if (tg3_flag(tp, ENABLE_TSS))
10558                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10559         tw32(SNDBDI_MODE, val);
10560         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10561
10562         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10563                 err = tg3_load_5701_a0_firmware_fix(tp);
10564                 if (err)
10565                         return err;
10566         }
10567
10568         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10569                 /* Ignore any errors from the firmware download.  If the
10570                  * download fails, the device will operate with EEE disabled.
10571                  */
10572                 tg3_load_57766_firmware(tp);
10573         }
10574
10575         if (tg3_flag(tp, TSO_CAPABLE)) {
10576                 err = tg3_load_tso_firmware(tp);
10577                 if (err)
10578                         return err;
10579         }
10580
10581         tp->tx_mode = TX_MODE_ENABLE;
10582
10583         if (tg3_flag(tp, 5755_PLUS) ||
10584             tg3_asic_rev(tp) == ASIC_REV_5906)
10585                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10586
10587         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10588             tg3_asic_rev(tp) == ASIC_REV_5762) {
10589                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10590                 tp->tx_mode &= ~val;
10591                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10592         }
10593
10594         tw32_f(MAC_TX_MODE, tp->tx_mode);
10595         udelay(100);
10596
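        /* Program the RSS indirection table and load a random 40-byte
         * hash key into the MAC's ten hash-key registers.
         */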
10597         if (tg3_flag(tp, ENABLE_RSS)) {
10598                 u32 rss_key[10];
10599
10600                 tg3_rss_write_indir_tbl(tp);
10601
10602                 netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10603
10604                 for (i = 0; i < 10; i++)
10605                         tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10606         }
10607
10608         tp->rx_mode = RX_MODE_ENABLE;
10609         if (tg3_flag(tp, 5755_PLUS))
10610                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10611
10612         if (tg3_asic_rev(tp) == ASIC_REV_5762)
10613                 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10614
10615         if (tg3_flag(tp, ENABLE_RSS))
10616                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10617                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10618                                RX_MODE_RSS_IPV6_HASH_EN |
10619                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10620                                RX_MODE_RSS_IPV4_HASH_EN |
10621                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10622
10623         tw32_f(MAC_RX_MODE, tp->rx_mode);
10624         udelay(10);
10625
10626         tw32(MAC_LED_CTRL, tp->led_ctrl);
10627
10628         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10629         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10630                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10631                 udelay(10);
10632         }
10633         tw32_f(MAC_RX_MODE, tp->rx_mode);
10634         udelay(10);
10635
10636         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10637                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10638                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10639                         /* Set the drive transmission level to 1.2V, but only
10640                          * if the signal pre-emphasis bit is not set.  */
10641                         val = tr32(MAC_SERDES_CFG);
10642                         val &= 0xfffff000;
10643                         val |= 0x880;
10644                         tw32(MAC_SERDES_CFG, val);
10645                 }
10646                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10647                         tw32(MAC_SERDES_CFG, 0x616000);
10648         }
10649
10650         /* Prevent the chip from dropping frames when flow control
10651          * is enabled.
10652          */
10653         if (tg3_flag(tp, 57765_CLASS))
10654                 val = 1;
10655         else
10656                 val = 2;
10657         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10658
10659         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10660             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10661                 /* Use hardware link auto-negotiation */
10662                 tg3_flag_set(tp, HW_AUTONEG);
10663         }
10664
10665         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10666             tg3_asic_rev(tp) == ASIC_REV_5714) {
10667                 u32 tmp;
10668
10669                 tmp = tr32(SERDES_RX_CTRL);
10670                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10671                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10672                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10673                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10674         }
10675
10676         if (!tg3_flag(tp, USE_PHYLIB)) {
10677                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10678                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10679
10680                 err = tg3_setup_phy(tp, false);
10681                 if (err)
10682                         return err;
10683
10684                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10685                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10686                         u32 tmp;
10687
10688                         /* Clear CRC stats. */
10689                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10690                                 tg3_writephy(tp, MII_TG3_TEST1,
10691                                              tmp | MII_TG3_TEST1_CRC_EN);
10692                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10693                         }
10694                 }
10695         }
10696
10697         __tg3_set_rx_mode(tp->dev);
10698
10699         /* Initialize receive rules. */
10700         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10701         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10702         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10703         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10704
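        /* The MAC has 16 receive rule slots.  5705+ parts outside the
         * 5780 class expose only 8, and ASF firmware reserves the last 4
         * for its own use.  Clear every slot we do not program.
         */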
10705         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10706                 limit = 8;
10707         else
10708                 limit = 16;
10709         if (tg3_flag(tp, ENABLE_ASF))
10710                 limit -= 4;
10711         switch (limit) {
10712         case 16:
10713                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10714                 fallthrough;
10715         case 15:
10716                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10717                 fallthrough;
10718         case 14:
10719                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10720                 fallthrough;
10721         case 13:
10722                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10723                 fallthrough;
10724         case 12:
10725                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10726                 fallthrough;
10727         case 11:
10728                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10729                 fallthrough;
10730         case 10:
10731                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10732                 fallthrough;
10733         case 9:
10734                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10735                 fallthrough;
10736         case 8:
10737                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10738                 fallthrough;
10739         case 7:
10740                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10741                 fallthrough;
10742         case 6:
10743                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10744                 fallthrough;
10745         case 5:
10746                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10747                 fallthrough;
10748         case 4:
10749                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10750         case 3:
10751                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10752         case 2:
10753         case 1:
10754
10755         default:
10756                 break;
10757         }
10758
10759         if (tg3_flag(tp, ENABLE_APE))
10760                 /* Write our heartbeat update interval to APE. */
10761                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10762                                 APE_HOST_HEARTBEAT_INT_5SEC);
10763
10764         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10765
10766         return 0;
10767 }
10768
10769 /* Called at device open time to get the chip ready for
10770  * packet processing.  Invoked with tp->lock held.
10771  */
10772 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10773 {
10774         /* Chip may have been just powered on. If so, the boot code may still
10775          * be running initialization. Wait for it to finish to avoid races in
10776          * accessing the hardware.
10777          */
10778         tg3_enable_register_access(tp);
10779         tg3_poll_fw(tp);
10780
10781         tg3_switch_clocks(tp);
10782
10783         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10784
10785         return tg3_reset_hw(tp, reset_phy);
10786 }
10787
10788 #ifdef CONFIG_TIGON3_HWMON
10789 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10790 {
10791         u32 off, len = TG3_OCIR_LEN;
10792         int i;
10793
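        /* Walk the OCIR records in the APE scratchpad, zeroing any record
         * that lacks the signature magic or is not flagged active.
         */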
10794         for (i = 0, off = 0; i < TG3_SD_NUM_RECS; i++, ocir++, off += len) {
10795                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10796
10797                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10798                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10799                         memset(ocir, 0, len);
10800         }
10801 }
10802
10803 /* sysfs attributes for hwmon */
10804 static ssize_t tg3_show_temp(struct device *dev,
10805                              struct device_attribute *devattr, char *buf)
10806 {
10807         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10808         struct tg3 *tp = dev_get_drvdata(dev);
10809         u32 temperature;
10810
10811         spin_lock_bh(&tp->lock);
10812         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10813                                 sizeof(temperature));
10814         spin_unlock_bh(&tp->lock);
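        /* Scale to the millidegree-Celsius units hwmon expects. */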
10815         return sprintf(buf, "%u\n", temperature * 1000);
10816 }
10817
10818
10819 static SENSOR_DEVICE_ATTR(temp1_input, 0444, tg3_show_temp, NULL,
10820                           TG3_TEMP_SENSOR_OFFSET);
10821 static SENSOR_DEVICE_ATTR(temp1_crit, 0444, tg3_show_temp, NULL,
10822                           TG3_TEMP_CAUTION_OFFSET);
10823 static SENSOR_DEVICE_ATTR(temp1_max, 0444, tg3_show_temp, NULL,
10824                           TG3_TEMP_MAX_OFFSET);
10825
10826 static struct attribute *tg3_attrs[] = {
10827         &sensor_dev_attr_temp1_input.dev_attr.attr,
10828         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10829         &sensor_dev_attr_temp1_max.dev_attr.attr,
10830         NULL
10831 };
10832 ATTRIBUTE_GROUPS(tg3);
10833
10834 static void tg3_hwmon_close(struct tg3 *tp)
10835 {
10836         if (tp->hwmon_dev) {
10837                 hwmon_device_unregister(tp->hwmon_dev);
10838                 tp->hwmon_dev = NULL;
10839         }
10840 }
10841
10842 static void tg3_hwmon_open(struct tg3 *tp)
10843 {
10844         int i;
10845         u32 size = 0;
10846         struct pci_dev *pdev = tp->pdev;
10847         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10848
10849         tg3_sd_scan_scratchpad(tp, ocirs);
10850
10851         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10852                 if (!ocirs[i].src_data_length)
10853                         continue;
10854
10855                 size += ocirs[i].src_hdr_length;
10856                 size += ocirs[i].src_data_length;
10857         }
10858
10859         if (!size)
10860                 return;
10861
10862         tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10863                                                           tp, tg3_groups);
10864         if (IS_ERR(tp->hwmon_dev)) {
10865                 tp->hwmon_dev = NULL;
10866                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10867         }
10868 }
10869 #else
10870 static inline void tg3_hwmon_close(struct tg3 *tp) { }
10871 static inline void tg3_hwmon_open(struct tg3 *tp) { }
10872 #endif /* CONFIG_TIGON3_HWMON */
10873
10874
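/* Accumulate a 32-bit hardware counter into a 64-bit {high,low} software
 * counter, carrying into the high word when the low word wraps.
 */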
10875 #define TG3_STAT_ADD32(PSTAT, REG) \
10876 do {    u32 __val = tr32(REG); \
10877         (PSTAT)->low += __val; \
10878         if ((PSTAT)->low < __val) \
10879                 (PSTAT)->high += 1; \
10880 } while (0)
10881
10882 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10883 {
10884         struct tg3_hw_stats *sp = tp->hw_stats;
10885
10886         if (!tp->link_up)
10887                 return;
10888
10889         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10890         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10891         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10892         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10893         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10894         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10895         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10896         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10897         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10898         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10899         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10900         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10901         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
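        /* Once more frames have been sent than there are RDMA channels,
         * the 5719/5720 read-DMA workaround can be switched off again.
         */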
10902         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10903                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10904                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10905                 u32 val;
10906
10907                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10908                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10909                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10910                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10911         }
10912
10913         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10914         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10915         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10916         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10917         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10918         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10919         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10920         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10921         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10922         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10923         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10924         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10925         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10926         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10927
10928         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10929         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10930             tg3_asic_rev(tp) != ASIC_REV_5762 &&
10931             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10932             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10933                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10934         } else {
10935                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10936                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10937                 if (val) {
10938                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10939                         sp->rx_discards.low += val;
10940                         if (sp->rx_discards.low < val)
10941                                 sp->rx_discards.high += 1;
10942                 }
10943                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10944         }
10945         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10946 }
10947
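/* Detect a missed MSI: if a vector still has work pending and its
 * consumer indices have not advanced since the last check, invoke the
 * MSI handler by hand to unwedge it.
 */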
10948 static void tg3_chk_missed_msi(struct tg3 *tp)
10949 {
10950         u32 i;
10951
10952         for (i = 0; i < tp->irq_cnt; i++) {
10953                 struct tg3_napi *tnapi = &tp->napi[i];
10954
10955                 if (tg3_has_work(tnapi)) {
10956                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10957                             tnapi->last_tx_cons == tnapi->tx_cons) {
10958                                 if (tnapi->chk_msi_cnt < 1) {
10959                                         tnapi->chk_msi_cnt++;
10960                                         return;
10961                                 }
10962                                 tg3_msi(0, tnapi);
10963                         }
10964                 }
10965                 tnapi->chk_msi_cnt = 0;
10966                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10967                 tnapi->last_tx_cons = tnapi->tx_cons;
10968         }
10969 }
10970
10971 static void tg3_timer(struct timer_list *t)
10972 {
10973         struct tg3 *tp = from_timer(tp, t, timer);
10974
10975         spin_lock(&tp->lock);
10976
10977         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10978                 spin_unlock(&tp->lock);
10979                 goto restart_timer;
10980         }
10981
10982         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10983             tg3_flag(tp, 57765_CLASS))
10984                 tg3_chk_missed_msi(tp);
10985
10986         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10987                 /* BCM4785: Flush posted writes from GbE to host memory. */
10988                 tr32(HOSTCC_MODE);
10989         }
10990
10991         if (!tg3_flag(tp, TAGGED_STATUS)) {
10992                 /* All of this garbage exists because, when using non-tagged
10993                  * IRQ status, the mailbox/status_block protocol the chip
10994                  * uses with the CPU is race prone.
10995                  */
10996                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10997                         tw32(GRC_LOCAL_CTRL,
10998                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10999                 } else {
11000                         tw32(HOSTCC_MODE, tp->coalesce_mode |
11001                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
11002                 }
11003
11004                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11005                         spin_unlock(&tp->lock);
11006                         tg3_reset_task_schedule(tp);
11007                         goto restart_timer;
11008                 }
11009         }
11010
11011         /* This part only runs once per second. */
11012         if (!--tp->timer_counter) {
11013                 if (tg3_flag(tp, 5705_PLUS))
11014                         tg3_periodic_fetch_stats(tp);
11015
11016                 if (tp->setlpicnt && !--tp->setlpicnt)
11017                         tg3_phy_eee_enable(tp);
11018
11019                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
11020                         u32 mac_stat;
11021                         int phy_event;
11022
11023                         mac_stat = tr32(MAC_STATUS);
11024
11025                         phy_event = 0;
11026                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
11027                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
11028                                         phy_event = 1;
11029                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
11030                                 phy_event = 1;
11031
11032                         if (phy_event)
11033                                 tg3_setup_phy(tp, false);
11034                 } else if (tg3_flag(tp, POLL_SERDES)) {
11035                         u32 mac_stat = tr32(MAC_STATUS);
11036                         int need_setup = 0;
11037
11038                         if (tp->link_up &&
11039                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
11040                                 need_setup = 1;
11041                         }
11042                         if (!tp->link_up &&
11043                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
11044                                          MAC_STATUS_SIGNAL_DET))) {
11045                                 need_setup = 1;
11046                         }
11047                         if (need_setup) {
11048                                 if (!tp->serdes_counter) {
11049                                         tw32_f(MAC_MODE,
11050                                              (tp->mac_mode &
11051                                               ~MAC_MODE_PORT_MODE_MASK));
11052                                         udelay(40);
11053                                         tw32_f(MAC_MODE, tp->mac_mode);
11054                                         udelay(40);
11055                                 }
11056                                 tg3_setup_phy(tp, false);
11057                         }
11058                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11059                            tg3_flag(tp, 5780_CLASS)) {
11060                         tg3_serdes_parallel_detect(tp);
11061                 } else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11062                         u32 cpmu = tr32(TG3_CPMU_STATUS);
11063                         bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11064                                          TG3_CPMU_STATUS_LINK_MASK);
11065
11066                         if (link_up != tp->link_up)
11067                                 tg3_setup_phy(tp, false);
11068                 }
11069
11070                 tp->timer_counter = tp->timer_multiplier;
11071         }
11072
11073         /* Heartbeat is only sent once every 2 seconds.
11074          *
11075          * The heartbeat is to tell the ASF firmware that the host
11076          * driver is still alive.  In the event that the OS crashes,
11077          * ASF needs to reset the hardware to free up the FIFO space
11078          * that may be filled with rx packets destined for the host.
11079          * If the FIFO is full, ASF will no longer function properly.
11080          *
11081          * Unintended resets have been reported on real-time kernels
11082          * where the timer doesn't run on time.  Netpoll will have the
11083          * same problem.
11084          *
11085          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11086          * to check the ring condition when the heartbeat is expiring
11087          * before doing the reset.  This will prevent most unintended
11088          * resets.
11089          */
11090         if (!--tp->asf_counter) {
11091                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11092                         tg3_wait_for_event_ack(tp);
11093
11094                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11095                                       FWCMD_NICDRV_ALIVE3);
11096                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11097                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11098                                       TG3_FW_UPDATE_TIMEOUT_SEC);
11099
11100                         tg3_generate_fw_event(tp);
11101                 }
11102                 tp->asf_counter = tp->asf_multiplier;
11103         }
11104
11105         /* Update the APE heartbeat every 5 seconds. */
11106         tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
11107
11108         spin_unlock(&tp->lock);
11109
11110 restart_timer:
11111         tp->timer.expires = jiffies + tp->timer_offset;
11112         add_timer(&tp->timer);
11113 }
11114
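/* Tagged-status chips (other than the 5717 and 57765 classes) get by
 * with a 1 Hz service timer; everything else polls at 10 Hz.  The
 * multipliers rescale the timer tick to the once-per-second work and
 * the ASF heartbeat interval.
 */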
11115 static void tg3_timer_init(struct tg3 *tp)
11116 {
11117         if (tg3_flag(tp, TAGGED_STATUS) &&
11118             tg3_asic_rev(tp) != ASIC_REV_5717 &&
11119             !tg3_flag(tp, 57765_CLASS))
11120                 tp->timer_offset = HZ;
11121         else
11122                 tp->timer_offset = HZ / 10;
11123
11124         BUG_ON(tp->timer_offset > HZ);
11125
11126         tp->timer_multiplier = (HZ / tp->timer_offset);
11127         tp->asf_multiplier = (HZ / tp->timer_offset) *
11128                              TG3_FW_UPDATE_FREQ_SEC;
11129
11130         timer_setup(&tp->timer, tg3_timer, 0);
11131 }
11132
11133 static void tg3_timer_start(struct tg3 *tp)
11134 {
11135         tp->asf_counter   = tp->asf_multiplier;
11136         tp->timer_counter = tp->timer_multiplier;
11137
11138         tp->timer.expires = jiffies + tp->timer_offset;
11139         add_timer(&tp->timer);
11140 }
11141
11142 static void tg3_timer_stop(struct tg3 *tp)
11143 {
11144         del_timer_sync(&tp->timer);
11145 }
11146
11147 /* Restart hardware after configuration changes, self-test, etc.
11148  * Invoked with tp->lock held.
11149  */
11150 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11151         __releases(tp->lock)
11152         __acquires(tp->lock)
11153 {
11154         int err;
11155
11156         err = tg3_init_hw(tp, reset_phy);
11157         if (err) {
11158                 netdev_err(tp->dev,
11159                            "Failed to re-initialize device, aborting\n");
11160                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11161                 tg3_full_unlock(tp);
11162                 tg3_timer_stop(tp);
11163                 tp->irq_sync = 0;
11164                 tg3_napi_enable(tp);
11165                 dev_close(tp->dev);
11166                 tg3_full_lock(tp, 0);
11167         }
11168         return err;
11169 }
11170
11171 static void tg3_reset_task(struct work_struct *work)
11172 {
11173         struct tg3 *tp = container_of(work, struct tg3, reset_task);
11174         int err;
11175
11176         rtnl_lock();
11177         tg3_full_lock(tp, 0);
11178
11179         if (!netif_running(tp->dev)) {
11180                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11181                 tg3_full_unlock(tp);
11182                 rtnl_unlock();
11183                 return;
11184         }
11185
11186         tg3_full_unlock(tp);
11187
11188         tg3_phy_stop(tp);
11189
11190         tg3_netif_stop(tp);
11191
11192         tg3_full_lock(tp, 1);
11193
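        /* A pending tx recovery means the system may be reordering
         * memory-mapped I/O cycles; switch to the flushing mailbox
         * accessors and record the write-reorder workaround.
         */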
11194         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11195                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11196                 tp->write32_rx_mbox = tg3_write_flush_reg32;
11197                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11198                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11199         }
11200
11201         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11202         err = tg3_init_hw(tp, true);
11203         if (err) {
11204                 tg3_full_unlock(tp);
11205                 tp->irq_sync = 0;
11206                 tg3_napi_enable(tp);
11207                 /* Clear this flag so that tg3_reset_task_cancel() will not
11208                  * call cancel_work_sync() and wait forever.
11209                  */
11210                 tg3_flag_clear(tp, RESET_TASK_PENDING);
11211                 dev_close(tp->dev);
11212                 goto out;
11213         }
11214
11215         tg3_netif_start(tp);
11216
11217         tg3_full_unlock(tp);
11218
11219         if (!err)
11220                 tg3_phy_start(tp);
11221
11222         tg3_flag_clear(tp, RESET_TASK_PENDING);
11223 out:
11224         rtnl_unlock();
11225 }
11226
11227 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11228 {
11229         irq_handler_t fn;
11230         unsigned long flags;
11231         char *name;
11232         struct tg3_napi *tnapi = &tp->napi[irq_num];
11233
11234         if (tp->irq_cnt == 1)
11235                 name = tp->dev->name;
11236         else {
11237                 name = &tnapi->irq_lbl[0];
11238                 if (tnapi->tx_buffers && tnapi->rx_rcb)
11239                         snprintf(name, IFNAMSIZ,
11240                                  "%s-txrx-%d", tp->dev->name, irq_num);
11241                 else if (tnapi->tx_buffers)
11242                         snprintf(name, IFNAMSIZ,
11243                                  "%s-tx-%d", tp->dev->name, irq_num);
11244                 else if (tnapi->rx_rcb)
11245                         snprintf(name, IFNAMSIZ,
11246                                  "%s-rx-%d", tp->dev->name, irq_num);
11247                 else
11248                         snprintf(name, IFNAMSIZ,
11249                                  "%s-%d", tp->dev->name, irq_num);
11250                 name[IFNAMSIZ-1] = 0;
11251         }
11252
11253         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11254                 fn = tg3_msi;
11255                 if (tg3_flag(tp, 1SHOT_MSI))
11256                         fn = tg3_msi_1shot;
11257                 flags = 0;
11258         } else {
11259                 fn = tg3_interrupt;
11260                 if (tg3_flag(tp, TAGGED_STATUS))
11261                         fn = tg3_interrupt_tagged;
11262                 flags = IRQF_SHARED;
11263         }
11264
11265         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11266 }
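      /* Illustrative note: with MSI-X on a device named "eth0", the
       * labels built above typically come out as "eth0-0" for the
       * link-only vector, "eth0-txrx-1" for a combined ring, and
       * "eth0-rx-2" and up for rx-only rings, which is what appears in
       * /proc/interrupts.
       */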
11267
11268 static int tg3_test_interrupt(struct tg3 *tp)
11269 {
11270         struct tg3_napi *tnapi = &tp->napi[0];
11271         struct net_device *dev = tp->dev;
11272         int err, i, intr_ok = 0;
11273         u32 val;
11274
11275         if (!netif_running(dev))
11276                 return -ENODEV;
11277
11278         tg3_disable_ints(tp);
11279
11280         free_irq(tnapi->irq_vec, tnapi);
11281
11282         /*
11283          * Turn off MSI one shot mode.  Otherwise this test has no
11284          * observable way to know whether the interrupt was delivered.
11285          */
11286         if (tg3_flag(tp, 57765_PLUS)) {
11287                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11288                 tw32(MSGINT_MODE, val);
11289         }
11290
11291         err = request_irq(tnapi->irq_vec, tg3_test_isr,
11292                           IRQF_SHARED, dev->name, tnapi);
11293         if (err)
11294                 return err;
11295
11296         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11297         tg3_enable_ints(tp);
11298
11299         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11300                tnapi->coal_now);
11301
11302         for (i = 0; i < 5; i++) {
11303                 u32 int_mbox, misc_host_ctrl;
11304
11305                 int_mbox = tr32_mailbox(tnapi->int_mbox);
11306                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11307
11308                 if ((int_mbox != 0) ||
11309                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11310                         intr_ok = 1;
11311                         break;
11312                 }
11313
11314                 if (tg3_flag(tp, 57765_PLUS) &&
11315                     tnapi->hw_status->status_tag != tnapi->last_tag)
11316                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11317
11318                 msleep(10);
11319         }
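              /* By this point the chip has had up to ~50ms (five polls,
               * 10ms apart) to post the test interrupt; intr_ok records
               * whether it was seen via the interrupt mailbox or the
               * masked-PCI-interrupt bit in MISC_HOST_CTRL.
               */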
11320
11321         tg3_disable_ints(tp);
11322
11323         free_irq(tnapi->irq_vec, tnapi);
11324
11325         err = tg3_request_irq(tp, 0);
11326
11327         if (err)
11328                 return err;
11329
11330         if (intr_ok) {
11331                 /* Reenable MSI one shot mode. */
11332                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11333                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11334                         tw32(MSGINT_MODE, val);
11335                 }
11336                 return 0;
11337         }
11338
11339         return -EIO;
11340 }
11341
11342 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11343  * INTx mode is successfully restored.
11344  */
11345 static int tg3_test_msi(struct tg3 *tp)
11346 {
11347         int err;
11348         u16 pci_cmd;
11349
11350         if (!tg3_flag(tp, USING_MSI))
11351                 return 0;
11352
11353         /* Turn off SERR reporting in case MSI terminates with Master
11354          * Abort.
11355          */
11356         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11357         pci_write_config_word(tp->pdev, PCI_COMMAND,
11358                               pci_cmd & ~PCI_COMMAND_SERR);
11359
11360         err = tg3_test_interrupt(tp);
11361
11362         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11363
11364         if (!err)
11365                 return 0;
11366
11367         /* other failures */
11368         if (err != -EIO)
11369                 return err;
11370
11371         /* MSI test failed, go back to INTx mode */
11372         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11373                     "to INTx mode. Please report this failure to the PCI "
11374                     "maintainer and include system chipset information\n");
11375
11376         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11377
11378         pci_disable_msi(tp->pdev);
11379
11380         tg3_flag_clear(tp, USING_MSI);
11381         tp->napi[0].irq_vec = tp->pdev->irq;
11382
11383         err = tg3_request_irq(tp, 0);
11384         if (err)
11385                 return err;
11386
11387         /* Need to reset the chip because the MSI cycle may have terminated
11388          * with Master Abort.
11389          */
11390         tg3_full_lock(tp, 1);
11391
11392         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11393         err = tg3_init_hw(tp, true);
11394
11395         tg3_full_unlock(tp);
11396
11397         if (err)
11398                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11399
11400         return err;
11401 }
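      /* Summary of the fallback path above: a failed MSI test frees the
       * MSI vector, disables MSI, re-requests the legacy INTx vector,
       * and resets the chip in case the aborted MSI cycle ended in a
       * Master Abort.
       */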
11402
11403 static int tg3_request_firmware(struct tg3 *tp)
11404 {
11405         const struct tg3_firmware_hdr *fw_hdr;
11406
11407         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11408                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11409                            tp->fw_needed);
11410                 return -ENOENT;
11411         }
11412
11413         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11414
11415         /* Firmware blob starts with version numbers, followed by
11416          * start address and _full_ length including BSS sections
11417          * (which must be longer than the actual data, of course).
11418          */
11419
11420         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11421         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11422                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11423                            tp->fw_len, tp->fw_needed);
11424                 release_firmware(tp->fw);
11425                 tp->fw = NULL;
11426                 return -EINVAL;
11427         }
11428
11429         /* We no longer need firmware; we have it. */
11430         tp->fw_needed = NULL;
11431         return 0;
11432 }
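      /* Sketch of the header parsed above (field layout as assumed from
       * tg3.h; verify against the actual struct definition):
       *
       *	struct tg3_firmware_hdr {
       *		__be32 version;		// packed major/minor
       *		__be32 base_addr;	// load address on the NIC CPU
       *		__be32 len;		// full image length, BSS included
       *	};
       *
       * The length check rejects blobs whose advertised length is
       * smaller than the payload actually present in the file.
       */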
11433
11434 static u32 tg3_irq_count(struct tg3 *tp)
11435 {
11436         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11437
11438         if (irq_cnt > 1) {
11439                 /* We want as many rx rings enabled as there are cpus.
11440                  * In multiqueue MSI-X mode, the first MSI-X vector
11441                  * only deals with link interrupts, etc, so we add
11442                  * one to the number of vectors we are requesting.
11443                  */
11444                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11445         }
11446
11447         return irq_cnt;
11448 }
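      /* Example (illustrative): with rxq_cnt == 4 and txq_cnt == 1,
       * irq_cnt becomes min(4 + 1, tp->irq_max); the extra vector is
       * the first MSI-X vector, which handles link interrupts only.
       */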
11449
11450 static bool tg3_enable_msix(struct tg3 *tp)
11451 {
11452         int i, rc;
11453         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11454
11455         tp->txq_cnt = tp->txq_req;
11456         tp->rxq_cnt = tp->rxq_req;
11457         if (!tp->rxq_cnt)
11458                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11459         if (tp->rxq_cnt > tp->rxq_max)
11460                 tp->rxq_cnt = tp->rxq_max;
11461
11462         /* Disable multiple TX rings by default.  Simple round-robin hardware
11463          * scheduling of the TX rings can cause starvation of rings with
11464          * small packets when other rings have TSO or jumbo packets.
11465          */
11466         if (!tp->txq_req)
11467                 tp->txq_cnt = 1;
11468
11469         tp->irq_cnt = tg3_irq_count(tp);
11470
11471         for (i = 0; i < tp->irq_max; i++) {
11472                 msix_ent[i].entry  = i;
11473                 msix_ent[i].vector = 0;
11474         }
11475
11476         rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11477         if (rc < 0) {
11478                 return false;
11479         } else if (rc < tp->irq_cnt) {
11480                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11481                               tp->irq_cnt, rc);
11482                 tp->irq_cnt = rc;
11483                 tp->rxq_cnt = max(rc - 1, 1);
11484                 if (tp->txq_cnt)
11485                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11486         }
11487
11488         for (i = 0; i < tp->irq_max; i++)
11489                 tp->napi[i].irq_vec = msix_ent[i].vector;
11490
11491         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11492                 pci_disable_msix(tp->pdev);
11493                 return false;
11494         }
11495
11496         if (tp->irq_cnt == 1)
11497                 return true;
11498
11499         tg3_flag_set(tp, ENABLE_RSS);
11500
11501         if (tp->txq_cnt > 1)
11502                 tg3_flag_set(tp, ENABLE_TSS);
11503
11504         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11505
11506         return true;
11507 }
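      /* If the host grants fewer vectors than requested (rc < irq_cnt
       * above), the code falls back to rc - 1 rx rings so that vector 0
       * stays reserved for link events; e.g. a grant of 3 vectors
       * yields 2 rx rings.
       */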
11508
11509 static void tg3_ints_init(struct tg3 *tp)
11510 {
11511         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11512             !tg3_flag(tp, TAGGED_STATUS)) {
11513                 /* All MSI-supporting chips should support tagged
11514                  * status.  Assert that this is the case.
11515                  */
11516                 netdev_warn(tp->dev,
11517                             "MSI without TAGGED_STATUS? Not using MSI\n");
11518                 goto defcfg;
11519         }
11520
11521         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11522                 tg3_flag_set(tp, USING_MSIX);
11523         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11524                 tg3_flag_set(tp, USING_MSI);
11525
11526         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11527                 u32 msi_mode = tr32(MSGINT_MODE);
11528                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11529                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11530                 if (!tg3_flag(tp, 1SHOT_MSI))
11531                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11532                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11533         }
11534 defcfg:
11535         if (!tg3_flag(tp, USING_MSIX)) {
11536                 tp->irq_cnt = 1;
11537                 tp->napi[0].irq_vec = tp->pdev->irq;
11538         }
11539
11540         if (tp->irq_cnt == 1) {
11541                 tp->txq_cnt = 1;
11542                 tp->rxq_cnt = 1;
11543                 netif_set_real_num_tx_queues(tp->dev, 1);
11544                 netif_set_real_num_rx_queues(tp->dev, 1);
11545         }
11546 }
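      /* Fallback summary: without MSI-X the driver ends up with a
       * single vector (plain MSI or legacy INTx), which in turn forces
       * the single-queue rx/tx configuration above.
       */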
11547
11548 static void tg3_ints_fini(struct tg3 *tp)
11549 {
11550         if (tg3_flag(tp, USING_MSIX))
11551                 pci_disable_msix(tp->pdev);
11552         else if (tg3_flag(tp, USING_MSI))
11553                 pci_disable_msi(tp->pdev);
11554         tg3_flag_clear(tp, USING_MSI);
11555         tg3_flag_clear(tp, USING_MSIX);
11556         tg3_flag_clear(tp, ENABLE_RSS);
11557         tg3_flag_clear(tp, ENABLE_TSS);
11558 }
11559
11560 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11561                      bool init)
11562 {
11563         struct net_device *dev = tp->dev;
11564         int i, err;
11565
11566         /*
11567          * Setup interrupts first so we know how
11568          * many NAPI resources to allocate
11569          */
11570         tg3_ints_init(tp);
11571
11572         tg3_rss_check_indir_tbl(tp);
11573
11574         /* The placement of this call is tied
11575          * to the setup and use of Host TX descriptors.
11576          */
11577         err = tg3_alloc_consistent(tp);
11578         if (err)
11579                 goto out_ints_fini;
11580
11581         tg3_napi_init(tp);
11582
11583         tg3_napi_enable(tp);
11584
11585         for (i = 0; i < tp->irq_cnt; i++) {
11586                 err = tg3_request_irq(tp, i);
11587                 if (err) {
11588                         for (i--; i >= 0; i--) {
11589                                 struct tg3_napi *tnapi = &tp->napi[i];
11590
11591                                 free_irq(tnapi->irq_vec, tnapi);
11592                         }
11593                         goto out_napi_fini;
11594                 }
11595         }
11596
11597         tg3_full_lock(tp, 0);
11598
11599         if (init)
11600                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11601
11602         err = tg3_init_hw(tp, reset_phy);
11603         if (err) {
11604                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11605                 tg3_free_rings(tp);
11606         }
11607
11608         tg3_full_unlock(tp);
11609
11610         if (err)
11611                 goto out_free_irq;
11612
11613         if (test_irq && tg3_flag(tp, USING_MSI)) {
11614                 err = tg3_test_msi(tp);
11615
11616                 if (err) {
11617                         tg3_full_lock(tp, 0);
11618                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11619                         tg3_free_rings(tp);
11620                         tg3_full_unlock(tp);
11621
11622                         goto out_napi_fini;
11623                 }
11624
11625                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11626                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11627
11628                         tw32(PCIE_TRANSACTION_CFG,
11629                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11630                 }
11631         }
11632
11633         tg3_phy_start(tp);
11634
11635         tg3_hwmon_open(tp);
11636
11637         tg3_full_lock(tp, 0);
11638
11639         tg3_timer_start(tp);
11640         tg3_flag_set(tp, INIT_COMPLETE);
11641         tg3_enable_ints(tp);
11642
11643         tg3_ptp_resume(tp);
11644
11645         tg3_full_unlock(tp);
11646
11647         netif_tx_start_all_queues(dev);
11648
11649         /*
11650          * Reset the loopback feature if it was turned on while the device
11651          * was down; make sure that it's installed properly now.
11652          */
11653         if (dev->features & NETIF_F_LOOPBACK)
11654                 tg3_set_loopback(dev, dev->features);
11655
11656         return 0;
11657
11658 out_free_irq:
11659         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11660                 struct tg3_napi *tnapi = &tp->napi[i];
11661                 free_irq(tnapi->irq_vec, tnapi);
11662         }
11663
11664 out_napi_fini:
11665         tg3_napi_disable(tp);
11666         tg3_napi_fini(tp);
11667         tg3_free_consistent(tp);
11668
11669 out_ints_fini:
11670         tg3_ints_fini(tp);
11671
11672         return err;
11673 }
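      /* The error unwind above mirrors setup order in reverse: IRQs are
       * freed first, then NAPI contexts and DMA-consistent memory, and
       * finally the interrupt vectors themselves via tg3_ints_fini().
       */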
11674
11675 static void tg3_stop(struct tg3 *tp)
11676 {
11677         int i;
11678
11679         tg3_reset_task_cancel(tp);
11680         tg3_netif_stop(tp);
11681
11682         tg3_timer_stop(tp);
11683
11684         tg3_hwmon_close(tp);
11685
11686         tg3_phy_stop(tp);
11687
11688         tg3_full_lock(tp, 1);
11689
11690         tg3_disable_ints(tp);
11691
11692         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11693         tg3_free_rings(tp);
11694         tg3_flag_clear(tp, INIT_COMPLETE);
11695
11696         tg3_full_unlock(tp);
11697
11698         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11699                 struct tg3_napi *tnapi = &tp->napi[i];
11700                 free_irq(tnapi->irq_vec, tnapi);
11701         }
11702
11703         tg3_ints_fini(tp);
11704
11705         tg3_napi_fini(tp);
11706
11707         tg3_free_consistent(tp);
11708 }
11709
11710 static int tg3_open(struct net_device *dev)
11711 {
11712         struct tg3 *tp = netdev_priv(dev);
11713         int err;
11714
11715         if (tp->pcierr_recovery) {
11716                 netdev_err(dev, "Failed to open device. PCI error recovery "
11717                            "in progress\n");
11718                 return -EAGAIN;
11719         }
11720
11721         if (tp->fw_needed) {
11722                 err = tg3_request_firmware(tp);
11723                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11724                         if (err) {
11725                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11726                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11727                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11728                                 netdev_warn(tp->dev, "EEE capability restored\n");
11729                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11730                         }
11731                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11732                         if (err)
11733                                 return err;
11734                 } else if (err) {
11735                         netdev_warn(tp->dev, "TSO capability disabled\n");
11736                         tg3_flag_clear(tp, TSO_CAPABLE);
11737                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11738                         netdev_notice(tp->dev, "TSO capability restored\n");
11739                         tg3_flag_set(tp, TSO_CAPABLE);
11740                 }
11741         }
11742
11743         tg3_carrier_off(tp);
11744
11745         err = tg3_power_up(tp);
11746         if (err)
11747                 return err;
11748
11749         tg3_full_lock(tp, 0);
11750
11751         tg3_disable_ints(tp);
11752         tg3_flag_clear(tp, INIT_COMPLETE);
11753
11754         tg3_full_unlock(tp);
11755
11756         err = tg3_start(tp,
11757                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11758                         true, true);
11759         if (err) {
11760                 tg3_frob_aux_power(tp, false);
11761                 pci_set_power_state(tp->pdev, PCI_D3hot);
11762         }
11763
11764         return err;
11765 }
11766
11767 static int tg3_close(struct net_device *dev)
11768 {
11769         struct tg3 *tp = netdev_priv(dev);
11770
11771         if (tp->pcierr_recovery) {
11772                 netdev_err(dev, "Failed to close device. PCI error recovery "
11773                            "in progress\n");
11774                 return -EAGAIN;
11775         }
11776
11777         tg3_stop(tp);
11778
11779         if (pci_device_is_present(tp->pdev)) {
11780                 tg3_power_down_prepare(tp);
11781
11782                 tg3_carrier_off(tp);
11783         }
11784         return 0;
11785 }
11786
11787 static inline u64 get_stat64(tg3_stat64_t *val)
11788 {
11789         return ((u64)val->high << 32) | ((u64)val->low);
11790 }
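      /* E.g. high == 0x1, low == 0x2 combines to 0x100000002ULL; the
       * hardware maintains each 64-bit counter as two 32-bit halves.
       */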
11791
11792 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11793 {
11794         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11795
11796         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11797             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11798              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11799                 u32 val;
11800
11801                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11802                         tg3_writephy(tp, MII_TG3_TEST1,
11803                                      val | MII_TG3_TEST1_CRC_EN);
11804                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11805                 } else
11806                         val = 0;
11807
11808                 tp->phy_crc_errors += val;
11809
11810                 return tp->phy_crc_errors;
11811         }
11812
11813         return get_stat64(&hw_stats->rx_fcs_errors);
11814 }
11815
11816 #define ESTAT_ADD(member) \
11817         estats->member =        old_estats->member + \
11818                                 get_stat64(&hw_stats->member)
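      /* Expansion example: ESTAT_ADD(rx_octets) becomes
       *
       *	estats->rx_octets = old_estats->rx_octets +
       *			    get_stat64(&hw_stats->rx_octets);
       *
       * i.e. each ethtool counter is the pre-reset snapshot plus the
       * live hardware counter.
       */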
11819
11820 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11821 {
11822         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11823         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11824
11825         ESTAT_ADD(rx_octets);
11826         ESTAT_ADD(rx_fragments);
11827         ESTAT_ADD(rx_ucast_packets);
11828         ESTAT_ADD(rx_mcast_packets);
11829         ESTAT_ADD(rx_bcast_packets);
11830         ESTAT_ADD(rx_fcs_errors);
11831         ESTAT_ADD(rx_align_errors);
11832         ESTAT_ADD(rx_xon_pause_rcvd);
11833         ESTAT_ADD(rx_xoff_pause_rcvd);
11834         ESTAT_ADD(rx_mac_ctrl_rcvd);
11835         ESTAT_ADD(rx_xoff_entered);
11836         ESTAT_ADD(rx_frame_too_long_errors);
11837         ESTAT_ADD(rx_jabbers);
11838         ESTAT_ADD(rx_undersize_packets);
11839         ESTAT_ADD(rx_in_length_errors);
11840         ESTAT_ADD(rx_out_length_errors);
11841         ESTAT_ADD(rx_64_or_less_octet_packets);
11842         ESTAT_ADD(rx_65_to_127_octet_packets);
11843         ESTAT_ADD(rx_128_to_255_octet_packets);
11844         ESTAT_ADD(rx_256_to_511_octet_packets);
11845         ESTAT_ADD(rx_512_to_1023_octet_packets);
11846         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11847         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11848         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11849         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11850         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11851
11852         ESTAT_ADD(tx_octets);
11853         ESTAT_ADD(tx_collisions);
11854         ESTAT_ADD(tx_xon_sent);
11855         ESTAT_ADD(tx_xoff_sent);
11856         ESTAT_ADD(tx_flow_control);
11857         ESTAT_ADD(tx_mac_errors);
11858         ESTAT_ADD(tx_single_collisions);
11859         ESTAT_ADD(tx_mult_collisions);
11860         ESTAT_ADD(tx_deferred);
11861         ESTAT_ADD(tx_excessive_collisions);
11862         ESTAT_ADD(tx_late_collisions);
11863         ESTAT_ADD(tx_collide_2times);
11864         ESTAT_ADD(tx_collide_3times);
11865         ESTAT_ADD(tx_collide_4times);
11866         ESTAT_ADD(tx_collide_5times);
11867         ESTAT_ADD(tx_collide_6times);
11868         ESTAT_ADD(tx_collide_7times);
11869         ESTAT_ADD(tx_collide_8times);
11870         ESTAT_ADD(tx_collide_9times);
11871         ESTAT_ADD(tx_collide_10times);
11872         ESTAT_ADD(tx_collide_11times);
11873         ESTAT_ADD(tx_collide_12times);
11874         ESTAT_ADD(tx_collide_13times);
11875         ESTAT_ADD(tx_collide_14times);
11876         ESTAT_ADD(tx_collide_15times);
11877         ESTAT_ADD(tx_ucast_packets);
11878         ESTAT_ADD(tx_mcast_packets);
11879         ESTAT_ADD(tx_bcast_packets);
11880         ESTAT_ADD(tx_carrier_sense_errors);
11881         ESTAT_ADD(tx_discards);
11882         ESTAT_ADD(tx_errors);
11883
11884         ESTAT_ADD(dma_writeq_full);
11885         ESTAT_ADD(dma_write_prioq_full);
11886         ESTAT_ADD(rxbds_empty);
11887         ESTAT_ADD(rx_discards);
11888         ESTAT_ADD(rx_errors);
11889         ESTAT_ADD(rx_threshold_hit);
11890
11891         ESTAT_ADD(dma_readq_full);
11892         ESTAT_ADD(dma_read_prioq_full);
11893         ESTAT_ADD(tx_comp_queue_full);
11894
11895         ESTAT_ADD(ring_set_send_prod_index);
11896         ESTAT_ADD(ring_status_update);
11897         ESTAT_ADD(nic_irqs);
11898         ESTAT_ADD(nic_avoided_irqs);
11899         ESTAT_ADD(nic_tx_threshold_hit);
11900
11901         ESTAT_ADD(mbuf_lwm_thresh_hit);
11902 }
11903
11904 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11905 {
11906         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11907         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11908
11909         stats->rx_packets = old_stats->rx_packets +
11910                 get_stat64(&hw_stats->rx_ucast_packets) +
11911                 get_stat64(&hw_stats->rx_mcast_packets) +
11912                 get_stat64(&hw_stats->rx_bcast_packets);
11913
11914         stats->tx_packets = old_stats->tx_packets +
11915                 get_stat64(&hw_stats->tx_ucast_packets) +
11916                 get_stat64(&hw_stats->tx_mcast_packets) +
11917                 get_stat64(&hw_stats->tx_bcast_packets);
11918
11919         stats->rx_bytes = old_stats->rx_bytes +
11920                 get_stat64(&hw_stats->rx_octets);
11921         stats->tx_bytes = old_stats->tx_bytes +
11922                 get_stat64(&hw_stats->tx_octets);
11923
11924         stats->rx_errors = old_stats->rx_errors +
11925                 get_stat64(&hw_stats->rx_errors);
11926         stats->tx_errors = old_stats->tx_errors +
11927                 get_stat64(&hw_stats->tx_errors) +
11928                 get_stat64(&hw_stats->tx_mac_errors) +
11929                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11930                 get_stat64(&hw_stats->tx_discards);
11931
11932         stats->multicast = old_stats->multicast +
11933                 get_stat64(&hw_stats->rx_mcast_packets);
11934         stats->collisions = old_stats->collisions +
11935                 get_stat64(&hw_stats->tx_collisions);
11936
11937         stats->rx_length_errors = old_stats->rx_length_errors +
11938                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11939                 get_stat64(&hw_stats->rx_undersize_packets);
11940
11941         stats->rx_frame_errors = old_stats->rx_frame_errors +
11942                 get_stat64(&hw_stats->rx_align_errors);
11943         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11944                 get_stat64(&hw_stats->tx_discards);
11945         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11946                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11947
11948         stats->rx_crc_errors = old_stats->rx_crc_errors +
11949                 tg3_calc_crc_errors(tp);
11950
11951         stats->rx_missed_errors = old_stats->rx_missed_errors +
11952                 get_stat64(&hw_stats->rx_discards);
11953
11954         stats->rx_dropped = tp->rx_dropped;
11955         stats->tx_dropped = tp->tx_dropped;
11956 }
11957
11958 static int tg3_get_regs_len(struct net_device *dev)
11959 {
11960         return TG3_REG_BLK_SIZE;
11961 }
11962
11963 static void tg3_get_regs(struct net_device *dev,
11964                 struct ethtool_regs *regs, void *_p)
11965 {
11966         struct tg3 *tp = netdev_priv(dev);
11967
11968         regs->version = 0;
11969
11970         memset(_p, 0, TG3_REG_BLK_SIZE);
11971
11972         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11973                 return;
11974
11975         tg3_full_lock(tp, 0);
11976
11977         tg3_dump_legacy_regs(tp, (u32 *)_p);
11978
11979         tg3_full_unlock(tp);
11980 }
11981
11982 static int tg3_get_eeprom_len(struct net_device *dev)
11983 {
11984         struct tg3 *tp = netdev_priv(dev);
11985
11986         return tp->nvram_size;
11987 }
11988
11989 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11990 {
11991         struct tg3 *tp = netdev_priv(dev);
11992         int ret, cpmu_restore = 0;
11993         u8  *pd;
11994         u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11995         __be32 val;
11996
11997         if (tg3_flag(tp, NO_NVRAM))
11998                 return -EINVAL;
11999
12000         offset = eeprom->offset;
12001         len = eeprom->len;
12002         eeprom->len = 0;
12003
12004         eeprom->magic = TG3_EEPROM_MAGIC;
12005
12006         /* Override clock, link aware and link idle modes */
12007         if (tg3_flag(tp, CPMU_PRESENT)) {
12008                 cpmu_val = tr32(TG3_CPMU_CTRL);
12009                 if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
12010                                 CPMU_CTRL_LINK_IDLE_MODE)) {
12011                         tw32(TG3_CPMU_CTRL, cpmu_val &
12012                                             ~(CPMU_CTRL_LINK_AWARE_MODE |
12013                                              CPMU_CTRL_LINK_IDLE_MODE));
12014                         cpmu_restore = 1;
12015                 }
12016         }
12017         tg3_override_clk(tp);
12018
12019         if (offset & 3) {
12020                 /* adjustments to start on required 4 byte boundary */
12021                 b_offset = offset & 3;
12022                 b_count = 4 - b_offset;
12023                 if (b_count > len) {
12024                         /* i.e. offset=1 len=2 */
12025                         b_count = len;
12026                 }
12027                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
12028                 if (ret)
12029                         goto eeprom_done;
12030                 memcpy(data, ((char *)&val) + b_offset, b_count);
12031                 len -= b_count;
12032                 offset += b_count;
12033                 eeprom->len += b_count;
12034         }
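              /* Worked example (illustrative): offset == 5, len == 10
               * gives b_offset == 1 and b_count == 3, so bytes 5..7 are
               * carved out of the word read at NVRAM offset 4 and the
               * main loop below continues at the aligned offset 8.
               */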
12035
12036         /* read bytes up to the last 4 byte boundary */
12037         pd = &data[eeprom->len];
12038         for (i = 0; i < (len - (len & 3)); i += 4) {
12039                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
12040                 if (ret) {
12041                         if (i)
12042                                 i -= 4;
12043                         eeprom->len += i;
12044                         goto eeprom_done;
12045                 }
12046                 memcpy(pd + i, &val, 4);
12047                 if (need_resched()) {
12048                         if (signal_pending(current)) {
12049                                 eeprom->len += i;
12050                                 ret = -EINTR;
12051                                 goto eeprom_done;
12052                         }
12053                         cond_resched();
12054                 }
12055         }
12056         eeprom->len += i;
12057
12058         if (len & 3) {
12059                 /* read last bytes not ending on 4 byte boundary */
12060                 pd = &data[eeprom->len];
12061                 b_count = len & 3;
12062                 b_offset = offset + len - b_count;
12063                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
12064                 if (ret)
12065                         goto eeprom_done;
12066                 memcpy(pd, &val, b_count);
12067                 eeprom->len += b_count;
12068         }
12069         ret = 0;
12070
12071 eeprom_done:
12072         /* Restore clock, link aware and link idle modes */
12073         tg3_restore_clk(tp);
12074         if (cpmu_restore)
12075                 tw32(TG3_CPMU_CTRL, cpmu_val);
12076
12077         return ret;
12078 }
12079
12080 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12081 {
12082         struct tg3 *tp = netdev_priv(dev);
12083         int ret;
12084         u32 offset, len, b_offset, odd_len;
12085         u8 *buf;
12086         __be32 start = 0, end;
12087
12088         if (tg3_flag(tp, NO_NVRAM) ||
12089             eeprom->magic != TG3_EEPROM_MAGIC)
12090                 return -EINVAL;
12091
12092         offset = eeprom->offset;
12093         len = eeprom->len;
12094
12095         if ((b_offset = (offset & 3))) {
12096                 /* adjustments to start on required 4 byte boundary */
12097                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12098                 if (ret)
12099                         return ret;
12100                 len += b_offset;
12101                 offset &= ~3;
12102                 if (len < 4)
12103                         len = 4;
12104         }
12105
12106         odd_len = 0;
12107         if (len & 3) {
12108                 /* adjustments to end on required 4 byte boundary */
12109                 odd_len = 1;
12110                 len = (len + 3) & ~3;
12111                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12112                 if (ret)
12113                         return ret;
12114         }
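              /* Unaligned writes are handled read-modify-write: the
               * partial leading word ("start") and trailing word
               * ("end") fetched above get spliced around the caller's
               * data in a bounce buffer below, so the NVRAM block write
               * always covers whole 4-byte words.
               */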
12115
12116         buf = data;
12117         if (b_offset || odd_len) {
12118                 buf = kmalloc(len, GFP_KERNEL);
12119                 if (!buf)
12120                         return -ENOMEM;
12121                 if (b_offset)
12122                         memcpy(buf, &start, 4);
12123                 if (odd_len)
12124                         memcpy(buf+len-4, &end, 4);
12125                 memcpy(buf + b_offset, data, eeprom->len);
12126         }
12127
12128         ret = tg3_nvram_write_block(tp, offset, len, buf);
12129
12130         if (buf != data)
12131                 kfree(buf);
12132
12133         return ret;
12134 }
12135
12136 static int tg3_get_link_ksettings(struct net_device *dev,
12137                                   struct ethtool_link_ksettings *cmd)
12138 {
12139         struct tg3 *tp = netdev_priv(dev);
12140         u32 supported, advertising;
12141
12142         if (tg3_flag(tp, USE_PHYLIB)) {
12143                 struct phy_device *phydev;
12144                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12145                         return -EAGAIN;
12146                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12147                 phy_ethtool_ksettings_get(phydev, cmd);
12148
12149                 return 0;
12150         }
12151
12152         supported = (SUPPORTED_Autoneg);
12153
12154         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12155                 supported |= (SUPPORTED_1000baseT_Half |
12156                               SUPPORTED_1000baseT_Full);
12157
12158         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12159                 supported |= (SUPPORTED_100baseT_Half |
12160                               SUPPORTED_100baseT_Full |
12161                               SUPPORTED_10baseT_Half |
12162                               SUPPORTED_10baseT_Full |
12163                               SUPPORTED_TP);
12164                 cmd->base.port = PORT_TP;
12165         } else {
12166                 supported |= SUPPORTED_FIBRE;
12167                 cmd->base.port = PORT_FIBRE;
12168         }
12169         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
12170                                                 supported);
12171
12172         advertising = tp->link_config.advertising;
12173         if (tg3_flag(tp, PAUSE_AUTONEG)) {
12174                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12175                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12176                                 advertising |= ADVERTISED_Pause;
12177                         } else {
12178                                 advertising |= ADVERTISED_Pause |
12179                                         ADVERTISED_Asym_Pause;
12180                         }
12181                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12182                         advertising |= ADVERTISED_Asym_Pause;
12183                 }
12184         }
12185         ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
12186                                                 advertising);
12187
12188         if (netif_running(dev) && tp->link_up) {
12189                 cmd->base.speed = tp->link_config.active_speed;
12190                 cmd->base.duplex = tp->link_config.active_duplex;
12191                 ethtool_convert_legacy_u32_to_link_mode(
12192                         cmd->link_modes.lp_advertising,
12193                         tp->link_config.rmt_adv);
12194
12195                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12196                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12197                                 cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
12198                         else
12199                                 cmd->base.eth_tp_mdix = ETH_TP_MDI;
12200                 }
12201         } else {
12202                 cmd->base.speed = SPEED_UNKNOWN;
12203                 cmd->base.duplex = DUPLEX_UNKNOWN;
12204                 cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
12205         }
12206         cmd->base.phy_address = tp->phy_addr;
12207         cmd->base.autoneg = tp->link_config.autoneg;
12208         return 0;
12209 }
12210
12211 static int tg3_set_link_ksettings(struct net_device *dev,
12212                                   const struct ethtool_link_ksettings *cmd)
12213 {
12214         struct tg3 *tp = netdev_priv(dev);
12215         u32 speed = cmd->base.speed;
12216         u32 advertising;
12217
12218         if (tg3_flag(tp, USE_PHYLIB)) {
12219                 struct phy_device *phydev;
12220                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12221                         return -EAGAIN;
12222                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12223                 return phy_ethtool_ksettings_set(phydev, cmd);
12224         }
12225
12226         if (cmd->base.autoneg != AUTONEG_ENABLE &&
12227             cmd->base.autoneg != AUTONEG_DISABLE)
12228                 return -EINVAL;
12229
12230         if (cmd->base.autoneg == AUTONEG_DISABLE &&
12231             cmd->base.duplex != DUPLEX_FULL &&
12232             cmd->base.duplex != DUPLEX_HALF)
12233                 return -EINVAL;
12234
12235         ethtool_convert_link_mode_to_legacy_u32(&advertising,
12236                                                 cmd->link_modes.advertising);
12237
12238         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12239                 u32 mask = ADVERTISED_Autoneg |
12240                            ADVERTISED_Pause |
12241                            ADVERTISED_Asym_Pause;
12242
12243                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12244                         mask |= ADVERTISED_1000baseT_Half |
12245                                 ADVERTISED_1000baseT_Full;
12246
12247                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12248                         mask |= ADVERTISED_100baseT_Half |
12249                                 ADVERTISED_100baseT_Full |
12250                                 ADVERTISED_10baseT_Half |
12251                                 ADVERTISED_10baseT_Full |
12252                                 ADVERTISED_TP;
12253                 else
12254                         mask |= ADVERTISED_FIBRE;
12255
12256                 if (advertising & ~mask)
12257                         return -EINVAL;
12258
12259                 mask &= (ADVERTISED_1000baseT_Half |
12260                          ADVERTISED_1000baseT_Full |
12261                          ADVERTISED_100baseT_Half |
12262                          ADVERTISED_100baseT_Full |
12263                          ADVERTISED_10baseT_Half |
12264                          ADVERTISED_10baseT_Full);
12265
12266                 advertising &= mask;
12267         } else {
12268                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12269                         if (speed != SPEED_1000)
12270                                 return -EINVAL;
12271
12272                         if (cmd->base.duplex != DUPLEX_FULL)
12273                                 return -EINVAL;
12274                 } else {
12275                         if (speed != SPEED_100 &&
12276                             speed != SPEED_10)
12277                                 return -EINVAL;
12278                 }
12279         }
12280
12281         tg3_full_lock(tp, 0);
12282
12283         tp->link_config.autoneg = cmd->base.autoneg;
12284         if (cmd->base.autoneg == AUTONEG_ENABLE) {
12285                 tp->link_config.advertising = (advertising |
12286                                               ADVERTISED_Autoneg);
12287                 tp->link_config.speed = SPEED_UNKNOWN;
12288                 tp->link_config.duplex = DUPLEX_UNKNOWN;
12289         } else {
12290                 tp->link_config.advertising = 0;
12291                 tp->link_config.speed = speed;
12292                 tp->link_config.duplex = cmd->base.duplex;
12293         }
12294
12295         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12296
12297         tg3_warn_mgmt_link_flap(tp);
12298
12299         if (netif_running(dev))
12300                 tg3_setup_phy(tp, true);
12301
12302         tg3_full_unlock(tp);
12303
12304         return 0;
12305 }
12306
12307 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12308 {
12309         struct tg3 *tp = netdev_priv(dev);
12310
12311         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12312         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12313         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12314 }
12315
12316 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12317 {
12318         struct tg3 *tp = netdev_priv(dev);
12319
12320         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12321                 wol->supported = WAKE_MAGIC;
12322         else
12323                 wol->supported = 0;
12324         wol->wolopts = 0;
12325         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12326                 wol->wolopts = WAKE_MAGIC;
12327         memset(&wol->sopass, 0, sizeof(wol->sopass));
12328 }
12329
12330 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12331 {
12332         struct tg3 *tp = netdev_priv(dev);
12333         struct device *dp = &tp->pdev->dev;
12334
12335         if (wol->wolopts & ~WAKE_MAGIC)
12336                 return -EINVAL;
12337         if ((wol->wolopts & WAKE_MAGIC) &&
12338             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12339                 return -EINVAL;
12340
12341         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12342
12343         if (device_may_wakeup(dp))
12344                 tg3_flag_set(tp, WOL_ENABLE);
12345         else
12346                 tg3_flag_clear(tp, WOL_ENABLE);
12347
12348         return 0;
12349 }
12350
12351 static u32 tg3_get_msglevel(struct net_device *dev)
12352 {
12353         struct tg3 *tp = netdev_priv(dev);
12354         return tp->msg_enable;
12355 }
12356
12357 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12358 {
12359         struct tg3 *tp = netdev_priv(dev);
12360         tp->msg_enable = value;
12361 }
12362
12363 static int tg3_nway_reset(struct net_device *dev)
12364 {
12365         struct tg3 *tp = netdev_priv(dev);
12366         int r;
12367
12368         if (!netif_running(dev))
12369                 return -EAGAIN;
12370
12371         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12372                 return -EINVAL;
12373
12374         tg3_warn_mgmt_link_flap(tp);
12375
12376         if (tg3_flag(tp, USE_PHYLIB)) {
12377                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12378                         return -EAGAIN;
12379                 r = phy_start_aneg(mdiobus_get_phy(tp->mdio_bus, tp->phy_addr));
12380         } else {
12381                 u32 bmcr;
12382
12383                 spin_lock_bh(&tp->lock);
12384                 r = -EINVAL;
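                      /* The first tg3_readphy() below appears to be a
                       * dummy read to flush stale latched state; only
                       * the checked second read supplies the BMCR value
                       * actually used.
                       */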
12385                 tg3_readphy(tp, MII_BMCR, &bmcr);
12386                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12387                     ((bmcr & BMCR_ANENABLE) ||
12388                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12389                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12390                                                    BMCR_ANENABLE);
12391                         r = 0;
12392                 }
12393                 spin_unlock_bh(&tp->lock);
12394         }
12395
12396         return r;
12397 }
12398
12399 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12400 {
12401         struct tg3 *tp = netdev_priv(dev);
12402
12403         ering->rx_max_pending = tp->rx_std_ring_mask;
12404         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12405                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12406         else
12407                 ering->rx_jumbo_max_pending = 0;
12408
12409         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12410
12411         ering->rx_pending = tp->rx_pending;
12412         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12413                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12414         else
12415                 ering->rx_jumbo_pending = 0;
12416
12417         ering->tx_pending = tp->napi[0].tx_pending;
12418 }
12419
12420 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12421 {
12422         struct tg3 *tp = netdev_priv(dev);
12423         int i, irq_sync = 0, err = 0;
12424         bool reset_phy = false;
12425
12426         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12427             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12428             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12429             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12430             (tg3_flag(tp, TSO_BUG) &&
12431              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12432                 return -EINVAL;
12433
12434         if (netif_running(dev)) {
12435                 tg3_phy_stop(tp);
12436                 tg3_netif_stop(tp);
12437                 irq_sync = 1;
12438         }
12439
12440         tg3_full_lock(tp, irq_sync);
12441
12442         tp->rx_pending = ering->rx_pending;
12443
12444         if (tg3_flag(tp, MAX_RXPEND_64) &&
12445             tp->rx_pending > 63)
12446                 tp->rx_pending = 63;
12447
12448         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12449                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12450
12451         for (i = 0; i < tp->irq_max; i++)
12452                 tp->napi[i].tx_pending = ering->tx_pending;
12453
12454         if (netif_running(dev)) {
12455                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12456                 /* Reset PHY to avoid PHY lockup */
12457                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12458                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
12459                     tg3_asic_rev(tp) == ASIC_REV_5720)
12460                         reset_phy = true;
12461
12462                 err = tg3_restart_hw(tp, reset_phy);
12463                 if (!err)
12464                         tg3_netif_start(tp);
12465         }
12466
12467         tg3_full_unlock(tp);
12468
12469         if (irq_sync && !err)
12470                 tg3_phy_start(tp);
12471
12472         return err;
12473 }
12474
12475 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12476 {
12477         struct tg3 *tp = netdev_priv(dev);
12478
12479         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12480
12481         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12482                 epause->rx_pause = 1;
12483         else
12484                 epause->rx_pause = 0;
12485
12486         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12487                 epause->tx_pause = 1;
12488         else
12489                 epause->tx_pause = 0;
12490 }
12491
12492 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12493 {
12494         struct tg3 *tp = netdev_priv(dev);
12495         int err = 0;
12496         bool reset_phy = false;
12497
12498         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12499                 tg3_warn_mgmt_link_flap(tp);
12500
12501         if (tg3_flag(tp, USE_PHYLIB)) {
12502                 struct phy_device *phydev;
12503
12504                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
12505
12506                 if (!phy_validate_pause(phydev, epause))
12507                         return -EINVAL;
12508
12509                 tp->link_config.flowctrl = 0;
12510                 phy_set_asym_pause(phydev, epause->rx_pause, epause->tx_pause);
12511                 if (epause->rx_pause) {
12512                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12513
12514                         if (epause->tx_pause) {
12515                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12516                         }
12517                 } else if (epause->tx_pause) {
12518                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12519                 }
12520
12521                 if (epause->autoneg)
12522                         tg3_flag_set(tp, PAUSE_AUTONEG);
12523                 else
12524                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12525
12526                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12527                         if (phydev->autoneg) {
12528                                 /* phy_set_asym_pause() will
12529                                  * renegotiate the link to inform our
12530                                  * link partner of our flow control
12531                                  * settings, even if the flow control
12532                                  * is forced.  Let tg3_adjust_link()
12533                                  * do the final flow control setup.
12534                                  */
12535                                 return 0;
12536                         }
12537
12538                         if (!epause->autoneg)
12539                                 tg3_setup_flow_control(tp, 0, 0);
12540                 }
12541         } else {
12542                 int irq_sync = 0;
12543
12544                 if (netif_running(dev)) {
12545                         tg3_netif_stop(tp);
12546                         irq_sync = 1;
12547                 }
12548
12549                 tg3_full_lock(tp, irq_sync);
12550
12551                 if (epause->autoneg)
12552                         tg3_flag_set(tp, PAUSE_AUTONEG);
12553                 else
12554                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12555                 if (epause->rx_pause)
12556                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12557                 else
12558                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12559                 if (epause->tx_pause)
12560                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12561                 else
12562                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12563
12564                 if (netif_running(dev)) {
12565                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12566                         /* Reset PHY to avoid PHY lockup */
12567                         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
12568                             tg3_asic_rev(tp) == ASIC_REV_5719 ||
12569                             tg3_asic_rev(tp) == ASIC_REV_5720)
12570                                 reset_phy = true;
12571
12572                         err = tg3_restart_hw(tp, reset_phy);
12573                         if (!err)
12574                                 tg3_netif_start(tp);
12575                 }
12576
12577                 tg3_full_unlock(tp);
12578         }
12579
12580         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12581
12582         return err;
12583 }
12584
12585 static int tg3_get_sset_count(struct net_device *dev, int sset)
12586 {
12587         switch (sset) {
12588         case ETH_SS_TEST:
12589                 return TG3_NUM_TEST;
12590         case ETH_SS_STATS:
12591                 return TG3_NUM_STATS;
12592         default:
12593                 return -EOPNOTSUPP;
12594         }
12595 }
12596
12597 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12598                          u32 *rules __always_unused)
12599 {
12600         struct tg3 *tp = netdev_priv(dev);
12601
12602         if (!tg3_flag(tp, SUPPORT_MSIX))
12603                 return -EOPNOTSUPP;
12604
12605         switch (info->cmd) {
12606         case ETHTOOL_GRXRINGS:
12607                 if (netif_running(tp->dev))
12608                         info->data = tp->rxq_cnt;
12609                 else {
12610                         info->data = num_online_cpus();
12611                         if (info->data > TG3_RSS_MAX_NUM_QS)
12612                                 info->data = TG3_RSS_MAX_NUM_QS;
12613                 }
12614
12615                 return 0;
12616
12617         default:
12618                 return -EOPNOTSUPP;
12619         }
12620 }
12621
12622 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12623 {
12624         u32 size = 0;
12625         struct tg3 *tp = netdev_priv(dev);
12626
12627         if (tg3_flag(tp, SUPPORT_MSIX))
12628                 size = TG3_RSS_INDIR_TBL_SIZE;
12629
12630         return size;
12631 }
12632
12633 static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12634 {
12635         struct tg3 *tp = netdev_priv(dev);
12636         int i;
12637
12638         if (hfunc)
12639                 *hfunc = ETH_RSS_HASH_TOP;
12640         if (!indir)
12641                 return 0;
12642
12643         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12644                 indir[i] = tp->rss_ind_tbl[i];
12645
12646         return 0;
12647 }
12648
12649 static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12650                         const u8 hfunc)
12651 {
12652         struct tg3 *tp = netdev_priv(dev);
12653         size_t i;
12654
12655         /* We require at least one supported parameter to be changed and no
12656          * change in any of the unsupported parameters
12657          */
12658         if (key ||
12659             (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12660                 return -EOPNOTSUPP;
12661
12662         if (!indir)
12663                 return 0;
12664
12665         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12666                 tp->rss_ind_tbl[i] = indir[i];
12667
12668         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12669                 return 0;
12670
12671         /* It is legal to write the indirection
12672          * table while the device is running.
12673          */
12674         tg3_full_lock(tp, 0);
12675         tg3_rss_write_indir_tbl(tp);
12676         tg3_full_unlock(tp);
12677
12678         return 0;
12679 }
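      /* Note: the ethtool core validates each indirection-table entry
       * against the active rx queue count before this handler runs, so
       * a straight copy followed by the locked register update is
       * sufficient here.
       */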
12680
12681 static void tg3_get_channels(struct net_device *dev,
12682                              struct ethtool_channels *channel)
12683 {
12684         struct tg3 *tp = netdev_priv(dev);
12685         u32 deflt_qs = netif_get_num_default_rss_queues();
12686
12687         channel->max_rx = tp->rxq_max;
12688         channel->max_tx = tp->txq_max;
12689
12690         if (netif_running(dev)) {
12691                 channel->rx_count = tp->rxq_cnt;
12692                 channel->tx_count = tp->txq_cnt;
12693         } else {
12694                 if (tp->rxq_req)
12695                         channel->rx_count = tp->rxq_req;
12696                 else
12697                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12698
12699                 if (tp->txq_req)
12700                         channel->tx_count = tp->txq_req;
12701                 else
12702                         channel->tx_count = min(deflt_qs, tp->txq_max);
12703         }
12704 }
12705
12706 static int tg3_set_channels(struct net_device *dev,
12707                             struct ethtool_channels *channel)
12708 {
12709         struct tg3 *tp = netdev_priv(dev);
12710
12711         if (!tg3_flag(tp, SUPPORT_MSIX))
12712                 return -EOPNOTSUPP;
12713
12714         if (channel->rx_count > tp->rxq_max ||
12715             channel->tx_count > tp->txq_max)
12716                 return -EINVAL;
12717
12718         tp->rxq_req = channel->rx_count;
12719         tp->txq_req = channel->tx_count;
12720
12721         if (!netif_running(dev))
12722                 return 0;
12723
12724         tg3_stop(tp);
12725
12726         tg3_carrier_off(tp);
12727
12728         tg3_start(tp, true, false, false);
12729
12730         return 0;
12731 }
12732
12733 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12734 {
12735         switch (stringset) {
12736         case ETH_SS_STATS:
12737                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12738                 break;
12739         case ETH_SS_TEST:
12740                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12741                 break;
12742         default:
12743                 WARN_ON(1);     /* unknown string set requested */
12744                 break;
12745         }
12746 }
12747
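/* ethtool .set_phys_id handler ("ethtool -p") - identify the port by
 * overriding the LED controls.  Returning non-zero for ETHTOOL_ID_ACTIVE
 * lets the ethtool core drive the blink cycle, calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF at the returned rate.
 */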
12748 static int tg3_set_phys_id(struct net_device *dev,
12749                             enum ethtool_phys_id_state state)
12750 {
12751         struct tg3 *tp = netdev_priv(dev);
12752
12753         switch (state) {
12754         case ETHTOOL_ID_ACTIVE:
12755                 return 1;       /* cycle on/off once per second */
12756
12757         case ETHTOOL_ID_ON:
12758                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12759                      LED_CTRL_1000MBPS_ON |
12760                      LED_CTRL_100MBPS_ON |
12761                      LED_CTRL_10MBPS_ON |
12762                      LED_CTRL_TRAFFIC_OVERRIDE |
12763                      LED_CTRL_TRAFFIC_BLINK |
12764                      LED_CTRL_TRAFFIC_LED);
12765                 break;
12766
12767         case ETHTOOL_ID_OFF:
12768                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12769                      LED_CTRL_TRAFFIC_OVERRIDE);
12770                 break;
12771
12772         case ETHTOOL_ID_INACTIVE:
12773                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12774                 break;
12775         }
12776
12777         return 0;
12778 }
12779
12780 static void tg3_get_ethtool_stats(struct net_device *dev,
12781                                    struct ethtool_stats *estats, u64 *tmp_stats)
12782 {
12783         struct tg3 *tp = netdev_priv(dev);
12784
12785         if (tp->hw_stats)
12786                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12787         else
12788                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12789 }
12790
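/* Read the VPD block.  On devices with the standard EEPROM magic, the
 * extended VPD directory entry in NVRAM is tried first, falling back to
 * the fixed offset/length; otherwise the block is read from PCI config
 * space via pci_vpd_alloc().  The caller must kfree() the result.
 */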
12791 static __be32 *tg3_vpd_readblock(struct tg3 *tp, unsigned int *vpdlen)
12792 {
12793         int i;
12794         __be32 *buf;
12795         u32 offset = 0, len = 0;
12796         u32 magic, val;
12797
12798         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12799                 return NULL;
12800
12801         if (magic == TG3_EEPROM_MAGIC) {
12802                 for (offset = TG3_NVM_DIR_START;
12803                      offset < TG3_NVM_DIR_END;
12804                      offset += TG3_NVM_DIRENT_SIZE) {
12805                         if (tg3_nvram_read(tp, offset, &val))
12806                                 return NULL;
12807
12808                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12809                             TG3_NVM_DIRTYPE_EXTVPD)
12810                                 break;
12811                 }
12812
12813                 if (offset != TG3_NVM_DIR_END) {
12814                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12815                         if (tg3_nvram_read(tp, offset + 4, &offset))
12816                                 return NULL;
12817
12818                         offset = tg3_nvram_logical_addr(tp, offset);
12819                 }
12820
12821                 if (!offset || !len) {
12822                         offset = TG3_NVM_VPD_OFF;
12823                         len = TG3_NVM_VPD_LEN;
12824                 }
12825
12826                 buf = kmalloc(len, GFP_KERNEL);
12827                 if (!buf)
12828                         return NULL;
12829
12830                 for (i = 0; i < len; i += 4) {
12831                         /* The data is in little-endian format in NVRAM.
12832                          * Use the big-endian read routines to preserve
12833                          * the byte order as it exists in NVRAM.
12834                          */
12835                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12836                                 goto error;
12837                 }
12838                 *vpdlen = len;
12839         } else {
12840                 buf = pci_vpd_alloc(tp->pdev, vpdlen);
12841                 if (IS_ERR(buf))
12842                         return NULL;
12843         }
12844
12845         return buf;
12846
12847 error:
12848         kfree(buf);
12849         return NULL;
12850 }
12851
12852 #define NVRAM_TEST_SIZE 0x100
12853 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12854 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12855 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12856 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12857 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12858 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12859 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12860 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12861
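/* NVRAM self-test.  Depending on the magic value, this verifies either
 * a selfboot image (8-bit checksum or parity-protected hardware format)
 * or the legacy layout: CRCs over the bootstrap and manufacturing
 * sections, then the VPD block checksum.
 */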
12862 static int tg3_test_nvram(struct tg3 *tp)
12863 {
12864         u32 csum, magic;
12865         __be32 *buf;
12866         int i, j, k, err = 0, size;
12867         unsigned int len;
12868
12869         if (tg3_flag(tp, NO_NVRAM))
12870                 return 0;
12871
12872         if (tg3_nvram_read(tp, 0, &magic) != 0)
12873                 return -EIO;
12874
12875         if (magic == TG3_EEPROM_MAGIC)
12876                 size = NVRAM_TEST_SIZE;
12877         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12878                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12879                     TG3_EEPROM_SB_FORMAT_1) {
12880                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12881                         case TG3_EEPROM_SB_REVISION_0:
12882                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12883                                 break;
12884                         case TG3_EEPROM_SB_REVISION_2:
12885                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12886                                 break;
12887                         case TG3_EEPROM_SB_REVISION_3:
12888                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12889                                 break;
12890                         case TG3_EEPROM_SB_REVISION_4:
12891                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12892                                 break;
12893                         case TG3_EEPROM_SB_REVISION_5:
12894                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12895                                 break;
12896                         case TG3_EEPROM_SB_REVISION_6:
12897                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12898                                 break;
12899                         default:
12900                                 return -EIO;
12901                         }
12902                 } else
12903                         return 0;
12904         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12905                 size = NVRAM_SELFBOOT_HW_SIZE;
12906         else
12907                 return -EIO;
12908
12909         buf = kmalloc(size, GFP_KERNEL);
12910         if (buf == NULL)
12911                 return -ENOMEM;
12912
12913         err = -EIO;
12914         for (i = 0, j = 0; i < size; i += 4, j++) {
12915                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12916                 if (err)
12917                         break;
12918         }
12919         if (i < size)
12920                 goto out;
12921
12922         /* Selfboot format */
12923         magic = be32_to_cpu(buf[0]);
12924         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12925             TG3_EEPROM_MAGIC_FW) {
12926                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12927
12928                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12929                     TG3_EEPROM_SB_REVISION_2) {
12930                         /* For rev 2, the csum doesn't include the MBA. */
12931                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12932                                 csum8 += buf8[i];
12933                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12934                                 csum8 += buf8[i];
12935                 } else {
12936                         for (i = 0; i < size; i++)
12937                                 csum8 += buf8[i];
12938                 }
12939
12940                 if (csum8 == 0) {
12941                         err = 0;
12942                         goto out;
12943                 }
12944
12945                 err = -EIO;
12946                 goto out;
12947         }
12948
12949         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12950             TG3_EEPROM_MAGIC_HW) {
12951                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12952                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12953                 u8 *buf8 = (u8 *) buf;
12954
12955                 /* Separate the parity bits and the data bytes.  */
12956                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12957                         if ((i == 0) || (i == 8)) {
12958                                 int l;
12959                                 u8 msk;
12960
12961                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12962                                         parity[k++] = buf8[i] & msk;
12963                                 i++;
12964                         } else if (i == 16) {
12965                                 int l;
12966                                 u8 msk;
12967
12968                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12969                                         parity[k++] = buf8[i] & msk;
12970                                 i++;
12971
12972                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12973                                         parity[k++] = buf8[i] & msk;
12974                                 i++;
12975                         }
12976                         data[j++] = buf8[i];
12977                 }
12978
12979                 err = -EIO;
12980                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12981                         u8 hw8 = hweight8(data[i]);
12982
12983                         if ((hw8 & 0x1) && parity[i])
12984                                 goto out;
12985                         else if (!(hw8 & 0x1) && !parity[i])
12986                                 goto out;
12987                 }
12988                 err = 0;
12989                 goto out;
12990         }
12991
12992         err = -EIO;
12993
12994         /* Bootstrap checksum at offset 0x10 */
12995         csum = calc_crc((unsigned char *) buf, 0x10);
12996         if (csum != le32_to_cpu(buf[0x10/4]))
12997                 goto out;
12998
12999         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
13000         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
13001         if (csum != le32_to_cpu(buf[0xfc/4]))
13002                 goto out;
13003
13004         kfree(buf);
13005
13006         buf = tg3_vpd_readblock(tp, &len);
13007         if (!buf)
13008                 return -ENOMEM;
13009
13010         err = pci_vpd_check_csum(buf, len);
13011         /* go on if no checksum found */
13012         if (err == 1)
13013                 err = 0;
13014 out:
13015         kfree(buf);
13016         return err;
13017 }
13018
13019 #define TG3_SERDES_TIMEOUT_SEC  2
13020 #define TG3_COPPER_TIMEOUT_SEC  6
13021
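/* Link self-test: poll once per second for link-up, allowing a longer
 * settle time for copper PHYs than for SerDes.
 */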
13022 static int tg3_test_link(struct tg3 *tp)
13023 {
13024         int i, max;
13025
13026         if (!netif_running(tp->dev))
13027                 return -ENODEV;
13028
13029         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13030                 max = TG3_SERDES_TIMEOUT_SEC;
13031         else
13032                 max = TG3_COPPER_TIMEOUT_SEC;
13033
13034         for (i = 0; i < max; i++) {
13035                 if (tp->link_up)
13036                         return 0;
13037
13038                 if (msleep_interruptible(1000))
13039                         break;
13040         }
13041
13042         return -EIO;
13043 }
13044
13045 /* Only test the commonly used registers */
13046 static int tg3_test_registers(struct tg3 *tp)
13047 {
13048         int i, is_5705, is_5750;
13049         u32 offset, read_mask, write_mask, val, save_val, read_val;
13050         static struct {
13051                 u16 offset;
13052                 u16 flags;
13053 #define TG3_FL_5705     0x1
13054 #define TG3_FL_NOT_5705 0x2
13055 #define TG3_FL_NOT_5788 0x4
13056 #define TG3_FL_NOT_5750 0x8
13057                 u32 read_mask;
13058                 u32 write_mask;
13059         } reg_tbl[] = {
13060                 /* MAC Control Registers */
13061                 { MAC_MODE, TG3_FL_NOT_5705,
13062                         0x00000000, 0x00ef6f8c },
13063                 { MAC_MODE, TG3_FL_5705,
13064                         0x00000000, 0x01ef6b8c },
13065                 { MAC_STATUS, TG3_FL_NOT_5705,
13066                         0x03800107, 0x00000000 },
13067                 { MAC_STATUS, TG3_FL_5705,
13068                         0x03800100, 0x00000000 },
13069                 { MAC_ADDR_0_HIGH, 0x0000,
13070                         0x00000000, 0x0000ffff },
13071                 { MAC_ADDR_0_LOW, 0x0000,
13072                         0x00000000, 0xffffffff },
13073                 { MAC_RX_MTU_SIZE, 0x0000,
13074                         0x00000000, 0x0000ffff },
13075                 { MAC_TX_MODE, 0x0000,
13076                         0x00000000, 0x00000070 },
13077                 { MAC_TX_LENGTHS, 0x0000,
13078                         0x00000000, 0x00003fff },
13079                 { MAC_RX_MODE, TG3_FL_NOT_5705,
13080                         0x00000000, 0x000007fc },
13081                 { MAC_RX_MODE, TG3_FL_5705,
13082                         0x00000000, 0x000007dc },
13083                 { MAC_HASH_REG_0, 0x0000,
13084                         0x00000000, 0xffffffff },
13085                 { MAC_HASH_REG_1, 0x0000,
13086                         0x00000000, 0xffffffff },
13087                 { MAC_HASH_REG_2, 0x0000,
13088                         0x00000000, 0xffffffff },
13089                 { MAC_HASH_REG_3, 0x0000,
13090                         0x00000000, 0xffffffff },
13091
13092                 /* Receive Data and Receive BD Initiator Control Registers. */
13093                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13094                         0x00000000, 0xffffffff },
13095                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13096                         0x00000000, 0xffffffff },
13097                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13098                         0x00000000, 0x00000003 },
13099                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13100                         0x00000000, 0xffffffff },
13101                 { RCVDBDI_STD_BD+0, 0x0000,
13102                         0x00000000, 0xffffffff },
13103                 { RCVDBDI_STD_BD+4, 0x0000,
13104                         0x00000000, 0xffffffff },
13105                 { RCVDBDI_STD_BD+8, 0x0000,
13106                         0x00000000, 0xffff0002 },
13107                 { RCVDBDI_STD_BD+0xc, 0x0000,
13108                         0x00000000, 0xffffffff },
13109
13110                 /* Receive BD Initiator Control Registers. */
13111                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13112                         0x00000000, 0xffffffff },
13113                 { RCVBDI_STD_THRESH, TG3_FL_5705,
13114                         0x00000000, 0x000003ff },
13115                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13116                         0x00000000, 0xffffffff },
13117
13118                 /* Host Coalescing Control Registers. */
13119                 { HOSTCC_MODE, TG3_FL_NOT_5705,
13120                         0x00000000, 0x00000004 },
13121                 { HOSTCC_MODE, TG3_FL_5705,
13122                         0x00000000, 0x000000f6 },
13123                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13124                         0x00000000, 0xffffffff },
13125                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13126                         0x00000000, 0x000003ff },
13127                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13128                         0x00000000, 0xffffffff },
13129                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13130                         0x00000000, 0x000003ff },
13131                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13132                         0x00000000, 0xffffffff },
13133                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13134                         0x00000000, 0x000000ff },
13135                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13136                         0x00000000, 0xffffffff },
13137                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13138                         0x00000000, 0x000000ff },
13139                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13140                         0x00000000, 0xffffffff },
13141                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13142                         0x00000000, 0xffffffff },
13143                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13144                         0x00000000, 0xffffffff },
13145                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13146                         0x00000000, 0x000000ff },
13147                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13148                         0x00000000, 0xffffffff },
13149                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13150                         0x00000000, 0x000000ff },
13151                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13152                         0x00000000, 0xffffffff },
13153                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13154                         0x00000000, 0xffffffff },
13155                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13156                         0x00000000, 0xffffffff },
13157                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13158                         0x00000000, 0xffffffff },
13159                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13160                         0x00000000, 0xffffffff },
13161                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13162                         0xffffffff, 0x00000000 },
13163                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13164                         0xffffffff, 0x00000000 },
13165
13166                 /* Buffer Manager Control Registers. */
13167                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13168                         0x00000000, 0x007fff80 },
13169                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13170                         0x00000000, 0x007fffff },
13171                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13172                         0x00000000, 0x0000003f },
13173                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13174                         0x00000000, 0x000001ff },
13175                 { BUFMGR_MB_HIGH_WATER, 0x0000,
13176                         0x00000000, 0x000001ff },
13177                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13178                         0xffffffff, 0x00000000 },
13179                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13180                         0xffffffff, 0x00000000 },
13181
13182                 /* Mailbox Registers */
13183                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13184                         0x00000000, 0x000001ff },
13185                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13186                         0x00000000, 0x000001ff },
13187                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13188                         0x00000000, 0x000007ff },
13189                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13190                         0x00000000, 0x000001ff },
13191
13192                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13193         };
13194
13195         is_5705 = is_5750 = 0;
13196         if (tg3_flag(tp, 5705_PLUS)) {
13197                 is_5705 = 1;
13198                 if (tg3_flag(tp, 5750_PLUS))
13199                         is_5750 = 1;
13200         }
13201
13202         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13203                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13204                         continue;
13205
13206                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13207                         continue;
13208
13209                 if (tg3_flag(tp, IS_5788) &&
13210                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
13211                         continue;
13212
13213                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13214                         continue;
13215
13216                 offset = (u32) reg_tbl[i].offset;
13217                 read_mask = reg_tbl[i].read_mask;
13218                 write_mask = reg_tbl[i].write_mask;
13219
13220                 /* Save the original register content */
13221                 save_val = tr32(offset);
13222
13223                 /* Determine the read-only value. */
13224                 read_val = save_val & read_mask;
13225
13226                 /* Write zero to the register, then make sure the read-only bits
13227                  * are not changed and the read/write bits are all zeros.
13228                  */
13229                 tw32(offset, 0);
13230
13231                 val = tr32(offset);
13232
13233                 /* Test the read-only and read/write bits. */
13234                 if (((val & read_mask) != read_val) || (val & write_mask))
13235                         goto out;
13236
13237                 /* Write ones to all the bits defined by RdMask and WrMask, then
13238                  * make sure the read-only bits are not changed and the
13239                  * read/write bits are all ones.
13240                  */
13241                 tw32(offset, read_mask | write_mask);
13242
13243                 val = tr32(offset);
13244
13245                 /* Test the read-only bits. */
13246                 if ((val & read_mask) != read_val)
13247                         goto out;
13248
13249                 /* Test the read/write bits. */
13250                 if ((val & write_mask) != write_mask)
13251                         goto out;
13252
13253                 tw32(offset, save_val);
13254         }
13255
13256         return 0;
13257
13258 out:
13259         if (netif_msg_hw(tp))
13260                 netdev_err(tp->dev,
13261                            "Register test failed at offset %x\n", offset);
13262         tw32(offset, save_val);
13263         return -EIO;
13264 }
13265
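/* Write/read-back each test pattern across the given internal memory
 * window in 4-byte steps - all zeros, then all ones, then the mixed
 * pattern 0xaa55a55a - failing on the first mismatch.
 */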
13266 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13267 {
13268         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13269         int i;
13270         u32 j;
13271
13272         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13273                 for (j = 0; j < len; j += 4) {
13274                         u32 val;
13275
13276                         tg3_write_mem(tp, offset + j, test_pattern[i]);
13277                         tg3_read_mem(tp, offset + j, &val);
13278                         if (val != test_pattern[i])
13279                                 return -EIO;
13280                 }
13281         }
13282         return 0;
13283 }
13284
13285 static int tg3_test_memory(struct tg3 *tp)
13286 {
13287         static struct mem_entry {
13288                 u32 offset;
13289                 u32 len;
13290         } mem_tbl_570x[] = {
13291                 { 0x00000000, 0x00b50},
13292                 { 0x00002000, 0x1c000},
13293                 { 0xffffffff, 0x00000}
13294         }, mem_tbl_5705[] = {
13295                 { 0x00000100, 0x0000c},
13296                 { 0x00000200, 0x00008},
13297                 { 0x00004000, 0x00800},
13298                 { 0x00006000, 0x01000},
13299                 { 0x00008000, 0x02000},
13300                 { 0x00010000, 0x0e000},
13301                 { 0xffffffff, 0x00000}
13302         }, mem_tbl_5755[] = {
13303                 { 0x00000200, 0x00008},
13304                 { 0x00004000, 0x00800},
13305                 { 0x00006000, 0x00800},
13306                 { 0x00008000, 0x02000},
13307                 { 0x00010000, 0x0c000},
13308                 { 0xffffffff, 0x00000}
13309         }, mem_tbl_5906[] = {
13310                 { 0x00000200, 0x00008},
13311                 { 0x00004000, 0x00400},
13312                 { 0x00006000, 0x00400},
13313                 { 0x00008000, 0x01000},
13314                 { 0x00010000, 0x01000},
13315                 { 0xffffffff, 0x00000}
13316         }, mem_tbl_5717[] = {
13317                 { 0x00000200, 0x00008},
13318                 { 0x00010000, 0x0a000},
13319                 { 0x00020000, 0x13c00},
13320                 { 0xffffffff, 0x00000}
13321         }, mem_tbl_57765[] = {
13322                 { 0x00000200, 0x00008},
13323                 { 0x00004000, 0x00800},
13324                 { 0x00006000, 0x09800},
13325                 { 0x00010000, 0x0a000},
13326                 { 0xffffffff, 0x00000}
13327         };
13328         struct mem_entry *mem_tbl;
13329         int err = 0;
13330         int i;
13331
13332         if (tg3_flag(tp, 5717_PLUS))
13333                 mem_tbl = mem_tbl_5717;
13334         else if (tg3_flag(tp, 57765_CLASS) ||
13335                  tg3_asic_rev(tp) == ASIC_REV_5762)
13336                 mem_tbl = mem_tbl_57765;
13337         else if (tg3_flag(tp, 5755_PLUS))
13338                 mem_tbl = mem_tbl_5755;
13339         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13340                 mem_tbl = mem_tbl_5906;
13341         else if (tg3_flag(tp, 5705_PLUS))
13342                 mem_tbl = mem_tbl_5705;
13343         else
13344                 mem_tbl = mem_tbl_570x;
13345
13346         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13347                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13348                 if (err)
13349                         break;
13350         }
13351
13352         return err;
13353 }
13354
13355 #define TG3_TSO_MSS             500
13356
13357 #define TG3_TSO_IP_HDR_LEN      20
13358 #define TG3_TSO_TCP_HDR_LEN     20
13359 #define TG3_TSO_TCP_OPT_LEN     12
13360
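/* Canned frame tail used by the TSO loopback test: ethertype 0x0800
 * followed by a 20-byte IPv4 header, a 20-byte TCP header and 12 bytes
 * of TCP options (NOP, NOP, timestamp).  The IP total length and TCP
 * checksum fields are adjusted at run time in tg3_run_loopback().
 */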
13361 static const u8 tg3_tso_header[] = {
13362 0x08, 0x00,
13363 0x45, 0x00, 0x00, 0x00,
13364 0x00, 0x00, 0x40, 0x00,
13365 0x40, 0x06, 0x00, 0x00,
13366 0x0a, 0x00, 0x00, 0x01,
13367 0x0a, 0x00, 0x00, 0x02,
13368 0x0d, 0x00, 0xe0, 0x00,
13369 0x00, 0x00, 0x01, 0x00,
13370 0x00, 0x00, 0x02, 0x00,
13371 0x80, 0x10, 0x10, 0x00,
13372 0x14, 0x09, 0x00, 0x00,
13373 0x01, 0x01, 0x08, 0x0a,
13374 0x11, 0x11, 0x11, 0x11,
13375 0x11, 0x11, 0x11, 0x11,
13376 };
13377
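/* Transmit one (or, for TSO, several) self-addressed test frames and
 * verify that they come back on the expected RX ring with the same
 * payload bytes.  The caller is expected to have put the MAC or PHY
 * into loopback mode beforehand.
 */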
13378 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13379 {
13380         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13381         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13382         u32 budget;
13383         struct sk_buff *skb;
13384         u8 *tx_data, *rx_data;
13385         dma_addr_t map;
13386         int num_pkts, tx_len, rx_len, i, err;
13387         struct tg3_rx_buffer_desc *desc;
13388         struct tg3_napi *tnapi, *rnapi;
13389         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13390
13391         tnapi = &tp->napi[0];
13392         rnapi = &tp->napi[0];
13393         if (tp->irq_cnt > 1) {
13394                 if (tg3_flag(tp, ENABLE_RSS))
13395                         rnapi = &tp->napi[1];
13396                 if (tg3_flag(tp, ENABLE_TSS))
13397                         tnapi = &tp->napi[1];
13398         }
13399         coal_now = tnapi->coal_now | rnapi->coal_now;
13400
13401         err = -EIO;
13402
13403         tx_len = pktsz;
13404         skb = netdev_alloc_skb(tp->dev, tx_len);
13405         if (!skb)
13406                 return -ENOMEM;
13407
13408         tx_data = skb_put(skb, tx_len);
13409         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13410         memset(tx_data + ETH_ALEN, 0x0, 8);
13411
13412         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13413
13414         if (tso_loopback) {
13415                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13416
13417                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13418                               TG3_TSO_TCP_OPT_LEN;
13419
13420                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13421                        sizeof(tg3_tso_header));
13422                 mss = TG3_TSO_MSS;
13423
13424                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13425                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13426
13427                 /* Set the total length field in the IP header */
13428                 iph->tot_len = htons((u16)(mss + hdr_len));
13429
13430                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13431                               TXD_FLAG_CPU_POST_DMA);
13432
13433                 if (tg3_flag(tp, HW_TSO_1) ||
13434                     tg3_flag(tp, HW_TSO_2) ||
13435                     tg3_flag(tp, HW_TSO_3)) {
13436                         struct tcphdr *th;
13437                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13438                         th = (struct tcphdr *)&tx_data[val];
13439                         th->check = 0;
13440                 } else
13441                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13442
13443                 if (tg3_flag(tp, HW_TSO_3)) {
13444                         mss |= (hdr_len & 0xc) << 12;
13445                         if (hdr_len & 0x10)
13446                                 base_flags |= 0x00000010;
13447                         base_flags |= (hdr_len & 0x3e0) << 5;
13448                 } else if (tg3_flag(tp, HW_TSO_2))
13449                         mss |= hdr_len << 9;
13450                 else if (tg3_flag(tp, HW_TSO_1) ||
13451                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13452                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13453                 } else {
13454                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13455                 }
13456
13457                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13458         } else {
13459                 num_pkts = 1;
13460                 data_off = ETH_HLEN;
13461
13462                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13463                     tx_len > VLAN_ETH_FRAME_LEN)
13464                         base_flags |= TXD_FLAG_JMB_PKT;
13465         }
13466
13467         for (i = data_off; i < tx_len; i++)
13468                 tx_data[i] = (u8) (i & 0xff);
13469
13470         map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE);
13471         if (dma_mapping_error(&tp->pdev->dev, map)) {
13472                 dev_kfree_skb(skb);
13473                 return -EIO;
13474         }
13475
13476         val = tnapi->tx_prod;
13477         tnapi->tx_buffers[val].skb = skb;
13478         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13479
13480         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13481                rnapi->coal_now);
13482
13483         udelay(10);
13484
13485         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13486
13487         budget = tg3_tx_avail(tnapi);
13488         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13489                             base_flags | TXD_FLAG_END, mss, 0)) {
13490                 tnapi->tx_buffers[val].skb = NULL;
13491                 dev_kfree_skb(skb);
13492                 return -EIO;
13493         }
13494
13495         tnapi->tx_prod++;
13496
13497         /* Sync BD data before updating mailbox */
13498         wmb();
13499
13500         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13501         tr32_mailbox(tnapi->prodmbox);
13502
13503         udelay(10);
13504
13505         /* 35 polls x 10 usec = 350 usec, enough time on some 10/100 Mbps devices.  */
13506         for (i = 0; i < 35; i++) {
13507                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13508                        coal_now);
13509
13510                 udelay(10);
13511
13512                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13513                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13514                 if ((tx_idx == tnapi->tx_prod) &&
13515                     (rx_idx == (rx_start_idx + num_pkts)))
13516                         break;
13517         }
13518
13519         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13520         dev_kfree_skb(skb);
13521
13522         if (tx_idx != tnapi->tx_prod)
13523                 goto out;
13524
13525         if (rx_idx != rx_start_idx + num_pkts)
13526                 goto out;
13527
13528         val = data_off;
13529         while (rx_idx != rx_start_idx) {
13530                 desc = &rnapi->rx_rcb[rx_start_idx++];
13531                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13532                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13533
13534                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13535                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13536                         goto out;
13537
13538                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13539                          - ETH_FCS_LEN;
13540
13541                 if (!tso_loopback) {
13542                         if (rx_len != tx_len)
13543                                 goto out;
13544
13545                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13546                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13547                                         goto out;
13548                         } else {
13549                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13550                                         goto out;
13551                         }
13552                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13553                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13554                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13555                         goto out;
13556                 }
13557
13558                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13559                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13560                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13561                                              mapping);
13562                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13563                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13564                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13565                                              mapping);
13566                 } else
13567                         goto out;
13568
13569                 dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len,
13570                                         DMA_FROM_DEVICE);
13571
13572                 rx_data += TG3_RX_OFFSET(tp);
13573                 for (i = data_off; i < rx_len; i++, val++) {
13574                         if (*(rx_data + i) != (u8) (val & 0xff))
13575                                 goto out;
13576                 }
13577         }
13578
13579         err = 0;
13580
13581         /* tg3_free_rings will unmap and free the rx_data */
13582 out:
13583         return err;
13584 }
13585
13586 #define TG3_STD_LOOPBACK_FAILED         1
13587 #define TG3_JMB_LOOPBACK_FAILED         2
13588 #define TG3_TSO_LOOPBACK_FAILED         4
13589 #define TG3_LOOPBACK_FAILED \
13590         (TG3_STD_LOOPBACK_FAILED | \
13591          TG3_JMB_LOOPBACK_FAILED | \
13592          TG3_TSO_LOOPBACK_FAILED)
13593
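/* Run the standard, TSO and jumbo loopback variants in MAC, PHY and
 * (optionally) external loopback modes, accumulating per-mode failure
 * bits in data[].  EEE capability is masked off for the duration
 * (presumably so LPI entry cannot perturb the timed packet turnaround)
 * and restored afterwards.
 */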
13594 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13595 {
13596         int err = -EIO;
13597         u32 eee_cap;
13598         u32 jmb_pkt_sz = 9000;
13599
13600         if (tp->dma_limit)
13601                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13602
13603         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13604         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13605
13606         if (!netif_running(tp->dev)) {
13607                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13608                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13609                 if (do_extlpbk)
13610                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13611                 goto done;
13612         }
13613
13614         err = tg3_reset_hw(tp, true);
13615         if (err) {
13616                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13617                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13618                 if (do_extlpbk)
13619                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13620                 goto done;
13621         }
13622
13623         if (tg3_flag(tp, ENABLE_RSS)) {
13624                 int i;
13625
13626                 /* Reroute all rx packets to the 1st queue */
13627                 for (i = MAC_RSS_INDIR_TBL_0;
13628                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13629                         tw32(i, 0x0);
13630         }
13631
13632         /* HW erratum - MAC loopback fails in some cases on 5780.
13633          * Normal traffic and PHY loopback are not affected by
13634          * this erratum.  Also, the MAC loopback test is deprecated
13635          * for all newer ASIC revisions.
13636          */
13637         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13638             !tg3_flag(tp, CPMU_PRESENT)) {
13639                 tg3_mac_loopback(tp, true);
13640
13641                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13642                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13643
13644                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13645                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13646                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13647
13648                 tg3_mac_loopback(tp, false);
13649         }
13650
13651         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13652             !tg3_flag(tp, USE_PHYLIB)) {
13653                 int i;
13654
13655                 tg3_phy_lpbk_set(tp, 0, false);
13656
13657                 /* Wait for link */
13658                 for (i = 0; i < 100; i++) {
13659                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13660                                 break;
13661                         mdelay(1);
13662                 }
13663
13664                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13665                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13666                 if (tg3_flag(tp, TSO_CAPABLE) &&
13667                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13668                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13669                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13670                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13671                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13672
13673                 if (do_extlpbk) {
13674                         tg3_phy_lpbk_set(tp, 0, true);
13675
13676                         /* All link indications report up, but the hardware
13677                          * isn't really ready for about 20 msec.  Double it
13678                          * to be sure.
13679                          */
13680                         mdelay(40);
13681
13682                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13683                                 data[TG3_EXT_LOOPB_TEST] |=
13684                                                         TG3_STD_LOOPBACK_FAILED;
13685                         if (tg3_flag(tp, TSO_CAPABLE) &&
13686                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13687                                 data[TG3_EXT_LOOPB_TEST] |=
13688                                                         TG3_TSO_LOOPBACK_FAILED;
13689                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13690                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13691                                 data[TG3_EXT_LOOPB_TEST] |=
13692                                                         TG3_JMB_LOOPBACK_FAILED;
13693                 }
13694
13695                 /* Re-enable gphy autopowerdown. */
13696                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13697                         tg3_phy_toggle_apd(tp, true);
13698         }
13699
13700         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13701                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13702
13703 done:
13704         tp->phy_flags |= eee_cap;
13705
13706         return err;
13707 }
13708
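/* ethtool self-test entry point ("ethtool -t", with "offline" adding
 * the disruptive register/memory/loopback/interrupt tests).  A device
 * in a low-power state is powered up first; offline testing halts the
 * chip and restarts it once the tests complete.
 */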
13709 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13710                           u64 *data)
13711 {
13712         struct tg3 *tp = netdev_priv(dev);
13713         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13714
13715         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13716                 if (tg3_power_up(tp)) {
13717                         etest->flags |= ETH_TEST_FL_FAILED;
13718                         memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13719                         return;
13720                 }
13721                 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13722         }
13723
13724         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13725
13726         if (tg3_test_nvram(tp) != 0) {
13727                 etest->flags |= ETH_TEST_FL_FAILED;
13728                 data[TG3_NVRAM_TEST] = 1;
13729         }
13730         if (!doextlpbk && tg3_test_link(tp)) {
13731                 etest->flags |= ETH_TEST_FL_FAILED;
13732                 data[TG3_LINK_TEST] = 1;
13733         }
13734         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13735                 int err, err2 = 0, irq_sync = 0;
13736
13737                 if (netif_running(dev)) {
13738                         tg3_phy_stop(tp);
13739                         tg3_netif_stop(tp);
13740                         irq_sync = 1;
13741                 }
13742
13743                 tg3_full_lock(tp, irq_sync);
13744                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13745                 err = tg3_nvram_lock(tp);
13746                 tg3_halt_cpu(tp, RX_CPU_BASE);
13747                 if (!tg3_flag(tp, 5705_PLUS))
13748                         tg3_halt_cpu(tp, TX_CPU_BASE);
13749                 if (!err)
13750                         tg3_nvram_unlock(tp);
13751
13752                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13753                         tg3_phy_reset(tp);
13754
13755                 if (tg3_test_registers(tp) != 0) {
13756                         etest->flags |= ETH_TEST_FL_FAILED;
13757                         data[TG3_REGISTER_TEST] = 1;
13758                 }
13759
13760                 if (tg3_test_memory(tp) != 0) {
13761                         etest->flags |= ETH_TEST_FL_FAILED;
13762                         data[TG3_MEMORY_TEST] = 1;
13763                 }
13764
13765                 if (doextlpbk)
13766                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13767
13768                 if (tg3_test_loopback(tp, data, doextlpbk))
13769                         etest->flags |= ETH_TEST_FL_FAILED;
13770
13771                 tg3_full_unlock(tp);
13772
13773                 if (tg3_test_interrupt(tp) != 0) {
13774                         etest->flags |= ETH_TEST_FL_FAILED;
13775                         data[TG3_INTERRUPT_TEST] = 1;
13776                 }
13777
13778                 tg3_full_lock(tp, 0);
13779
13780                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13781                 if (netif_running(dev)) {
13782                         tg3_flag_set(tp, INIT_COMPLETE);
13783                         err2 = tg3_restart_hw(tp, true);
13784                         if (!err2)
13785                                 tg3_netif_start(tp);
13786                 }
13787
13788                 tg3_full_unlock(tp);
13789
13790                 if (irq_sync && !err2)
13791                         tg3_phy_start(tp);
13792         }
13793         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13794                 tg3_power_down_prepare(tp);
13795
13796 }
13797
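/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config RX filter
 * onto the chip's RX PTP control bits and latch the TX timestamp
 * enable flag.  The accepted config is copied back to user space, as
 * the hwtstamp API requires.
 */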
13798 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13799 {
13800         struct tg3 *tp = netdev_priv(dev);
13801         struct hwtstamp_config stmpconf;
13802
13803         if (!tg3_flag(tp, PTP_CAPABLE))
13804                 return -EOPNOTSUPP;
13805
13806         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13807                 return -EFAULT;
13808
13809         if (stmpconf.flags)
13810                 return -EINVAL;
13811
13812         if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13813             stmpconf.tx_type != HWTSTAMP_TX_OFF)
13814                 return -ERANGE;
13815
13816         switch (stmpconf.rx_filter) {
13817         case HWTSTAMP_FILTER_NONE:
13818                 tp->rxptpctl = 0;
13819                 break;
13820         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13821                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13822                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13823                 break;
13824         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13825                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13826                                TG3_RX_PTP_CTL_SYNC_EVNT;
13827                 break;
13828         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13829                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13830                                TG3_RX_PTP_CTL_DELAY_REQ;
13831                 break;
13832         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13833                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13834                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13835                 break;
13836         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13837                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13838                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13839                 break;
13840         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13841                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13842                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13843                 break;
13844         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13845                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13846                                TG3_RX_PTP_CTL_SYNC_EVNT;
13847                 break;
13848         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13849                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13850                                TG3_RX_PTP_CTL_SYNC_EVNT;
13851                 break;
13852         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13854                                TG3_RX_PTP_CTL_SYNC_EVNT;
13855                 break;
13856         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13858                                TG3_RX_PTP_CTL_DELAY_REQ;
13859                 break;
13860         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13861                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13862                                TG3_RX_PTP_CTL_DELAY_REQ;
13863                 break;
13864         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13865                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13866                                TG3_RX_PTP_CTL_DELAY_REQ;
13867                 break;
13868         default:
13869                 return -ERANGE;
13870         }
13871
13872         if (netif_running(dev) && tp->rxptpctl)
13873                 tw32(TG3_RX_PTP_CTL,
13874                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13875
13876         if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13877                 tg3_flag_set(tp, TX_TSTAMP_EN);
13878         else
13879                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13880
13881         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13882                 -EFAULT : 0;
13883 }
13884
13885 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13886 {
13887         struct tg3 *tp = netdev_priv(dev);
13888         struct hwtstamp_config stmpconf;
13889
13890         if (!tg3_flag(tp, PTP_CAPABLE))
13891                 return -EOPNOTSUPP;
13892
13893         stmpconf.flags = 0;
13894         stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13895                             HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13896
13897         switch (tp->rxptpctl) {
13898         case 0:
13899                 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13900                 break;
13901         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13902                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13903                 break;
13904         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13905                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13906                 break;
13907         case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13908                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13909                 break;
13910         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13911                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13912                 break;
13913         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13914                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13915                 break;
13916         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13917                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13918                 break;
13919         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13920                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13921                 break;
13922         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13923                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13924                 break;
13925         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13926                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13927                 break;
13928         case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13929                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13930                 break;
13931         case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13932                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13933                 break;
13934         case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13935                 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13936                 break;
13937         default:
13938                 WARN_ON_ONCE(1);
13939                 return -ERANGE;
13940         }
13941
13942         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13943                 -EFAULT : 0;
13944 }
13945
13946 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13947 {
13948         struct mii_ioctl_data *data = if_mii(ifr);
13949         struct tg3 *tp = netdev_priv(dev);
13950         int err;
13951
13952         if (tg3_flag(tp, USE_PHYLIB)) {
13953                 struct phy_device *phydev;
13954                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13955                         return -EAGAIN;
13956                 phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
13957                 return phy_mii_ioctl(phydev, ifr, cmd);
13958         }
13959
13960         switch (cmd) {
13961         case SIOCGMIIPHY:
13962                 data->phy_id = tp->phy_addr;
13963
13964                 fallthrough;
13965         case SIOCGMIIREG: {
13966                 u32 mii_regval;
13967
13968                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13969                         break;                  /* We have no PHY */
13970
13971                 if (!netif_running(dev))
13972                         return -EAGAIN;
13973
13974                 spin_lock_bh(&tp->lock);
13975                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13976                                     data->reg_num & 0x1f, &mii_regval);
13977                 spin_unlock_bh(&tp->lock);
13978
13979                 data->val_out = mii_regval;
13980
13981                 return err;
13982         }
13983
13984         case SIOCSMIIREG:
13985                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13986                         break;                  /* We have no PHY */
13987
13988                 if (!netif_running(dev))
13989                         return -EAGAIN;
13990
13991                 spin_lock_bh(&tp->lock);
13992                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13993                                      data->reg_num & 0x1f, data->val_in);
13994                 spin_unlock_bh(&tp->lock);
13995
13996                 return err;
13997
13998         case SIOCSHWTSTAMP:
13999                 return tg3_hwtstamp_set(dev, ifr);
14000
14001         case SIOCGHWTSTAMP:
14002                 return tg3_hwtstamp_get(dev, ifr);
14003
14004         default:
14005                 /* do nothing */
14006                 break;
14007         }
14008         return -EOPNOTSUPP;
14009 }
14010
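/* Interrupt coalescing get/set ("ethtool -c" / "ethtool -C", e.g.
 * "ethtool -C eth0 rx-usecs 50").  tg3_set_coalesce() range-checks the
 * request against hardware limits (tighter on 5705+ parts) and applies
 * the new values immediately when the interface is up.
 */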
14011 static int tg3_get_coalesce(struct net_device *dev,
14012                             struct ethtool_coalesce *ec,
14013                             struct kernel_ethtool_coalesce *kernel_coal,
14014                             struct netlink_ext_ack *extack)
14015 {
14016         struct tg3 *tp = netdev_priv(dev);
14017
14018         memcpy(ec, &tp->coal, sizeof(*ec));
14019         return 0;
14020 }
14021
14022 static int tg3_set_coalesce(struct net_device *dev,
14023                             struct ethtool_coalesce *ec,
14024                             struct kernel_ethtool_coalesce *kernel_coal,
14025                             struct netlink_ext_ack *extack)
14026 {
14027         struct tg3 *tp = netdev_priv(dev);
14028         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14029         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14030
14031         if (!tg3_flag(tp, 5705_PLUS)) {
14032                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14033                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14034                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14035                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14036         }
14037
14038         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14039             (!ec->rx_coalesce_usecs) ||
14040             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14041             (!ec->tx_coalesce_usecs) ||
14042             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14043             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14044             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14045             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14046             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14047             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14048             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14049             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14050                 return -EINVAL;
14051
14052         /* Only copy relevant parameters, ignore all others. */
14053         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14054         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14055         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14056         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14057         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14058         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14059         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14060         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14061         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14062
14063         if (netif_running(dev)) {
14064                 tg3_full_lock(tp, 0);
14065                 __tg3_set_coalesce(tp, &tp->coal);
14066                 tg3_full_unlock(tp);
14067         }
14068         return 0;
14069 }
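
/* Example (illustrative): the fields copied above map one-to-one onto
 * the generic ethtool coalescing knobs, so on a hypothetical "eth0":
 *
 *   ethtool -C eth0 rx-usecs 20 tx-usecs 72 rx-frames 5 tx-frames 53
 *
 * ends up in tg3_set_coalesce(); zero usecs values, or anything beyond
 * the MAX_*COL_TICKS / MAX_*MAX_FRAMES limits, is rejected with -EINVAL.
 */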
14070
14071 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14072 {
14073         struct tg3 *tp = netdev_priv(dev);
14074
14075         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14076                 netdev_warn(tp->dev, "Board does not support EEE!\n");
14077                 return -EOPNOTSUPP;
14078         }
14079
14080         if (edata->advertised != tp->eee.advertised) {
14081                 netdev_warn(tp->dev,
14082                             "Direct manipulation of EEE advertisement is not supported\n");
14083                 return -EINVAL;
14084         }
14085
14086         if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14087                 netdev_warn(tp->dev,
14088                             "Maximum supported Tx LPI timer is %#x usec\n",
14089                             TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14090                 return -EINVAL;
14091         }
14092
14093         tp->eee = *edata;
14094
14095         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14096         tg3_warn_mgmt_link_flap(tp);
14097
14098         if (netif_running(tp->dev)) {
14099                 tg3_full_lock(tp, 0);
14100                 tg3_setup_eee(tp);
14101                 tg3_phy_reset(tp);
14102                 tg3_full_unlock(tp);
14103         }
14104
14105         return 0;
14106 }
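
/* Example (illustrative): per the checks above, userspace may toggle
 * EEE and tune the Tx LPI timer but may not change the advertised
 * modes directly, e.g. on a hypothetical "eth0":
 *
 *   ethtool --set-eee eth0 eee on tx-lpi on tx-lpi-timer 5000
 *
 * where the timer value must not exceed TG3_CPMU_DBTMR1_LNKIDLE_MAX.
 */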
14107
14108 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14109 {
14110         struct tg3 *tp = netdev_priv(dev);
14111
14112         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14113                 netdev_warn(tp->dev,
14114                             "Board does not support EEE!\n");
14115                 return -EOPNOTSUPP;
14116         }
14117
14118         *edata = tp->eee;
14119         return 0;
14120 }
14121
14122 static const struct ethtool_ops tg3_ethtool_ops = {
14123         .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
14124                                      ETHTOOL_COALESCE_MAX_FRAMES |
14125                                      ETHTOOL_COALESCE_USECS_IRQ |
14126                                      ETHTOOL_COALESCE_MAX_FRAMES_IRQ |
14127                                      ETHTOOL_COALESCE_STATS_BLOCK_USECS,
14128         .get_drvinfo            = tg3_get_drvinfo,
14129         .get_regs_len           = tg3_get_regs_len,
14130         .get_regs               = tg3_get_regs,
14131         .get_wol                = tg3_get_wol,
14132         .set_wol                = tg3_set_wol,
14133         .get_msglevel           = tg3_get_msglevel,
14134         .set_msglevel           = tg3_set_msglevel,
14135         .nway_reset             = tg3_nway_reset,
14136         .get_link               = ethtool_op_get_link,
14137         .get_eeprom_len         = tg3_get_eeprom_len,
14138         .get_eeprom             = tg3_get_eeprom,
14139         .set_eeprom             = tg3_set_eeprom,
14140         .get_ringparam          = tg3_get_ringparam,
14141         .set_ringparam          = tg3_set_ringparam,
14142         .get_pauseparam         = tg3_get_pauseparam,
14143         .set_pauseparam         = tg3_set_pauseparam,
14144         .self_test              = tg3_self_test,
14145         .get_strings            = tg3_get_strings,
14146         .set_phys_id            = tg3_set_phys_id,
14147         .get_ethtool_stats      = tg3_get_ethtool_stats,
14148         .get_coalesce           = tg3_get_coalesce,
14149         .set_coalesce           = tg3_set_coalesce,
14150         .get_sset_count         = tg3_get_sset_count,
14151         .get_rxnfc              = tg3_get_rxnfc,
14152         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14153         .get_rxfh               = tg3_get_rxfh,
14154         .set_rxfh               = tg3_set_rxfh,
14155         .get_channels           = tg3_get_channels,
14156         .set_channels           = tg3_set_channels,
14157         .get_ts_info            = tg3_get_ts_info,
14158         .get_eee                = tg3_get_eee,
14159         .set_eee                = tg3_set_eee,
14160         .get_link_ksettings     = tg3_get_link_ksettings,
14161         .set_link_ksettings     = tg3_set_link_ksettings,
14162 };
14163
14164 static void tg3_get_stats64(struct net_device *dev,
14165                             struct rtnl_link_stats64 *stats)
14166 {
14167         struct tg3 *tp = netdev_priv(dev);
14168
14169         spin_lock_bh(&tp->lock);
14170         if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
14171                 *stats = tp->net_stats_prev;
14172                 spin_unlock_bh(&tp->lock);
14173                 return;
14174         }
14175
14176         tg3_get_nstats(tp, stats);
14177         spin_unlock_bh(&tp->lock);
14178 }
14179
14180 static void tg3_set_rx_mode(struct net_device *dev)
14181 {
14182         struct tg3 *tp = netdev_priv(dev);
14183
14184         if (!netif_running(dev))
14185                 return;
14186
14187         tg3_full_lock(tp, 0);
14188         __tg3_set_rx_mode(dev);
14189         tg3_full_unlock(tp);
14190 }
14191
14192 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14193                                int new_mtu)
14194 {
14195         dev->mtu = new_mtu;
14196
14197         if (new_mtu > ETH_DATA_LEN) {
14198                 if (tg3_flag(tp, 5780_CLASS)) {
14199                         netdev_update_features(dev);
14200                         tg3_flag_clear(tp, TSO_CAPABLE);
14201                 } else {
14202                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
14203                 }
14204         } else {
14205                 if (tg3_flag(tp, 5780_CLASS)) {
14206                         tg3_flag_set(tp, TSO_CAPABLE);
14207                         netdev_update_features(dev);
14208                 }
14209                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14210         }
14211 }
14212
14213 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14214 {
14215         struct tg3 *tp = netdev_priv(dev);
14216         int err;
14217         bool reset_phy = false;
14218
14219         if (!netif_running(dev)) {
14220                 /* We'll just catch it later when the
14221                  * device is brought up.
14222                  */
14223                 tg3_set_mtu(dev, tp, new_mtu);
14224                 return 0;
14225         }
14226
14227         tg3_phy_stop(tp);
14228
14229         tg3_netif_stop(tp);
14230
14231         tg3_set_mtu(dev, tp, new_mtu);
14232
14233         tg3_full_lock(tp, 1);
14234
14235         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14236
14237         /* Reset the PHY, otherwise the read DMA engine will be left in a
14238          * mode that breaks all DMA requests down to 256 bytes.
14239          */
14240         if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
14241             tg3_asic_rev(tp) == ASIC_REV_5717 ||
14242             tg3_asic_rev(tp) == ASIC_REV_5719 ||
14243             tg3_asic_rev(tp) == ASIC_REV_5720)
14244                 reset_phy = true;
14245
14246         err = tg3_restart_hw(tp, reset_phy);
14247
14248         if (!err)
14249                 tg3_netif_start(tp);
14250
14251         tg3_full_unlock(tp);
14252
14253         if (!err)
14254                 tg3_phy_start(tp);
14255
14256         return err;
14257 }
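
/* Example (illustrative): an administrative MTU change such as
 *
 *   ip link set dev eth0 mtu 9000
 *
 * arrives here through .ndo_change_mtu.  On a running interface this
 * costs a full halt and restart of the hardware, plus a PHY reset on
 * the 57766/5717/5719/5720 chips singled out above.
 */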
14258
14259 static const struct net_device_ops tg3_netdev_ops = {
14260         .ndo_open               = tg3_open,
14261         .ndo_stop               = tg3_close,
14262         .ndo_start_xmit         = tg3_start_xmit,
14263         .ndo_get_stats64        = tg3_get_stats64,
14264         .ndo_validate_addr      = eth_validate_addr,
14265         .ndo_set_rx_mode        = tg3_set_rx_mode,
14266         .ndo_set_mac_address    = tg3_set_mac_addr,
14267         .ndo_eth_ioctl          = tg3_ioctl,
14268         .ndo_tx_timeout         = tg3_tx_timeout,
14269         .ndo_change_mtu         = tg3_change_mtu,
14270         .ndo_fix_features       = tg3_fix_features,
14271         .ndo_set_features       = tg3_set_features,
14272 #ifdef CONFIG_NET_POLL_CONTROLLER
14273         .ndo_poll_controller    = tg3_poll_controller,
14274 #endif
14275 };
14276
14277 static void tg3_get_eeprom_size(struct tg3 *tp)
14278 {
14279         u32 cursize, val, magic;
14280
14281         tp->nvram_size = EEPROM_CHIP_SIZE;
14282
14283         if (tg3_nvram_read(tp, 0, &magic) != 0)
14284                 return;
14285
14286         if ((magic != TG3_EEPROM_MAGIC) &&
14287             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14288             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14289                 return;
14290
14291         /*
14292          * Size the chip by reading offsets at increasing powers of two.
14293          * When we encounter our validation signature, we know the addressing
14294          * has wrapped around, and thus have our chip size.
14295          */
14296         cursize = 0x10;
14297
14298         while (cursize < tp->nvram_size) {
14299                 if (tg3_nvram_read(tp, cursize, &val) != 0)
14300                         return;
14301
14302                 if (val == magic)
14303                         break;
14304
14305                 cursize <<= 1;
14306         }
14307
14308         tp->nvram_size = cursize;
14309 }
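
/* Illustration of the wraparound probe above: reads at offsets 0x10,
 * 0x20, 0x40, ... return ordinary data while the offset is below the
 * real device size; once the offset wraps past the end of the part,
 * the read lands on offset 0 again and returns the magic signature,
 * so the current power-of-two offset is the chip size.
 */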
14310
14311 static void tg3_get_nvram_size(struct tg3 *tp)
14312 {
14313         u32 val;
14314
14315         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14316                 return;
14317
14318         /* Selfboot format */
14319         if (val != TG3_EEPROM_MAGIC) {
14320                 tg3_get_eeprom_size(tp);
14321                 return;
14322         }
14323
14324         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14325                 if (val != 0) {
14326                         /* This is confusing.  We want to operate on the
14327                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14328                          * call will read from NVRAM and byteswap the data
14329                          * according to the byteswapping settings for all
14330                          * other register accesses.  This ensures the data we
14331                          * want will always reside in the lower 16-bits.
14332                          * However, the data in NVRAM is in LE format, which
14333                          * means the data from the NVRAM read will always be
14334                          * opposite the endianness of the CPU.  The 16-bit
14335                          * byteswap then brings the data to CPU endianness.
14336                          */
14337                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14338                         return;
14339                 }
14340         }
14341         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14342 }
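
/* Worked example for the swab16() above (values hypothetical): for a
 * 512 KB selfboot image the 16-bit size-in-KB word is 512 (0x0200),
 * but after tg3_nvram_read() it sits in the low 16 bits with its
 * bytes swapped relative to the CPU, i.e. as 0x0002; swab16() turns
 * that back into 0x0200, and 0x0200 * 1024 = 512 KB.
 */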
14343
14344 static void tg3_get_nvram_info(struct tg3 *tp)
14345 {
14346         u32 nvcfg1;
14347
14348         nvcfg1 = tr32(NVRAM_CFG1);
14349         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14350                 tg3_flag_set(tp, FLASH);
14351         } else {
14352                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14353                 tw32(NVRAM_CFG1, nvcfg1);
14354         }
14355
14356         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14357             tg3_flag(tp, 5780_CLASS)) {
14358                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14359                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14360                         tp->nvram_jedecnum = JEDEC_ATMEL;
14361                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14362                         tg3_flag_set(tp, NVRAM_BUFFERED);
14363                         break;
14364                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14365                         tp->nvram_jedecnum = JEDEC_ATMEL;
14366                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14367                         break;
14368                 case FLASH_VENDOR_ATMEL_EEPROM:
14369                         tp->nvram_jedecnum = JEDEC_ATMEL;
14370                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14371                         tg3_flag_set(tp, NVRAM_BUFFERED);
14372                         break;
14373                 case FLASH_VENDOR_ST:
14374                         tp->nvram_jedecnum = JEDEC_ST;
14375                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14376                         tg3_flag_set(tp, NVRAM_BUFFERED);
14377                         break;
14378                 case FLASH_VENDOR_SAIFUN:
14379                         tp->nvram_jedecnum = JEDEC_SAIFUN;
14380                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14381                         break;
14382                 case FLASH_VENDOR_SST_SMALL:
14383                 case FLASH_VENDOR_SST_LARGE:
14384                         tp->nvram_jedecnum = JEDEC_SST;
14385                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14386                         break;
14387                 }
14388         } else {
14389                 tp->nvram_jedecnum = JEDEC_ATMEL;
14390                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14391                 tg3_flag_set(tp, NVRAM_BUFFERED);
14392         }
14393 }
14394
14395 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14396 {
14397         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14398         case FLASH_5752PAGE_SIZE_256:
14399                 tp->nvram_pagesize = 256;
14400                 break;
14401         case FLASH_5752PAGE_SIZE_512:
14402                 tp->nvram_pagesize = 512;
14403                 break;
14404         case FLASH_5752PAGE_SIZE_1K:
14405                 tp->nvram_pagesize = 1024;
14406                 break;
14407         case FLASH_5752PAGE_SIZE_2K:
14408                 tp->nvram_pagesize = 2048;
14409                 break;
14410         case FLASH_5752PAGE_SIZE_4K:
14411                 tp->nvram_pagesize = 4096;
14412                 break;
14413         case FLASH_5752PAGE_SIZE_264:
14414                 tp->nvram_pagesize = 264;
14415                 break;
14416         case FLASH_5752PAGE_SIZE_528:
14417                 tp->nvram_pagesize = 528;
14418                 break;
14419         }
14420 }
14421
14422 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14423 {
14424         u32 nvcfg1;
14425
14426         nvcfg1 = tr32(NVRAM_CFG1);
14427
14428         /* NVRAM protection for TPM */
14429         if (nvcfg1 & (1 << 27))
14430                 tg3_flag_set(tp, PROTECTED_NVRAM);
14431
14432         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14433         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14434         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14435                 tp->nvram_jedecnum = JEDEC_ATMEL;
14436                 tg3_flag_set(tp, NVRAM_BUFFERED);
14437                 break;
14438         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14439                 tp->nvram_jedecnum = JEDEC_ATMEL;
14440                 tg3_flag_set(tp, NVRAM_BUFFERED);
14441                 tg3_flag_set(tp, FLASH);
14442                 break;
14443         case FLASH_5752VENDOR_ST_M45PE10:
14444         case FLASH_5752VENDOR_ST_M45PE20:
14445         case FLASH_5752VENDOR_ST_M45PE40:
14446                 tp->nvram_jedecnum = JEDEC_ST;
14447                 tg3_flag_set(tp, NVRAM_BUFFERED);
14448                 tg3_flag_set(tp, FLASH);
14449                 break;
14450         }
14451
14452         if (tg3_flag(tp, FLASH)) {
14453                 tg3_nvram_get_pagesize(tp, nvcfg1);
14454         } else {
14455                 /* For eeprom, set pagesize to maximum eeprom size */
14456                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14457                 /* For EEPROM, set pagesize to the maximum EEPROM size */
14458                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14459                 tw32(NVRAM_CFG1, nvcfg1);
14460         }
14461 }
14462
14463 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14464 {
14465         u32 nvcfg1, protect = 0;
14466
14467         nvcfg1 = tr32(NVRAM_CFG1);
14468
14469         /* NVRAM protection for TPM */
14470         if (nvcfg1 & (1 << 27)) {
14471                 tg3_flag_set(tp, PROTECTED_NVRAM);
14472                 protect = 1;
14473         }
14474
14475         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14476         switch (nvcfg1) {
14477         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14478         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14479         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14480         case FLASH_5755VENDOR_ATMEL_FLASH_5:
14481                 tp->nvram_jedecnum = JEDEC_ATMEL;
14482                 tg3_flag_set(tp, NVRAM_BUFFERED);
14483                 tg3_flag_set(tp, FLASH);
14484                 tp->nvram_pagesize = 264;
14485                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14486                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14487                         tp->nvram_size = (protect ? 0x3e200 :
14488                                           TG3_NVRAM_SIZE_512KB);
14489                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14490                         tp->nvram_size = (protect ? 0x1f200 :
14491                                           TG3_NVRAM_SIZE_256KB);
14492                 else
14493                         tp->nvram_size = (protect ? 0x1f200 :
14494                                           TG3_NVRAM_SIZE_128KB);
14495                 break;
14496         case FLASH_5752VENDOR_ST_M45PE10:
14497         case FLASH_5752VENDOR_ST_M45PE20:
14498         case FLASH_5752VENDOR_ST_M45PE40:
14499                 tp->nvram_jedecnum = JEDEC_ST;
14500                 tg3_flag_set(tp, NVRAM_BUFFERED);
14501                 tg3_flag_set(tp, FLASH);
14502                 tp->nvram_pagesize = 256;
14503                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14504                         tp->nvram_size = (protect ?
14505                                           TG3_NVRAM_SIZE_64KB :
14506                                           TG3_NVRAM_SIZE_128KB);
14507                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14508                         tp->nvram_size = (protect ?
14509                                           TG3_NVRAM_SIZE_64KB :
14510                                           TG3_NVRAM_SIZE_256KB);
14511                 else
14512                         tp->nvram_size = (protect ?
14513                                           TG3_NVRAM_SIZE_128KB :
14514                                           TG3_NVRAM_SIZE_512KB);
14515                 break;
14516         }
14517 }
14518
14519 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14520 {
14521         u32 nvcfg1;
14522
14523         nvcfg1 = tr32(NVRAM_CFG1);
14524
14525         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14526         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14527         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14528         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14529         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14530                 tp->nvram_jedecnum = JEDEC_ATMEL;
14531                 tg3_flag_set(tp, NVRAM_BUFFERED);
14532                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14533
14534                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535                 tw32(NVRAM_CFG1, nvcfg1);
14536                 break;
14537         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14538         case FLASH_5755VENDOR_ATMEL_FLASH_1:
14539         case FLASH_5755VENDOR_ATMEL_FLASH_2:
14540         case FLASH_5755VENDOR_ATMEL_FLASH_3:
14541                 tp->nvram_jedecnum = JEDEC_ATMEL;
14542                 tg3_flag_set(tp, NVRAM_BUFFERED);
14543                 tg3_flag_set(tp, FLASH);
14544                 tp->nvram_pagesize = 264;
14545                 break;
14546         case FLASH_5752VENDOR_ST_M45PE10:
14547         case FLASH_5752VENDOR_ST_M45PE20:
14548         case FLASH_5752VENDOR_ST_M45PE40:
14549                 tp->nvram_jedecnum = JEDEC_ST;
14550                 tg3_flag_set(tp, NVRAM_BUFFERED);
14551                 tg3_flag_set(tp, FLASH);
14552                 tp->nvram_pagesize = 256;
14553                 break;
14554         }
14555 }
14556
14557 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14558 {
14559         u32 nvcfg1, protect = 0;
14560
14561         nvcfg1 = tr32(NVRAM_CFG1);
14562
14563         /* NVRAM protection for TPM */
14564         if (nvcfg1 & (1 << 27)) {
14565                 tg3_flag_set(tp, PROTECTED_NVRAM);
14566                 protect = 1;
14567         }
14568
14569         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14570         switch (nvcfg1) {
14571         case FLASH_5761VENDOR_ATMEL_ADB021D:
14572         case FLASH_5761VENDOR_ATMEL_ADB041D:
14573         case FLASH_5761VENDOR_ATMEL_ADB081D:
14574         case FLASH_5761VENDOR_ATMEL_ADB161D:
14575         case FLASH_5761VENDOR_ATMEL_MDB021D:
14576         case FLASH_5761VENDOR_ATMEL_MDB041D:
14577         case FLASH_5761VENDOR_ATMEL_MDB081D:
14578         case FLASH_5761VENDOR_ATMEL_MDB161D:
14579                 tp->nvram_jedecnum = JEDEC_ATMEL;
14580                 tg3_flag_set(tp, NVRAM_BUFFERED);
14581                 tg3_flag_set(tp, FLASH);
14582                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14583                 tp->nvram_pagesize = 256;
14584                 break;
14585         case FLASH_5761VENDOR_ST_A_M45PE20:
14586         case FLASH_5761VENDOR_ST_A_M45PE40:
14587         case FLASH_5761VENDOR_ST_A_M45PE80:
14588         case FLASH_5761VENDOR_ST_A_M45PE16:
14589         case FLASH_5761VENDOR_ST_M_M45PE20:
14590         case FLASH_5761VENDOR_ST_M_M45PE40:
14591         case FLASH_5761VENDOR_ST_M_M45PE80:
14592         case FLASH_5761VENDOR_ST_M_M45PE16:
14593                 tp->nvram_jedecnum = JEDEC_ST;
14594                 tg3_flag_set(tp, NVRAM_BUFFERED);
14595                 tg3_flag_set(tp, FLASH);
14596                 tp->nvram_pagesize = 256;
14597                 break;
14598         }
14599
14600         if (protect) {
14601                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14602         } else {
14603                 switch (nvcfg1) {
14604                 case FLASH_5761VENDOR_ATMEL_ADB161D:
14605                 case FLASH_5761VENDOR_ATMEL_MDB161D:
14606                 case FLASH_5761VENDOR_ST_A_M45PE16:
14607                 case FLASH_5761VENDOR_ST_M_M45PE16:
14608                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14609                         break;
14610                 case FLASH_5761VENDOR_ATMEL_ADB081D:
14611                 case FLASH_5761VENDOR_ATMEL_MDB081D:
14612                 case FLASH_5761VENDOR_ST_A_M45PE80:
14613                 case FLASH_5761VENDOR_ST_M_M45PE80:
14614                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14615                         break;
14616                 case FLASH_5761VENDOR_ATMEL_ADB041D:
14617                 case FLASH_5761VENDOR_ATMEL_MDB041D:
14618                 case FLASH_5761VENDOR_ST_A_M45PE40:
14619                 case FLASH_5761VENDOR_ST_M_M45PE40:
14620                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14621                         break;
14622                 case FLASH_5761VENDOR_ATMEL_ADB021D:
14623                 case FLASH_5761VENDOR_ATMEL_MDB021D:
14624                 case FLASH_5761VENDOR_ST_A_M45PE20:
14625                 case FLASH_5761VENDOR_ST_M_M45PE20:
14626                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14627                         break;
14628                 }
14629         }
14630 }
14631
14632 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14633 {
14634         tp->nvram_jedecnum = JEDEC_ATMEL;
14635         tg3_flag_set(tp, NVRAM_BUFFERED);
14636         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14637 }
14638
14639 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14640 {
14641         u32 nvcfg1;
14642
14643         nvcfg1 = tr32(NVRAM_CFG1);
14644
14645         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14646         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14647         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14648                 tp->nvram_jedecnum = JEDEC_ATMEL;
14649                 tg3_flag_set(tp, NVRAM_BUFFERED);
14650                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14651
14652                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14653                 tw32(NVRAM_CFG1, nvcfg1);
14654                 return;
14655         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14656         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14657         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14658         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14659         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14660         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14661         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14662                 tp->nvram_jedecnum = JEDEC_ATMEL;
14663                 tg3_flag_set(tp, NVRAM_BUFFERED);
14664                 tg3_flag_set(tp, FLASH);
14665
14666                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14667                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14668                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14669                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14670                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14671                         break;
14672                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14673                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14674                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14675                         break;
14676                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14677                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14678                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14679                         break;
14680                 }
14681                 break;
14682         case FLASH_5752VENDOR_ST_M45PE10:
14683         case FLASH_5752VENDOR_ST_M45PE20:
14684         case FLASH_5752VENDOR_ST_M45PE40:
14685                 tp->nvram_jedecnum = JEDEC_ST;
14686                 tg3_flag_set(tp, NVRAM_BUFFERED);
14687                 tg3_flag_set(tp, FLASH);
14688
14689                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14690                 case FLASH_5752VENDOR_ST_M45PE10:
14691                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14692                         break;
14693                 case FLASH_5752VENDOR_ST_M45PE20:
14694                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14695                         break;
14696                 case FLASH_5752VENDOR_ST_M45PE40:
14697                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14698                         break;
14699                 }
14700                 break;
14701         default:
14702                 tg3_flag_set(tp, NO_NVRAM);
14703                 return;
14704         }
14705
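        /* Only the 264- and 528-byte page sizes (Atmel AT45DB DataFlash
         * geometries) are non-powers-of-two and need the NVRAM address
         * translation; power-of-two pages are addressed linearly.
         */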
14706         tg3_nvram_get_pagesize(tp, nvcfg1);
14707         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14708                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14709 }
14710
14712 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14713 {
14714         u32 nvcfg1;
14715
14716         nvcfg1 = tr32(NVRAM_CFG1);
14717
14718         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14719         case FLASH_5717VENDOR_ATMEL_EEPROM:
14720         case FLASH_5717VENDOR_MICRO_EEPROM:
14721                 tp->nvram_jedecnum = JEDEC_ATMEL;
14722                 tg3_flag_set(tp, NVRAM_BUFFERED);
14723                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14724
14725                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14726                 tw32(NVRAM_CFG1, nvcfg1);
14727                 return;
14728         case FLASH_5717VENDOR_ATMEL_MDB011D:
14729         case FLASH_5717VENDOR_ATMEL_ADB011B:
14730         case FLASH_5717VENDOR_ATMEL_ADB011D:
14731         case FLASH_5717VENDOR_ATMEL_MDB021D:
14732         case FLASH_5717VENDOR_ATMEL_ADB021B:
14733         case FLASH_5717VENDOR_ATMEL_ADB021D:
14734         case FLASH_5717VENDOR_ATMEL_45USPT:
14735                 tp->nvram_jedecnum = JEDEC_ATMEL;
14736                 tg3_flag_set(tp, NVRAM_BUFFERED);
14737                 tg3_flag_set(tp, FLASH);
14738
14739                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14740                 case FLASH_5717VENDOR_ATMEL_MDB021D:
14741                         /* Detect size with tg3_nvram_get_size() */
14742                         break;
14743                 case FLASH_5717VENDOR_ATMEL_ADB021B:
14744                 case FLASH_5717VENDOR_ATMEL_ADB021D:
14745                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14746                         break;
14747                 default:
14748                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14749                         break;
14750                 }
14751                 break;
14752         case FLASH_5717VENDOR_ST_M_M25PE10:
14753         case FLASH_5717VENDOR_ST_A_M25PE10:
14754         case FLASH_5717VENDOR_ST_M_M45PE10:
14755         case FLASH_5717VENDOR_ST_A_M45PE10:
14756         case FLASH_5717VENDOR_ST_M_M25PE20:
14757         case FLASH_5717VENDOR_ST_A_M25PE20:
14758         case FLASH_5717VENDOR_ST_M_M45PE20:
14759         case FLASH_5717VENDOR_ST_A_M45PE20:
14760         case FLASH_5717VENDOR_ST_25USPT:
14761         case FLASH_5717VENDOR_ST_45USPT:
14762                 tp->nvram_jedecnum = JEDEC_ST;
14763                 tg3_flag_set(tp, NVRAM_BUFFERED);
14764                 tg3_flag_set(tp, FLASH);
14765
14766                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14767                 case FLASH_5717VENDOR_ST_M_M25PE20:
14768                 case FLASH_5717VENDOR_ST_M_M45PE20:
14769                         /* Detect size with tg3_nvram_get_size() */
14770                         break;
14771                 case FLASH_5717VENDOR_ST_A_M25PE20:
14772                 case FLASH_5717VENDOR_ST_A_M45PE20:
14773                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14774                         break;
14775                 default:
14776                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14777                         break;
14778                 }
14779                 break;
14780         default:
14781                 tg3_flag_set(tp, NO_NVRAM);
14782                 return;
14783         }
14784
14785         tg3_nvram_get_pagesize(tp, nvcfg1);
14786         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14787                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14788 }
14789
14790 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14791 {
14792         u32 nvcfg1, nvmpinstrp, nv_status;
14793
14794         nvcfg1 = tr32(NVRAM_CFG1);
14795         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14796
14797         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14798                 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14799                         tg3_flag_set(tp, NO_NVRAM);
14800                         return;
14801                 }
14802
14803                 switch (nvmpinstrp) {
14804                 case FLASH_5762_MX25L_100:
14805                 case FLASH_5762_MX25L_200:
14806                 case FLASH_5762_MX25L_400:
14807                 case FLASH_5762_MX25L_800:
14808                 case FLASH_5762_MX25L_160_320:
14809                         tp->nvram_pagesize = 4096;
14810                         tp->nvram_jedecnum = JEDEC_MACRONIX;
14811                         tg3_flag_set(tp, NVRAM_BUFFERED);
14812                         tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14813                         tg3_flag_set(tp, FLASH);
14814                         nv_status = tr32(NVRAM_AUTOSENSE_STATUS);
14815                         tp->nvram_size =
14816                         tp->nvram_size =
14817                                 (1 << ((nv_status >> AUTOSENSE_DEVID) &
14818                                        AUTOSENSE_DEVID_MASK))
14819                                         << AUTOSENSE_SIZE_IN_MB;
14820
14821                 case FLASH_5762_EEPROM_HD:
14822                         nvmpinstrp = FLASH_5720_EEPROM_HD;
14823                         break;
14824                 case FLASH_5762_EEPROM_LD:
14825                         nvmpinstrp = FLASH_5720_EEPROM_LD;
14826                         break;
14827                 case FLASH_5720VENDOR_M_ST_M45PE20:
14828                         /* This pinstrap supports multiple sizes, so force it
14829                          * to read the actual size from location 0xf0.
14830                          */
14831                         nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14832                         break;
14833                 }
14834         }
14835
14836         switch (nvmpinstrp) {
14837         case FLASH_5720_EEPROM_HD:
14838         case FLASH_5720_EEPROM_LD:
14839                 tp->nvram_jedecnum = JEDEC_ATMEL;
14840                 tg3_flag_set(tp, NVRAM_BUFFERED);
14841
14842                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14843                 tw32(NVRAM_CFG1, nvcfg1);
14844                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14845                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14846                 else
14847                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14848                 return;
14849         case FLASH_5720VENDOR_M_ATMEL_DB011D:
14850         case FLASH_5720VENDOR_A_ATMEL_DB011B:
14851         case FLASH_5720VENDOR_A_ATMEL_DB011D:
14852         case FLASH_5720VENDOR_M_ATMEL_DB021D:
14853         case FLASH_5720VENDOR_A_ATMEL_DB021B:
14854         case FLASH_5720VENDOR_A_ATMEL_DB021D:
14855         case FLASH_5720VENDOR_M_ATMEL_DB041D:
14856         case FLASH_5720VENDOR_A_ATMEL_DB041B:
14857         case FLASH_5720VENDOR_A_ATMEL_DB041D:
14858         case FLASH_5720VENDOR_M_ATMEL_DB081D:
14859         case FLASH_5720VENDOR_A_ATMEL_DB081D:
14860         case FLASH_5720VENDOR_ATMEL_45USPT:
14861                 tp->nvram_jedecnum = JEDEC_ATMEL;
14862                 tg3_flag_set(tp, NVRAM_BUFFERED);
14863                 tg3_flag_set(tp, FLASH);
14864
14865                 switch (nvmpinstrp) {
14866                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14867                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14868                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14869                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14870                         break;
14871                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14872                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14873                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14874                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14875                         break;
14876                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14877                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14878                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14879                         break;
14880                 default:
14881                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14882                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14883                         break;
14884                 }
14885                 break;
14886         case FLASH_5720VENDOR_M_ST_M25PE10:
14887         case FLASH_5720VENDOR_M_ST_M45PE10:
14888         case FLASH_5720VENDOR_A_ST_M25PE10:
14889         case FLASH_5720VENDOR_A_ST_M45PE10:
14890         case FLASH_5720VENDOR_M_ST_M25PE20:
14891         case FLASH_5720VENDOR_M_ST_M45PE20:
14892         case FLASH_5720VENDOR_A_ST_M25PE20:
14893         case FLASH_5720VENDOR_A_ST_M45PE20:
14894         case FLASH_5720VENDOR_M_ST_M25PE40:
14895         case FLASH_5720VENDOR_M_ST_M45PE40:
14896         case FLASH_5720VENDOR_A_ST_M25PE40:
14897         case FLASH_5720VENDOR_A_ST_M45PE40:
14898         case FLASH_5720VENDOR_M_ST_M25PE80:
14899         case FLASH_5720VENDOR_M_ST_M45PE80:
14900         case FLASH_5720VENDOR_A_ST_M25PE80:
14901         case FLASH_5720VENDOR_A_ST_M45PE80:
14902         case FLASH_5720VENDOR_ST_25USPT:
14903         case FLASH_5720VENDOR_ST_45USPT:
14904                 tp->nvram_jedecnum = JEDEC_ST;
14905                 tg3_flag_set(tp, NVRAM_BUFFERED);
14906                 tg3_flag_set(tp, FLASH);
14907
14908                 switch (nvmpinstrp) {
14909                 case FLASH_5720VENDOR_M_ST_M25PE20:
14910                 case FLASH_5720VENDOR_M_ST_M45PE20:
14911                 case FLASH_5720VENDOR_A_ST_M25PE20:
14912                 case FLASH_5720VENDOR_A_ST_M45PE20:
14913                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14914                         break;
14915                 case FLASH_5720VENDOR_M_ST_M25PE40:
14916                 case FLASH_5720VENDOR_M_ST_M45PE40:
14917                 case FLASH_5720VENDOR_A_ST_M25PE40:
14918                 case FLASH_5720VENDOR_A_ST_M45PE40:
14919                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14920                         break;
14921                 case FLASH_5720VENDOR_M_ST_M25PE80:
14922                 case FLASH_5720VENDOR_M_ST_M45PE80:
14923                 case FLASH_5720VENDOR_A_ST_M25PE80:
14924                 case FLASH_5720VENDOR_A_ST_M45PE80:
14925                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14926                         break;
14927                 default:
14928                         if (tg3_asic_rev(tp) != ASIC_REV_5762)
14929                                 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14930                         break;
14931                 }
14932                 break;
14933         default:
14934                 tg3_flag_set(tp, NO_NVRAM);
14935                 return;
14936         }
14937
14938         tg3_nvram_get_pagesize(tp, nvcfg1);
14939         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14940                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14941
14942         if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14943                 u32 val;
14944
14945                 if (tg3_nvram_read(tp, 0, &val))
14946                         return;
14947
14948                 if (val != TG3_EEPROM_MAGIC &&
14949                     (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14950                         tg3_flag_set(tp, NO_NVRAM);
14951         }
14952 }
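
/* Illustration of the 5762 autosense math above (hypothetical devid
 * field of 4): nvram_size = (1 << 4) << AUTOSENSE_SIZE_IN_MB, i.e.
 * 2^devid scaled up by the AUTOSENSE_SIZE_IN_MB shift; 16 MB if that
 * shift converts megabytes to bytes.
 */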
14953
14954 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14955 static void tg3_nvram_init(struct tg3 *tp)
14956 {
14957         if (tg3_flag(tp, IS_SSB_CORE)) {
14958                 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14959                 tg3_flag_clear(tp, NVRAM);
14960                 tg3_flag_clear(tp, NVRAM_BUFFERED);
14961                 tg3_flag_set(tp, NO_NVRAM);
14962                 return;
14963         }
14964
14965         tw32_f(GRC_EEPROM_ADDR,
14966              (EEPROM_ADDR_FSM_RESET |
14967               (EEPROM_DEFAULT_CLOCK_PERIOD <<
14968                EEPROM_ADDR_CLKPERD_SHIFT)));
14969
14970         msleep(1);
14971
14972         /* Enable serial EEPROM accesses. */
14973         tw32_f(GRC_LOCAL_CTRL,
14974              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14975         udelay(100);
14976
14977         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14978             tg3_asic_rev(tp) != ASIC_REV_5701) {
14979                 tg3_flag_set(tp, NVRAM);
14980
14981                 if (tg3_nvram_lock(tp)) {
14982                         netdev_warn(tp->dev,
14983                                     "Cannot get nvram lock, %s failed\n",
14984                                     __func__);
14985                         return;
14986                 }
14987                 tg3_enable_nvram_access(tp);
14988
14989                 tp->nvram_size = 0;
14990
14991                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14992                         tg3_get_5752_nvram_info(tp);
14993                 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14994                         tg3_get_5755_nvram_info(tp);
14995                 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14996                          tg3_asic_rev(tp) == ASIC_REV_5784 ||
14997                          tg3_asic_rev(tp) == ASIC_REV_5785)
14998                         tg3_get_5787_nvram_info(tp);
14999                 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
15000                         tg3_get_5761_nvram_info(tp);
15001                 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
15002                         tg3_get_5906_nvram_info(tp);
15003                 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
15004                          tg3_flag(tp, 57765_CLASS))
15005                         tg3_get_57780_nvram_info(tp);
15006                 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15007                          tg3_asic_rev(tp) == ASIC_REV_5719)
15008                         tg3_get_5717_nvram_info(tp);
15009                 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
15010                          tg3_asic_rev(tp) == ASIC_REV_5762)
15011                         tg3_get_5720_nvram_info(tp);
15012                 else
15013                         tg3_get_nvram_info(tp);
15014
15015                 if (tp->nvram_size == 0)
15016                         tg3_get_nvram_size(tp);
15017
15018                 tg3_disable_nvram_access(tp);
15019                 tg3_nvram_unlock(tp);
15020
15021         } else {
15022                 tg3_flag_clear(tp, NVRAM);
15023                 tg3_flag_clear(tp, NVRAM_BUFFERED);
15024
15025                 tg3_get_eeprom_size(tp);
15026         }
15027 }
15028
15029 struct subsys_tbl_ent {
15030         u16 subsys_vendor, subsys_devid;
15031         u32 phy_id;
15032 };
15033
15034 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15035         /* Broadcom boards. */
15036         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15037           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15038         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15039           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15040         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15041           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15042         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15043           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15044         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15045           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15046         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15047           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15048         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15049           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15050         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15051           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15052         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15053           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15054         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15055           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15056         { TG3PCI_SUBVENDOR_ID_BROADCOM,
15057           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15058
15059         /* 3com boards. */
15060         { TG3PCI_SUBVENDOR_ID_3COM,
15061           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15062         { TG3PCI_SUBVENDOR_ID_3COM,
15063           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15064         { TG3PCI_SUBVENDOR_ID_3COM,
15065           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15066         { TG3PCI_SUBVENDOR_ID_3COM,
15067           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15068         { TG3PCI_SUBVENDOR_ID_3COM,
15069           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15070
15071         /* DELL boards. */
15072         { TG3PCI_SUBVENDOR_ID_DELL,
15073           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15074         { TG3PCI_SUBVENDOR_ID_DELL,
15075           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15076         { TG3PCI_SUBVENDOR_ID_DELL,
15077           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15078         { TG3PCI_SUBVENDOR_ID_DELL,
15079           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15080
15081         /* Compaq boards. */
15082         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15083           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15084         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15085           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15086         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15087           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15088         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15089           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15090         { TG3PCI_SUBVENDOR_ID_COMPAQ,
15091           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15092
15093         /* IBM boards. */
15094         { TG3PCI_SUBVENDOR_ID_IBM,
15095           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15096 };
15097
15098 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15099 {
15100         int i;
15101
15102         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15103                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
15104                      tp->pdev->subsystem_vendor) &&
15105                     (subsys_id_to_phy_id[i].subsys_devid ==
15106                      tp->pdev->subsystem_device))
15107                         return &subsys_id_to_phy_id[i];
15108         }
15109         return NULL;
15110 }
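
/* Example (illustrative): a 3com 3C996T reports subsystem vendor
 * TG3PCI_SUBVENDOR_ID_3COM and subsystem device
 * TG3PCI_SUBDEVICE_ID_3COM_3C996T, so the scan above resolves it to
 * TG3_PHY_ID_BCM5401; boards missing from the table return NULL and
 * the PHY id must be discovered by other means.
 */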
15111
15112 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15113 {
15114         u32 val;
15115
15116         tp->phy_id = TG3_PHY_ID_INVALID;
15117         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15118
15119         /* Assume an onboard device and WOL capable by default.  */
15120         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15121         tg3_flag_set(tp, WOL_CAP);
15122
15123         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15124                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15125                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15126                         tg3_flag_set(tp, IS_NIC);
15127                 }
15128                 val = tr32(VCPU_CFGSHDW);
15129                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
15130                         tg3_flag_set(tp, ASPM_WORKAROUND);
15131                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15132                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15133                         tg3_flag_set(tp, WOL_ENABLE);
15134                         device_set_wakeup_enable(&tp->pdev->dev, true);
15135                 }
15136                 goto done;
15137         }
15138
15139         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15140         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15141                 u32 nic_cfg, led_cfg;
15142                 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15143                 u32 nic_phy_id, ver, eeprom_phy_id;
15144                 int eeprom_phy_serdes = 0;
15145
15146                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15147                 tp->nic_sram_data_cfg = nic_cfg;
15148
15149                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15150                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
15151                 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15152                     tg3_asic_rev(tp) != ASIC_REV_5701 &&
15153                     tg3_asic_rev(tp) != ASIC_REV_5703 &&
15154                     (ver > 0) && (ver < 0x100))
15155                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15156
15157                 if (tg3_asic_rev(tp) == ASIC_REV_5785)
15158                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15159
15160                 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15161                     tg3_asic_rev(tp) == ASIC_REV_5719 ||
15162                     tg3_asic_rev(tp) == ASIC_REV_5720)
15163                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15164
15165                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15166                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15167                         eeprom_phy_serdes = 1;
15168
15169                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15170                 if (nic_phy_id != 0) {
15171                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15172                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15173
15174                         eeprom_phy_id  = (id1 >> 16) << 10;
15175                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
15176                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15177                 } else
15178                         eeprom_phy_id = 0;
15179
15180                 tp->phy_id = eeprom_phy_id;
15181                 if (eeprom_phy_serdes) {
15182                         if (!tg3_flag(tp, 5705_PLUS))
15183                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15184                         else
15185                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15186                 }
15187
15188                 if (tg3_flag(tp, 5750_PLUS))
15189                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15190                                     SHASTA_EXT_LED_MODE_MASK);
15191                 else
15192                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15193
15194                 switch (led_cfg) {
15195                 default:
15196                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15197                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15198                         break;
15199
15200                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15201                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15202                         break;
15203
15204                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15205                         tp->led_ctrl = LED_CTRL_MODE_MAC;
15206
15207                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
15208                          * as happens with some older 5700/5701 bootcode.
15209                          */
15210                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15211                             tg3_asic_rev(tp) == ASIC_REV_5701)
15212                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15213
15214                         break;
15215
15216                 case SHASTA_EXT_LED_SHARED:
15217                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
15218                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15219                             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15220                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15221                                                  LED_CTRL_MODE_PHY_2);
15222
15223                         if (tg3_flag(tp, 5717_PLUS) ||
15224                             tg3_asic_rev(tp) == ASIC_REV_5762)
15225                                 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15226                                                 LED_CTRL_BLINK_RATE_MASK;
15227
15228                         break;
15229
15230                 case SHASTA_EXT_LED_MAC:
15231                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15232                         break;
15233
15234                 case SHASTA_EXT_LED_COMBO:
15235                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
15236                         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15237                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15238                                                  LED_CTRL_MODE_PHY_2);
15239                         break;
15240
15241                 }
15242
15243                 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15244                      tg3_asic_rev(tp) == ASIC_REV_5701) &&
15245                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15246                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15247
15248                 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15249                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15250
15251                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15252                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
15253                         if ((tp->pdev->subsystem_vendor ==
15254                              PCI_VENDOR_ID_ARIMA) &&
15255                             (tp->pdev->subsystem_device == 0x205a ||
15256                              tp->pdev->subsystem_device == 0x2063))
15257                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15258                 } else {
15259                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15260                         tg3_flag_set(tp, IS_NIC);
15261                 }
15262
15263                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15264                         tg3_flag_set(tp, ENABLE_ASF);
15265                         if (tg3_flag(tp, 5750_PLUS))
15266                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15267                 }
15268
15269                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15270                     tg3_flag(tp, 5750_PLUS))
15271                         tg3_flag_set(tp, ENABLE_APE);
15272
15273                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15274                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15275                         tg3_flag_clear(tp, WOL_CAP);
15276
15277                 if (tg3_flag(tp, WOL_CAP) &&
15278                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15279                         tg3_flag_set(tp, WOL_ENABLE);
15280                         device_set_wakeup_enable(&tp->pdev->dev, true);
15281                 }
15282
15283                 if (cfg2 & (1 << 17))
15284                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15285
15286                 /* Serdes signal pre-emphasis in register 0x590 is set by
15287                  * the bootcode if bit 18 is set. */
15288                 if (cfg2 & (1 << 18))
15289                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15290
15291                 if ((tg3_flag(tp, 57765_PLUS) ||
15292                      (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15293                       tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15294                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15295                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15296
15297                 if (tg3_flag(tp, PCI_EXPRESS)) {
15298                         u32 cfg3;
15299
15300                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15301                         if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15302                             !tg3_flag(tp, 57765_PLUS) &&
15303                             (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15304                                 tg3_flag_set(tp, ASPM_WORKAROUND);
15305                         if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15306                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15307                         if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15308                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15309                 }
15310
15311                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15312                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15313                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15314                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15315                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15316                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15317
15318                 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15319                         tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15320         }
15321 done:
15322         if (tg3_flag(tp, WOL_CAP))
15323                 device_set_wakeup_enable(&tp->pdev->dev,
15324                                          tg3_flag(tp, WOL_ENABLE));
15325         else
15326                 device_set_wakeup_capable(&tp->pdev->dev, false);
15327 }
15328
15329 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15330 {
15331         int i, err;
15332         u32 val2, off = offset * 8;
15333
15334         err = tg3_nvram_lock(tp);
15335         if (err)
15336                 return err;
15337
15338         tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15339         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15340                         APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15341         tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15342         udelay(10);
15343
15344         for (i = 0; i < 100; i++) {
15345                 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15346                 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15347                         *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15348                         break;
15349                 }
15350                 udelay(10);
15351         }
15352
15353         tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15354
15355         tg3_nvram_unlock(tp);
15356         if (val2 & APE_OTP_STATUS_CMD_DONE)
15357                 return 0;
15358
15359         return -EBUSY;
15360 }
15361
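/* Start an OTP controller command and poll for completion.  Returns 0
 * if the command finishes within 1 ms, -EBUSY otherwise.
 */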
15362 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15363 {
15364         int i;
15365         u32 val;
15366
15367         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15368         tw32(OTP_CTRL, cmd);
15369
15370         /* Wait for up to 1 ms for command to execute. */
15371         for (i = 0; i < 100; i++) {
15372                 val = tr32(OTP_STATUS);
15373                 if (val & OTP_STATUS_CMD_DONE)
15374                         break;
15375                 udelay(10);
15376         }
15377
15378         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15379 }
15380
15381 /* Read the gphy configuration from the OTP region of the chip.  The gphy
15382  * configuration is a 32-bit value that straddles the alignment boundary.
15383  * We do two 32-bit reads and then shift and merge the results.
15384  */
15385 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15386 {
15387         u32 bhalf_otp, thalf_otp;
15388
15389         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15390
15391         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15392                 return 0;
15393
15394         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15395
15396         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15397                 return 0;
15398
15399         thalf_otp = tr32(OTP_READ_DATA);
15400
15401         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15402
15403         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15404                 return 0;
15405
15406         bhalf_otp = tr32(OTP_READ_DATA);
15407
15408         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15409 }
15410
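/* Initialize the default link configuration: advertise every mode the
 * PHY supports (honoring the 10/100-only and 1G-half-disable flags,
 * and TP vs. FIBRE for serdes) with autonegotiation enabled.
 */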
15411 static void tg3_phy_init_link_config(struct tg3 *tp)
15412 {
15413         u32 adv = ADVERTISED_Autoneg;
15414
15415         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15416                 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15417                         adv |= ADVERTISED_1000baseT_Half;
15418                 adv |= ADVERTISED_1000baseT_Full;
15419         }
15420
15421         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15422                 adv |= ADVERTISED_100baseT_Half |
15423                        ADVERTISED_100baseT_Full |
15424                        ADVERTISED_10baseT_Half |
15425                        ADVERTISED_10baseT_Full |
15426                        ADVERTISED_TP;
15427         else
15428                 adv |= ADVERTISED_FIBRE;
15429
15430         tp->link_config.advertising = adv;
15431         tp->link_config.speed = SPEED_UNKNOWN;
15432         tp->link_config.duplex = DUPLEX_UNKNOWN;
15433         tp->link_config.autoneg = AUTONEG_ENABLE;
15434         tp->link_config.active_speed = SPEED_UNKNOWN;
15435         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15436
15437         tp->old_link = -1;
15438 }
15439
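/* Detect the attached PHY and set up the PHY-related flags (serdes,
 * EEE capability, APE PHY lock) plus the default link configuration.
 */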
15440 static int tg3_phy_probe(struct tg3 *tp)
15441 {
15442         u32 hw_phy_id_1, hw_phy_id_2;
15443         u32 hw_phy_id, hw_phy_id_masked;
15444         int err;
15445
15446         /* Flow control autonegotiation is the default behavior. */
15447         tg3_flag_set(tp, PAUSE_AUTONEG);
15448         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15449
15450         if (tg3_flag(tp, ENABLE_APE)) {
15451                 switch (tp->pci_fn) {
15452                 case 0:
15453                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15454                         break;
15455                 case 1:
15456                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15457                         break;
15458                 case 2:
15459                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15460                         break;
15461                 case 3:
15462                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15463                         break;
15464                 }
15465         }
15466
15467         if (!tg3_flag(tp, ENABLE_ASF) &&
15468             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15469             !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15470                 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15471                                    TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15472
15473         if (tg3_flag(tp, USE_PHYLIB))
15474                 return tg3_phy_init(tp);
15475
15476         /* Reading the PHY ID register can conflict with ASF
15477          * firmware access to the PHY hardware.
15478          */
15479         err = 0;
15480         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15481                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15482         } else {
15483                 /* Now read the physical PHY_ID from the chip and verify
15484                  * that it is sane.  If it doesn't look good, we fall back
15485                  * to the PHY_ID found in the eeprom area or, failing
15486                  * that, the hard-coded subsys device table.
15487                  */
15488                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15489                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15490
15491                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15492                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15493                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15494
15495                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15496         }
15497
15498         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15499                 tp->phy_id = hw_phy_id;
15500                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15501                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15502                 else
15503                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15504         } else {
15505                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15506                         /* Do nothing, phy ID already set up in
15507                          * tg3_get_eeprom_hw_cfg().
15508                          */
15509                 } else {
15510                         struct subsys_tbl_ent *p;
15511
15512                         /* No eeprom signature?  Try the hardcoded
15513                          * subsys device table.
15514                          */
15515                         p = tg3_lookup_by_subsys(tp);
15516                         if (p) {
15517                                 tp->phy_id = p->phy_id;
15518                         } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15519                                 /* So far we have seen the IDs 0xbc050cd0,
15520                                  * 0xbc050f80 and 0xbc050c30 on devices
15521                                  * connected to a BCM4785, and there are
15522                                  * probably more.  For now, just assume
15523                                  * that the phy is supported when it is
15524                                  * connected to an SSB core.
15525                                  */
15526                                 return -ENODEV;
15527                         }
15528
15529                         if (!tp->phy_id ||
15530                             tp->phy_id == TG3_PHY_ID_BCM8002)
15531                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15532                 }
15533         }
15534
15535         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15536             (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15537              tg3_asic_rev(tp) == ASIC_REV_5720 ||
15538              tg3_asic_rev(tp) == ASIC_REV_57766 ||
15539              tg3_asic_rev(tp) == ASIC_REV_5762 ||
15540              (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15541               tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15542              (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15543               tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15544                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15545
15546                 tp->eee.supported = SUPPORTED_100baseT_Full |
15547                                     SUPPORTED_1000baseT_Full;
15548                 tp->eee.advertised = ADVERTISED_100baseT_Full |
15549                                      ADVERTISED_1000baseT_Full;
15550                 tp->eee.eee_enabled = 1;
15551                 tp->eee.tx_lpi_enabled = 1;
15552                 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15553         }
15554
15555         tg3_phy_init_link_config(tp);
15556
15557         if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15558             !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15559             !tg3_flag(tp, ENABLE_APE) &&
15560             !tg3_flag(tp, ENABLE_ASF)) {
15561                 u32 bmsr, dummy;
15562
15563                 tg3_readphy(tp, MII_BMSR, &bmsr);
15564                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15565                     (bmsr & BMSR_LSTATUS))
15566                         goto skip_phy_reset;
15567
15568                 err = tg3_phy_reset(tp);
15569                 if (err)
15570                         return err;
15571
15572                 tg3_phy_set_wirespeed(tp);
15573
15574                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15575                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15576                                             tp->link_config.flowctrl);
15577
15578                         tg3_writephy(tp, MII_BMCR,
15579                                      BMCR_ANENABLE | BMCR_ANRESTART);
15580                 }
15581         }
15582
15583 skip_phy_reset:
15584         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15585                 err = tg3_init_5401phy_dsp(tp);
15586                 if (err)
15587                         return err;
15588
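                /* The DSP init is issued a second time here; the repeat
                 * appears to be deliberate for the 5401.
                 */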
15589                 err = tg3_init_5401phy_dsp(tp);
15590         }
15591
15592         return err;
15593 }
15594
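/* Extract the board part number, and on boards from manufacturer
 * "1028" (Dell's PCI vendor ID) a firmware version prefix, from the
 * PCI VPD.  Falls back to device-ID based names when no VPD is found.
 */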
15595 static void tg3_read_vpd(struct tg3 *tp)
15596 {
15597         u8 *vpd_data;
15598         unsigned int len, vpdlen;
15599         int i;
15600
15601         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15602         if (!vpd_data)
15603                 goto out_no_vpd;
15604
15605         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15606                                          PCI_VPD_RO_KEYWORD_MFR_ID, &len);
15607         if (i < 0)
15608                 goto partno;
15609
15610         if (len != 4 || memcmp(vpd_data + i, "1028", 4))
15611                 goto partno;
15612
15613         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15614                                          PCI_VPD_RO_KEYWORD_VENDOR0, &len);
15615         if (i < 0)
15616                 goto partno;
15617
15618         memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15619         snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, vpd_data + i);
15620
15621 partno:
15622         i = pci_vpd_find_ro_info_keyword(vpd_data, vpdlen,
15623                                          PCI_VPD_RO_KEYWORD_PARTNO, &len);
15624         if (i < 0)
15625                 goto out_not_found;
15626
15627         if (len > TG3_BPN_SIZE)
15628                 goto out_not_found;
15629
15630         memcpy(tp->board_part_number, &vpd_data[i], len);
15631
15632 out_not_found:
15633         kfree(vpd_data);
15634         if (tp->board_part_number[0])
15635                 return;
15636
15637 out_no_vpd:
15638         if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15639                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15640                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15641                         strcpy(tp->board_part_number, "BCM5717");
15642                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15643                         strcpy(tp->board_part_number, "BCM5718");
15644                 else
15645                         goto nomatch;
15646         } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15647                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15648                         strcpy(tp->board_part_number, "BCM57780");
15649                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15650                         strcpy(tp->board_part_number, "BCM57760");
15651                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15652                         strcpy(tp->board_part_number, "BCM57790");
15653                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15654                         strcpy(tp->board_part_number, "BCM57788");
15655                 else
15656                         goto nomatch;
15657         } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15658                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15659                         strcpy(tp->board_part_number, "BCM57761");
15660                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15661                         strcpy(tp->board_part_number, "BCM57765");
15662                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15663                         strcpy(tp->board_part_number, "BCM57781");
15664                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15665                         strcpy(tp->board_part_number, "BCM57785");
15666                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15667                         strcpy(tp->board_part_number, "BCM57791");
15668                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15669                         strcpy(tp->board_part_number, "BCM57795");
15670                 else
15671                         goto nomatch;
15672         } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15673                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15674                         strcpy(tp->board_part_number, "BCM57762");
15675                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15676                         strcpy(tp->board_part_number, "BCM57766");
15677                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15678                         strcpy(tp->board_part_number, "BCM57782");
15679                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15680                         strcpy(tp->board_part_number, "BCM57786");
15681                 else
15682                         goto nomatch;
15683         } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15684                 strcpy(tp->board_part_number, "BCM95906");
15685         } else {
15686 nomatch:
15687                 strcpy(tp->board_part_number, "none");
15688         }
15689 }
15690
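/* A firmware image at @offset is considered valid if its first word
 * carries the 0x0c000000 signature in the top bits and its second
 * word is zero.
 */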
15691 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15692 {
15693         u32 val;
15694
15695         if (tg3_nvram_read(tp, offset, &val) ||
15696             (val & 0xfc000000) != 0x0c000000 ||
15697             tg3_nvram_read(tp, offset + 4, &val) ||
15698             val != 0)
15699                 return 0;
15700
15701         return 1;
15702 }
15703
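/* Append the bootcode version to tp->fw_ver.  Newer images carry a
 * 16-byte version string; older ones encode major/minor numbers that
 * are formatted as "vM.mm".
 */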
15704 static void tg3_read_bc_ver(struct tg3 *tp)
15705 {
15706         u32 val, offset, start, ver_offset;
15707         int i, dst_off;
15708         bool newver = false;
15709
15710         if (tg3_nvram_read(tp, 0xc, &offset) ||
15711             tg3_nvram_read(tp, 0x4, &start))
15712                 return;
15713
15714         offset = tg3_nvram_logical_addr(tp, offset);
15715
15716         if (tg3_nvram_read(tp, offset, &val))
15717                 return;
15718
15719         if ((val & 0xfc000000) == 0x0c000000) {
15720                 if (tg3_nvram_read(tp, offset + 4, &val))
15721                         return;
15722
15723                 if (val == 0)
15724                         newver = true;
15725         }
15726
15727         dst_off = strlen(tp->fw_ver);
15728
15729         if (newver) {
15730                 if (TG3_VER_SIZE - dst_off < 16 ||
15731                     tg3_nvram_read(tp, offset + 8, &ver_offset))
15732                         return;
15733
15734                 offset = offset + ver_offset - start;
15735                 for (i = 0; i < 16; i += 4) {
15736                         __be32 v;
15737                         if (tg3_nvram_read_be32(tp, offset + i, &v))
15738                                 return;
15739
15740                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15741                 }
15742         } else {
15743                 u32 major, minor;
15744
15745                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15746                         return;
15747
15748                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15749                         TG3_NVM_BCVER_MAJSFT;
15750                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15751                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15752                          "v%d.%02d", major, minor);
15753         }
15754 }
15755
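/* Read the hardware selfboot major/minor version from NVRAM config
 * word 1 and format it into tp->fw_ver.
 */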
15756 static void tg3_read_hwsb_ver(struct tg3 *tp)
15757 {
15758         u32 val, major, minor;
15759
15760         /* Use native endian representation */
15761         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15762                 return;
15763
15764         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15765                 TG3_NVM_HWSB_CFG1_MAJSFT;
15766         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15767                 TG3_NVM_HWSB_CFG1_MINSFT;
15768
15769         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
15770 }
15771
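/* Decode the selfboot (format 1) version: major/minor numbers plus an
 * optional build suffix letter ('a' onwards).
 */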
15772 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15773 {
15774         u32 offset, major, minor, build;
15775
15776         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15777
15778         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15779                 return;
15780
15781         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15782         case TG3_EEPROM_SB_REVISION_0:
15783                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15784                 break;
15785         case TG3_EEPROM_SB_REVISION_2:
15786                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15787                 break;
15788         case TG3_EEPROM_SB_REVISION_3:
15789                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15790                 break;
15791         case TG3_EEPROM_SB_REVISION_4:
15792                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15793                 break;
15794         case TG3_EEPROM_SB_REVISION_5:
15795                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15796                 break;
15797         case TG3_EEPROM_SB_REVISION_6:
15798                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15799                 break;
15800         default:
15801                 return;
15802         }
15803
15804         if (tg3_nvram_read(tp, offset, &val))
15805                 return;
15806
15807         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15808                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15809         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15810                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15811         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15812
15813         if (minor > 99 || build > 26)
15814                 return;
15815
15816         offset = strlen(tp->fw_ver);
15817         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15818                  " v%d.%02d", major, minor);
15819
15820         if (build > 0) {
15821                 offset = strlen(tp->fw_ver);
15822                 if (offset < TG3_VER_SIZE - 1)
15823                         tp->fw_ver[offset] = 'a' + build - 1;
15824         }
15825 }
15826
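/* Locate the ASF management firmware image through the NVRAM
 * directory and append its 16-byte version string to tp->fw_ver.
 */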
15827 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15828 {
15829         u32 val, offset, start;
15830         int i, vlen;
15831
15832         for (offset = TG3_NVM_DIR_START;
15833              offset < TG3_NVM_DIR_END;
15834              offset += TG3_NVM_DIRENT_SIZE) {
15835                 if (tg3_nvram_read(tp, offset, &val))
15836                         return;
15837
15838                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15839                         break;
15840         }
15841
15842         if (offset == TG3_NVM_DIR_END)
15843                 return;
15844
15845         if (!tg3_flag(tp, 5705_PLUS))
15846                 start = 0x08000000;
15847         else if (tg3_nvram_read(tp, offset - 4, &start))
15848                 return;
15849
15850         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15851             !tg3_fw_img_is_valid(tp, offset) ||
15852             tg3_nvram_read(tp, offset + 8, &val))
15853                 return;
15854
15855         offset += val - start;
15856
15857         vlen = strlen(tp->fw_ver);
15858
15859         tp->fw_ver[vlen++] = ',';
15860         tp->fw_ver[vlen++] = ' ';
15861
15862         for (i = 0; i < 4; i++) {
15863                 __be32 v;
15864                 if (tg3_nvram_read_be32(tp, offset, &v))
15865                         return;
15866
15867                 offset += sizeof(v);
15868
15869                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15870                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15871                         break;
15872                 }
15873
15874                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15875                 vlen += sizeof(v);
15876         }
15877 }
15878
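/* Detect NCSI support: the APE must be alive (valid signature,
 * firmware ready) and advertise the NCSI feature bit.
 */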
15879 static void tg3_probe_ncsi(struct tg3 *tp)
15880 {
15881         u32 apedata;
15882
15883         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15884         if (apedata != APE_SEG_SIG_MAGIC)
15885                 return;
15886
15887         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15888         if (!(apedata & APE_FW_STATUS_READY))
15889                 return;
15890
15891         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15892                 tg3_flag_set(tp, APE_HAS_NCSI);
15893 }
15894
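/* Append the APE firmware version (NCSI, SMASH or DASH flavor) to
 * tp->fw_ver.
 */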
15895 static void tg3_read_dash_ver(struct tg3 *tp)
15896 {
15897         int vlen;
15898         u32 apedata;
15899         char *fwtype;
15900
15901         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15902
15903         if (tg3_flag(tp, APE_HAS_NCSI))
15904                 fwtype = "NCSI";
15905         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15906                 fwtype = "SMASH";
15907         else
15908                 fwtype = "DASH";
15909
15910         vlen = strlen(tp->fw_ver);
15911
15912         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15913                  fwtype,
15914                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15915                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15916                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15917                  (apedata & APE_FW_VERSION_BLDMSK));
15918 }
15919
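/* 5762 only: recover a one-byte firmware version from the OTP magic
 * words when no NVRAM-based version is available.
 */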
15920 static void tg3_read_otp_ver(struct tg3 *tp)
15921 {
15922         u32 val, val2;
15923
15924         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15925                 return;
15926
15927         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15928             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15929             TG3_OTP_MAGIC0_VALID(val)) {
15930                 u64 val64 = (u64) val << 32 | val2;
15931                 u32 ver = 0;
15932                 int i, vlen;
15933
15934                 for (i = 0; i < 7; i++) {
15935                         if ((val64 & 0xff) == 0)
15936                                 break;
15937                         ver = val64 & 0xff;
15938                         val64 >>= 8;
15939                 }
15940                 vlen = strlen(tp->fw_ver);
15941                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15942         }
15943 }
15944
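/* Assemble the complete tp->fw_ver string: any VPD-derived prefix,
 * then the NVRAM bootcode/selfboot version, then the management
 * firmware version where applicable.
 */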
15945 static void tg3_read_fw_ver(struct tg3 *tp)
15946 {
15947         u32 val;
15948         bool vpd_vers = false;
15949
15950         if (tp->fw_ver[0] != 0)
15951                 vpd_vers = true;
15952
15953         if (tg3_flag(tp, NO_NVRAM)) {
15954                 strcat(tp->fw_ver, "sb");
15955                 tg3_read_otp_ver(tp);
15956                 return;
15957         }
15958
15959         if (tg3_nvram_read(tp, 0, &val))
15960                 return;
15961
15962         if (val == TG3_EEPROM_MAGIC)
15963                 tg3_read_bc_ver(tp);
15964         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15965                 tg3_read_sb_ver(tp, val);
15966         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15967                 tg3_read_hwsb_ver(tp);
15968
15969         if (tg3_flag(tp, ENABLE_ASF)) {
15970                 if (tg3_flag(tp, ENABLE_APE)) {
15971                         tg3_probe_ncsi(tp);
15972                         if (!vpd_vers)
15973                                 tg3_read_dash_ver(tp);
15974                 } else if (!vpd_vers) {
15975                         tg3_read_mgmtfw_ver(tp);
15976                 }
15977         }
15978
15979         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15980 }
15981
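/* Pick the RX return ring size appropriate for the chip's ring
 * capabilities.
 */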
15982 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15983 {
15984         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15985                 return TG3_RX_RET_MAX_SIZE_5717;
15986         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15987                 return TG3_RX_RET_MAX_SIZE_5700;
15988         else
15989                 return TG3_RX_RET_MAX_SIZE_5705;
15990 }
15991
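/* Host bridges known to reorder posted writes to the mailbox
 * registers; matched with pci_dev_present() below.
 */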
15992 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15993         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15994         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15995         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15996         { },
15997 };
15998
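/* Find the other function of a dual-port 5704/5714 device, or return
 * tp->pdev itself if the device is configured in single-port mode.
 */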
15999 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
16000 {
16001         struct pci_dev *peer;
16002         unsigned int func, devnr = tp->pdev->devfn & ~7;
16003
16004         for (func = 0; func < 8; func++) {
16005                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
16006                 if (peer && peer != tp->pdev)
16007                         break;
16008                 pci_dev_put(peer);
16009         }
16010         /* The 5704 can be configured in single-port mode; set peer to
16011          * tp->pdev in that case.
16012          */
16013         if (!peer) {
16014                 peer = tp->pdev;
16015                 return peer;
16016         }
16017
16018         /*
16019          * We don't need to keep the refcount elevated; there's no way
16020          * to remove one half of this device without removing the other.
16021          */
16022         pci_dev_put(peer);
16023
16024         return peer;
16025 }
16026
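/* Determine the chip revision ID, consulting the product ID register
 * for chips that use the alternate ASIC REV location, and derive the
 * chip-family flags (5705_PLUS, 5750_PLUS, 57765_PLUS, ...) from it.
 */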
16027 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16028 {
16029         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16030         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16031                 u32 reg;
16032
16033                 /* All devices that use the alternate
16034                  * ASIC REV location have a CPMU.
16035                  */
16036                 tg3_flag_set(tp, CPMU_PRESENT);
16037
16038                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16039                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16040                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16041                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16042                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16043                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16044                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16045                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16046                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16047                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16048                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16049                         reg = TG3PCI_GEN2_PRODID_ASICREV;
16050                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16051                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16052                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16053                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16054                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16055                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16056                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16057                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16058                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16059                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16060                         reg = TG3PCI_GEN15_PRODID_ASICREV;
16061                 else
16062                         reg = TG3PCI_PRODID_ASICREV;
16063
16064                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16065         }
16066
16067         /* Wrong chip ID in 5752 A0. This code can be removed later
16068          * as A0 is not in production.
16069          */
16070         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16071                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16072
16073         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16074                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16075
16076         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16077             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16078             tg3_asic_rev(tp) == ASIC_REV_5720)
16079                 tg3_flag_set(tp, 5717_PLUS);
16080
16081         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16082             tg3_asic_rev(tp) == ASIC_REV_57766)
16083                 tg3_flag_set(tp, 57765_CLASS);
16084
16085         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16086              tg3_asic_rev(tp) == ASIC_REV_5762)
16087                 tg3_flag_set(tp, 57765_PLUS);
16088
16089         /* Intentionally exclude ASIC_REV_5906 */
16090         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16091             tg3_asic_rev(tp) == ASIC_REV_5787 ||
16092             tg3_asic_rev(tp) == ASIC_REV_5784 ||
16093             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16094             tg3_asic_rev(tp) == ASIC_REV_5785 ||
16095             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16096             tg3_flag(tp, 57765_PLUS))
16097                 tg3_flag_set(tp, 5755_PLUS);
16098
16099         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16100             tg3_asic_rev(tp) == ASIC_REV_5714)
16101                 tg3_flag_set(tp, 5780_CLASS);
16102
16103         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16104             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16105             tg3_asic_rev(tp) == ASIC_REV_5906 ||
16106             tg3_flag(tp, 5755_PLUS) ||
16107             tg3_flag(tp, 5780_CLASS))
16108                 tg3_flag_set(tp, 5750_PLUS);
16109
16110         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16111             tg3_flag(tp, 5750_PLUS))
16112                 tg3_flag_set(tp, 5705_PLUS);
16113 }
16114
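/* 10/100-only devices: FET PHYs, certain 5703 board IDs, and entries
 * flagged as such in the PCI ID table.
 */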
16115 static bool tg3_10_100_only_device(struct tg3 *tp,
16116                                    const struct pci_device_id *ent)
16117 {
16118         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16119
16120         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16121              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16122             (tp->phy_flags & TG3_PHYFLG_IS_FET))
16123                 return true;
16124
16125         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16126                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16127                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16128                                 return true;
16129                 } else {
16130                         return true;
16131                 }
16132         }
16133
16134         return false;
16135 }
16136
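/* One-time discovery of chip capabilities, hardware bugs and the
 * register access methods needed to work around them, performed at
 * probe time.
 */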
16137 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16138 {
16139         u32 misc_ctrl_reg;
16140         u32 pci_state_reg, grc_misc_cfg;
16141         u32 val;
16142         u16 pci_cmd;
16143         int err;
16144
16145         /* Force memory write invalidate off.  If we leave it on,
16146          * then on 5700_BX chips we have to enable a workaround.
16147          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16148          * to match the cacheline size.  The Broadcom driver has this
16149          * workaround but turns MWI off all the time and so never uses
16150          * it.  This seems to suggest that the workaround is insufficient.
16151          */
16152         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16153         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16154         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16155
16156         /* Important! -- Make sure register accesses are byteswapped
16157          * correctly.  Also, for those chips that require it, make
16158          * sure that indirect register accesses are enabled before
16159          * the first operation.
16160          */
16161         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16162                               &misc_ctrl_reg);
16163         tp->misc_host_ctrl |= (misc_ctrl_reg &
16164                                MISC_HOST_CTRL_CHIPREV);
16165         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16166                                tp->misc_host_ctrl);
16167
16168         tg3_detect_asic_rev(tp, misc_ctrl_reg);
16169
16170         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16171          * we need to disable memory and use config. cycles
16172          * only to access all registers. The 5702/03 chips
16173          * can mistakenly decode the special cycles from the
16174          * ICH chipsets as memory write cycles, causing corruption
16175          * of register and memory space. Only certain ICH bridges
16176          * will drive special cycles with non-zero data during the
16177          * address phase which can fall within the 5703's address
16178          * range. This is not an ICH bug as the PCI spec allows
16179          * non-zero address during special cycles. However, only
16180          * these ICH bridges are known to drive non-zero addresses
16181          * during special cycles.
16182          *
16183          * Since special cycles do not cross PCI bridges, we only
16184          * enable this workaround if the 5703 is on the secondary
16185          * bus of these ICH bridges.
16186          */
16187         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16188             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16189                 static struct tg3_dev_id {
16190                         u32     vendor;
16191                         u32     device;
16192                         u32     rev;
16193                 } ich_chipsets[] = {
16194                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16195                           PCI_ANY_ID },
16196                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16197                           PCI_ANY_ID },
16198                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16199                           0xa },
16200                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16201                           PCI_ANY_ID },
16202                         { },
16203                 };
16204                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16205                 struct pci_dev *bridge = NULL;
16206
16207                 while (pci_id->vendor != 0) {
16208                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
16209                                                 bridge);
16210                         if (!bridge) {
16211                                 pci_id++;
16212                                 continue;
16213                         }
16214                         if (pci_id->rev != PCI_ANY_ID) {
16215                                 if (bridge->revision > pci_id->rev)
16216                                         continue;
16217                         }
16218                         if (bridge->subordinate &&
16219                             (bridge->subordinate->number ==
16220                              tp->pdev->bus->number)) {
16221                                 tg3_flag_set(tp, ICH_WORKAROUND);
16222                                 pci_dev_put(bridge);
16223                                 break;
16224                         }
16225                 }
16226         }
16227
16228         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16229                 static struct tg3_dev_id {
16230                         u32     vendor;
16231                         u32     device;
16232                 } bridge_chipsets[] = {
16233                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16234                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16235                         { },
16236                 };
16237                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16238                 struct pci_dev *bridge = NULL;
16239
16240                 while (pci_id->vendor != 0) {
16241                         bridge = pci_get_device(pci_id->vendor,
16242                                                 pci_id->device,
16243                                                 bridge);
16244                         if (!bridge) {
16245                                 pci_id++;
16246                                 continue;
16247                         }
16248                         if (bridge->subordinate &&
16249                             (bridge->subordinate->number <=
16250                              tp->pdev->bus->number) &&
16251                             (bridge->subordinate->busn_res.end >=
16252                              tp->pdev->bus->number)) {
16253                                 tg3_flag_set(tp, 5701_DMA_BUG);
16254                                 pci_dev_put(bridge);
16255                                 break;
16256                         }
16257                 }
16258         }
16259
16260         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16261          * DMA addresses > 40-bit. This bridge may have other additional
16262          * 57xx devices behind it in some 4-port NIC designs for example.
16263          * Any tg3 device found behind the bridge will also need the 40-bit
16264          * DMA workaround.
16265          */
16266         if (tg3_flag(tp, 5780_CLASS)) {
16267                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16268                 tp->msi_cap = tp->pdev->msi_cap;
16269         } else {
16270                 struct pci_dev *bridge = NULL;
16271
16272                 do {
16273                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16274                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
16275                                                 bridge);
16276                         if (bridge && bridge->subordinate &&
16277                             (bridge->subordinate->number <=
16278                              tp->pdev->bus->number) &&
16279                             (bridge->subordinate->busn_res.end >=
16280                              tp->pdev->bus->number)) {
16281                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
16282                                 pci_dev_put(bridge);
16283                                 break;
16284                         }
16285                 } while (bridge);
16286         }
16287
16288         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16289             tg3_asic_rev(tp) == ASIC_REV_5714)
16290                 tp->pdev_peer = tg3_find_peer(tp);
16291
16292         /* Determine TSO capabilities */
16293         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16294                 ; /* Do nothing. HW bug. */
16295         else if (tg3_flag(tp, 57765_PLUS))
16296                 tg3_flag_set(tp, HW_TSO_3);
16297         else if (tg3_flag(tp, 5755_PLUS) ||
16298                  tg3_asic_rev(tp) == ASIC_REV_5906)
16299                 tg3_flag_set(tp, HW_TSO_2);
16300         else if (tg3_flag(tp, 5750_PLUS)) {
16301                 tg3_flag_set(tp, HW_TSO_1);
16302                 tg3_flag_set(tp, TSO_BUG);
16303                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16304                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16305                         tg3_flag_clear(tp, TSO_BUG);
16306         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16307                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
16308                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16309                 tg3_flag_set(tp, FW_TSO);
16310                 tg3_flag_set(tp, TSO_BUG);
16311                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16312                         tp->fw_needed = FIRMWARE_TG3TSO5;
16313                 else
16314                         tp->fw_needed = FIRMWARE_TG3TSO;
16315         }
16316
16317         /* Selectively allow TSO based on operating conditions */
16318         if (tg3_flag(tp, HW_TSO_1) ||
16319             tg3_flag(tp, HW_TSO_2) ||
16320             tg3_flag(tp, HW_TSO_3) ||
16321             tg3_flag(tp, FW_TSO)) {
16322                 /* For firmware TSO, assume ASF is disabled.
16323                  * We'll disable TSO later if we discover ASF
16324                  * is enabled in tg3_get_eeprom_hw_cfg().
16325                  */
16326                 tg3_flag_set(tp, TSO_CAPABLE);
16327         } else {
16328                 tg3_flag_clear(tp, TSO_CAPABLE);
16329                 tg3_flag_clear(tp, TSO_BUG);
16330                 tp->fw_needed = NULL;
16331         }
16332
16333         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16334                 tp->fw_needed = FIRMWARE_TG3;
16335
16336         if (tg3_asic_rev(tp) == ASIC_REV_57766)
16337                 tp->fw_needed = FIRMWARE_TG357766;
16338
16339         tp->irq_max = 1;
16340
16341         if (tg3_flag(tp, 5750_PLUS)) {
16342                 tg3_flag_set(tp, SUPPORT_MSI);
16343                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16344                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16345                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16346                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16347                      tp->pdev_peer == tp->pdev))
16348                         tg3_flag_clear(tp, SUPPORT_MSI);
16349
16350                 if (tg3_flag(tp, 5755_PLUS) ||
16351                     tg3_asic_rev(tp) == ASIC_REV_5906) {
16352                         tg3_flag_set(tp, 1SHOT_MSI);
16353                 }
16354
16355                 if (tg3_flag(tp, 57765_PLUS)) {
16356                         tg3_flag_set(tp, SUPPORT_MSIX);
16357                         tp->irq_max = TG3_IRQ_MAX_VECS;
16358                 }
16359         }
16360
16361         tp->txq_max = 1;
16362         tp->rxq_max = 1;
16363         if (tp->irq_max > 1) {
16364                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16365                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16366
16367                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16368                     tg3_asic_rev(tp) == ASIC_REV_5720)
16369                         tp->txq_max = tp->irq_max - 1;
16370         }
16371
16372         if (tg3_flag(tp, 5755_PLUS) ||
16373             tg3_asic_rev(tp) == ASIC_REV_5906)
16374                 tg3_flag_set(tp, SHORT_DMA_BUG);
16375
16376         if (tg3_asic_rev(tp) == ASIC_REV_5719)
16377                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16378
16379         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16380             tg3_asic_rev(tp) == ASIC_REV_5719 ||
16381             tg3_asic_rev(tp) == ASIC_REV_5720 ||
16382             tg3_asic_rev(tp) == ASIC_REV_5762)
16383                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16384
16385         if (tg3_flag(tp, 57765_PLUS) &&
16386             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16387                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16388
16389         if (!tg3_flag(tp, 5705_PLUS) ||
16390             tg3_flag(tp, 5780_CLASS) ||
16391             tg3_flag(tp, USE_JUMBO_BDFLAG))
16392                 tg3_flag_set(tp, JUMBO_CAPABLE);
16393
16394         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16395                               &pci_state_reg);
16396
16397         if (pci_is_pcie(tp->pdev)) {
16398                 u16 lnkctl;
16399
16400                 tg3_flag_set(tp, PCI_EXPRESS);
16401
16402                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16403                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16404                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16405                                 tg3_flag_clear(tp, HW_TSO_2);
16406                                 tg3_flag_clear(tp, TSO_CAPABLE);
16407                         }
16408                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16409                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
16410                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16411                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16412                                 tg3_flag_set(tp, CLKREQ_BUG);
16413                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16414                         tg3_flag_set(tp, L1PLLPD_EN);
16415                 }
16416         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16417                 /* BCM5785 devices are effectively PCIe devices, and should
16418                  * follow PCIe codepaths, but do not have a PCIe capabilities
16419                  * section.
16420                  */
16421                 tg3_flag_set(tp, PCI_EXPRESS);
16422         } else if (!tg3_flag(tp, 5705_PLUS) ||
16423                    tg3_flag(tp, 5780_CLASS)) {
16424                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16425                 if (!tp->pcix_cap) {
16426                         dev_err(&tp->pdev->dev,
16427                                 "Cannot find PCI-X capability, aborting\n");
16428                         return -EIO;
16429                 }
16430
16431                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16432                         tg3_flag_set(tp, PCIX_MODE);
16433         }
16434
16435         /* If we have an AMD 762 or VIA K8T800 chipset, write
16436          * reordering to the mailbox registers done by the host
16437          * controller can cause major trouble.  We read back from
16438          * every mailbox register write to force the writes to be
16439          * posted to the chip in order.
16440          */
16441         if (pci_dev_present(tg3_write_reorder_chipsets) &&
16442             !tg3_flag(tp, PCI_EXPRESS))
16443                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16444
16445         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16446                              &tp->pci_cacheline_sz);
16447         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16448                              &tp->pci_lat_timer);
16449         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16450             tp->pci_lat_timer < 64) {
16451                 tp->pci_lat_timer = 64;
16452                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16453                                       tp->pci_lat_timer);
16454         }
16455
16456         /* Important! -- It is critical that the PCI-X hw workaround
16457          * situation is decided before the first MMIO register access.
16458          */
16459         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16460                 /* 5700 BX chips need to have their TX producer index
16461                  * mailboxes written twice to work around a bug.
16462                  */
16463                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16464
16465                 /* If we are in PCI-X mode, enable the register write workaround.
16466                  *
16467                  * The workaround is to use indirect register accesses
16468                  * for all chip writes not to mailbox registers.
16469                  */
16470                 if (tg3_flag(tp, PCIX_MODE)) {
16471                         u32 pm_reg;
16472
16473                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16474
16475                         /* The chip can have its power management PCI config
16476                          * space registers clobbered due to this bug.
16477                          * So explicitly force the chip into D0 here.
16478                          */
16479                         pci_read_config_dword(tp->pdev,
16480                                               tp->pdev->pm_cap + PCI_PM_CTRL,
16481                                               &pm_reg);
16482                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16483                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16484                         pci_write_config_dword(tp->pdev,
16485                                                tp->pdev->pm_cap + PCI_PM_CTRL,
16486                                                pm_reg);
16487
16488                         /* Also, force SERR#/PERR# in PCI command. */
16489                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16490                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16491                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16492                 }
16493         }
16494
16495         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16496                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16497         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16498                 tg3_flag_set(tp, PCI_32BIT);
16499
16500         /* Chip-specific fixup from Broadcom driver */
16501         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16502             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16503                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16504                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16505         }
16506
16507         /* Default fast path register access methods */
16508         tp->read32 = tg3_read32;
16509         tp->write32 = tg3_write32;
16510         tp->read32_mbox = tg3_read32;
16511         tp->write32_mbox = tg3_write32;
16512         tp->write32_tx_mbox = tg3_write32;
16513         tp->write32_rx_mbox = tg3_write32;
16514
16515         /* Various workaround register access methods */
16516         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16517                 tp->write32 = tg3_write_indirect_reg32;
16518         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16519                  (tg3_flag(tp, PCI_EXPRESS) &&
16520                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16521                 /*
16522                  * Back to back register writes can cause problems on these
16523                  * chips; the workaround is to read back all reg writes
16524                  * except those to mailbox regs.
16525                  *
16526                  * See tg3_write_indirect_reg32().
16527                  */
16528                 tp->write32 = tg3_write_flush_reg32;
16529         }
16530
16531         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16532                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16533                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16534                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16535         }
16536
16537         if (tg3_flag(tp, ICH_WORKAROUND)) {
16538                 tp->read32 = tg3_read_indirect_reg32;
16539                 tp->write32 = tg3_write_indirect_reg32;
16540                 tp->read32_mbox = tg3_read_indirect_mbox;
16541                 tp->write32_mbox = tg3_write_indirect_mbox;
16542                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16543                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16544
16545                 iounmap(tp->regs);
16546                 tp->regs = NULL;
16547
16548                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16549                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16550                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16551         }
16552         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16553                 tp->read32_mbox = tg3_read32_mbox_5906;
16554                 tp->write32_mbox = tg3_write32_mbox_5906;
16555                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16556                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16557         }
16558
16559         if (tp->write32 == tg3_write_indirect_reg32 ||
16560             (tg3_flag(tp, PCIX_MODE) &&
16561              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16562               tg3_asic_rev(tp) == ASIC_REV_5701)))
16563                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16564
16565         /* The memory arbiter has to be enabled in order for SRAM accesses
16566          * to succeed.  Normally on powerup the tg3 chip firmware will make
16567          * sure it is enabled, but other entities such as system netboot
16568          * code might disable it.
16569          */
16570         val = tr32(MEMARB_MODE);
16571         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16572
16573         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16574         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16575             tg3_flag(tp, 5780_CLASS)) {
16576                 if (tg3_flag(tp, PCIX_MODE)) {
16577                         pci_read_config_dword(tp->pdev,
16578                                               tp->pcix_cap + PCI_X_STATUS,
16579                                               &val);
16580                         tp->pci_fn = val & 0x7;
16581                 }
16582         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16583                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16584                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16585                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16586                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16587                         val = tr32(TG3_CPMU_STATUS);
16588
16589                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16590                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16591                 else
16592                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16593                                      TG3_CPMU_STATUS_FSHFT_5719;
16594         }
16595
16596         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16597                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16598                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16599         }
16600
16601         /* Get eeprom hw config before calling tg3_set_power_state().
16602          * In particular, the TG3_FLAG_IS_NIC flag must be
16603          * determined before calling tg3_set_power_state() so that
16604          * we know whether or not to switch out of Vaux power.
16605          * When the flag is set, it means that GPIO1 is used for eeprom
16606          * write protect and also implies that it is a LOM where GPIOs
16607          * are not used to switch power.
16608          */
16609         tg3_get_eeprom_hw_cfg(tp);
16610
16611         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16612                 tg3_flag_clear(tp, TSO_CAPABLE);
16613                 tg3_flag_clear(tp, TSO_BUG);
16614                 tp->fw_needed = NULL;
16615         }
16616
16617         if (tg3_flag(tp, ENABLE_APE)) {
16618                 /* Allow reads and writes to the
16619                  * APE register and memory space.
16620                  */
16621                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16622                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16623                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16624                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16625                                        pci_state_reg);
16626
16627                 tg3_ape_lock_init(tp);
16628                 tp->ape_hb_interval =
16629                         msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
16630         }
16631
16632         /* Set up tp->grc_local_ctrl before calling
16633          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16634          * will bring 5700's external PHY out of reset.
16635          * It is also used as eeprom write protect on LOMs.
16636          */
16637         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16638         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16639             tg3_flag(tp, EEPROM_WRITE_PROT))
16640                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16641                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16642         /* Unused GPIO3 must be driven as output on 5752 because there
16643          * are no pull-up resistors on unused GPIO pins.
16644          */
16645         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16646                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16647
16648         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16649             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16650             tg3_flag(tp, 57765_CLASS))
16651                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16652
16653         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16654             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16655                 /* Turn off the debug UART. */
16656                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16657                 if (tg3_flag(tp, IS_NIC))
16658                         /* Keep VMain power. */
16659                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16660                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16661         }
16662
16663         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16664                 tp->grc_local_ctrl |=
16665                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16666
16667         /* Switch out of Vaux if it is a NIC */
16668         tg3_pwrsrc_switch_to_vmain(tp);
16669
16670         /* Derive the initial jumbo mode from the MTU assigned in
16671          * ether_setup() via the alloc_etherdev() call.
16672          */
16673         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16674                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16675
16676         /* Determine the WakeOnLAN speed to use. */
16677         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16678             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16679             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16680             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16681                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16682         } else {
16683                 tg3_flag_set(tp, WOL_SPEED_100MB);
16684         }
16685
16686         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16687                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16688
16689         /* A few boards don't want the Ethernet@WireSpeed PHY feature. */
16690         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16691             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16692              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16693              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16694             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16695             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16696                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16697
16698         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16699             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16700                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16701         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16702                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16703
16704         if (tg3_flag(tp, 5705_PLUS) &&
16705             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16706             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16707             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16708             !tg3_flag(tp, 57765_PLUS)) {
16709                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16710                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16711                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16712                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16713                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16714                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16715                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16716                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16717                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16718                 } else
16719                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16720         }
16721
16722         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16723             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16724                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16725                 if (tp->phy_otp == 0)
16726                         tp->phy_otp = TG3_OTP_DEFAULT;
16727         }
16728
16729         if (tg3_flag(tp, CPMU_PRESENT))
16730                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16731         else
16732                 tp->mi_mode = MAC_MI_MODE_BASE;
16733
16734         tp->coalesce_mode = 0;
16735         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16736             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16737                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16738
16739         /* Set these bits to enable statistics workaround. */
16740         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16741             tg3_asic_rev(tp) == ASIC_REV_5762 ||
16742             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16743             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16744                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16745                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16746         }
16747
16748         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16749             tg3_asic_rev(tp) == ASIC_REV_57780)
16750                 tg3_flag_set(tp, USE_PHYLIB);
16751
16752         err = tg3_mdio_init(tp);
16753         if (err)
16754                 return err;
16755
16756         /* Initialize data/descriptor byte/word swapping. */
16757         val = tr32(GRC_MODE);
16758         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16759             tg3_asic_rev(tp) == ASIC_REV_5762)
16760                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16761                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16762                         GRC_MODE_B2HRX_ENABLE |
16763                         GRC_MODE_HTX2B_ENABLE |
16764                         GRC_MODE_HOST_STACKUP);
16765         else
16766                 val &= GRC_MODE_HOST_STACKUP;
16767
16768         tw32(GRC_MODE, val | tp->grc_mode);
16769
16770         tg3_switch_clocks(tp);
16771
16772         /* Clear this out for sanity. */
16773         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16774
16775         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16776         tw32(TG3PCI_REG_BASE_ADDR, 0);
16777
16778         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16779                               &pci_state_reg);
16780         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16781             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16782                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16783                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16784                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16785                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16786                         void __iomem *sram_base;
16787
16788                         /* Write some dummy words into the SRAM status block
16789                          * area and see if they read back correctly.  If the
16790                          * readback value is bad, force-enable the PCIX workaround.
16791                          */
16792                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16793
16794                         writel(0x00000000, sram_base);
16795                         writel(0x00000000, sram_base + 4);
16796                         writel(0xffffffff, sram_base + 4);
16797                         if (readl(sram_base) != 0x00000000)
16798                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16799                 }
16800         }
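
        /* The block above is a minimal MMIO write/readback probe.  A
         * stand-alone sketch of the same idea (the helper name is
         * hypothetical, not part of this driver):
         *
         *      static bool mmio_readback_ok(void __iomem *base)
         *      {
         *              writel(0x00000000, base);
         *              writel(0x00000000, base + 4);
         *              writel(0xffffffff, base + 4);
         *              return readl(base) == 0x00000000;
         *      }
         *
         * If the all-ones write to the neighbouring word bleeds into
         * the first word, the bus is corrupting transactions and the
         * PCIX_TARGET_HWBUG workaround is forced on.
         */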
16801
16802         udelay(50);
16803         tg3_nvram_init(tp);
16804
16805         /* If the device has an NVRAM, no need to load patch firmware */
16806         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16807             !tg3_flag(tp, NO_NVRAM))
16808                 tp->fw_needed = NULL;
16809
16810         grc_misc_cfg = tr32(GRC_MISC_CFG);
16811         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16812
16813         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16814             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16815              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16816                 tg3_flag_set(tp, IS_5788);
16817
16818         if (!tg3_flag(tp, IS_5788) &&
16819             tg3_asic_rev(tp) != ASIC_REV_5700)
16820                 tg3_flag_set(tp, TAGGED_STATUS);
16821         if (tg3_flag(tp, TAGGED_STATUS)) {
16822                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16823                                       HOSTCC_MODE_CLRTICK_TXBD);
16824
16825                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16826                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16827                                        tp->misc_host_ctrl);
16828         }
16829
16830         /* Preserve the APE MAC_MODE bits */
16831         if (tg3_flag(tp, ENABLE_APE))
16832                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16833         else
16834                 tp->mac_mode = 0;
16835
16836         if (tg3_10_100_only_device(tp, ent))
16837                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16838
16839         err = tg3_phy_probe(tp);
16840         if (err) {
16841                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16842                 /* ... but do not return immediately ... */
16843                 tg3_mdio_fini(tp);
16844         }
16845
16846         tg3_read_vpd(tp);
16847         tg3_read_fw_ver(tp);
16848
16849         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16850                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16851         } else {
16852                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16853                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16854                 else
16855                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16856         }
16857
16858         /* 5700 {AX,BX} chips have a broken status block link
16859          * change bit implementation, so we must use the
16860          * status register in those cases.
16861          */
16862         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16863                 tg3_flag_set(tp, USE_LINKCHG_REG);
16864         else
16865                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16866
16867         /* led_ctrl is set during tg3_phy_probe; here we might
16868          * have to force the link status polling mechanism based
16869          * upon subsystem IDs.
16870          */
16871         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16872             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16873             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16874                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16875                 tg3_flag_set(tp, USE_LINKCHG_REG);
16876         }
16877
16878         /* For all SERDES we poll the MAC status register. */
16879         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16880                 tg3_flag_set(tp, POLL_SERDES);
16881         else
16882                 tg3_flag_clear(tp, POLL_SERDES);
16883
16884         if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16885                 tg3_flag_set(tp, POLL_CPMU_LINK);
16886
16887         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16888         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16889         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16890             tg3_flag(tp, PCIX_MODE)) {
16891                 tp->rx_offset = NET_SKB_PAD;
16892 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16893                 tp->rx_copy_thresh = ~(u16)0;
16894 #endif
16895         }
16896
16897         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16898         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16899         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
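        /* The ring sizes are powers of two, so "size - 1" doubles as an
         * index mask: e.g. a 512-entry ring gives mask 0x1ff, and
         * "(idx + 1) & mask" wraps the index without a divide.
         */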
16900
16901         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16902
16903         /* Increment the rx prod index on the rx std ring by at most
16904          * 8 for these chips to work around hw errata.
16905          */
16906         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16907             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16908             tg3_asic_rev(tp) == ASIC_REV_5755)
16909                 tp->rx_std_max_post = 8;
16910
16911         if (tg3_flag(tp, ASPM_WORKAROUND))
16912                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16913                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16914
16915         return err;
16916 }
16917
16918 static int tg3_get_device_address(struct tg3 *tp)
16919 {
16920         struct net_device *dev = tp->dev;
16921         u32 hi, lo, mac_offset;
16922         int addr_ok = 0;
16923         int err;
16924
16925         if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
16926                 return 0;
16927
16928         if (tg3_flag(tp, IS_SSB_CORE)) {
16929                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16930                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16931                         return 0;
16932         }
16933
16934         mac_offset = 0x7c;
16935         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16936             tg3_flag(tp, 5780_CLASS)) {
16937                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16938                         mac_offset = 0xcc;
16939                 if (tg3_nvram_lock(tp))
16940                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16941                 else
16942                         tg3_nvram_unlock(tp);
16943         } else if (tg3_flag(tp, 5717_PLUS)) {
16944                 if (tp->pci_fn & 1)
16945                         mac_offset = 0xcc;
16946                 if (tp->pci_fn > 1)
16947                         mac_offset += 0x18c;
16948         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16949                 mac_offset = 0x10;
16950
16951         /* First try to get it from the MAC address mailbox. */
16952         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16953         if ((hi >> 16) == 0x484b) {
16954                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16955                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16956
16957                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16958                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16959                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16960                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16961                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16962
16963                 /* Some old bootcode may report a 0 MAC address in SRAM */
16964                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16965         }
16966         if (!addr_ok) {
16967                 /* Next, try NVRAM. */
16968                 if (!tg3_flag(tp, NO_NVRAM) &&
16969                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16970                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16971                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16972                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16973                 }
16974                 /* Finally just fetch it out of the MAC control regs. */
16975                 else {
16976                         hi = tr32(MAC_ADDR_0_HIGH);
16977                         lo = tr32(MAC_ADDR_0_LOW);
16978
16979                         dev->dev_addr[5] = lo & 0xff;
16980                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16981                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16982                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16983                         dev->dev_addr[1] = hi & 0xff;
16984                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16985                 }
16986         }
16987
16988         if (!is_valid_ether_addr(&dev->dev_addr[0]))
16989                 return -EINVAL;
16990         return 0;
16991 }
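
/* Worked example of the SRAM mailbox decode in tg3_get_device_address():
 * if the bootcode left hi = 0x484b0a1b and lo = 0x2c3d4e5f, the 0x484b
 * signature in the upper half of "hi" matches and the station address
 * becomes 0a:1b:2c:3d:4e:5f.
 */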
16992
16993 #define BOUNDARY_SINGLE_CACHELINE       1
16994 #define BOUNDARY_MULTI_CACHELINE        2
16995
16996 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16997 {
16998         int cacheline_size;
16999         u8 byte;
17000         int goal;
17001
17002         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17003         if (byte == 0)
17004                 cacheline_size = 1024;
17005         else
17006                 cacheline_size = (int) byte * 4;
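        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit dwords, so a raw
         * readout of 0x10 (16) means 16 * 4 = 64 bytes; a readout of
         * zero is treated as the 1024-byte worst case above.
         */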
17007
17008         /* On 5703 and later chips the boundary bits have no effect,
17009          * except for limited write-side control on PCI Express.
17010          */
17011         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17012             tg3_asic_rev(tp) != ASIC_REV_5701 &&
17013             !tg3_flag(tp, PCI_EXPRESS))
17014                 goto out;
17015
17016 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17017         goal = BOUNDARY_MULTI_CACHELINE;
17018 #elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17019         goal = BOUNDARY_SINGLE_CACHELINE;
17020 #else
17021         goal = 0;
17022 #endif
17025
17026         if (tg3_flag(tp, 57765_PLUS)) {
17027                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17028                 goto out;
17029         }
17030
17031         if (!goal)
17032                 goto out;
17033
17034         /* PCI controllers on most RISC systems tend to disconnect
17035          * when a device tries to burst across a cache-line boundary.
17036          * Therefore, letting tg3 do so just wastes PCI bandwidth.
17037          *
17038          * Unfortunately, for PCI-E there are only limited
17039          * write-side controls for this, and thus for reads
17040          * we will still get the disconnects.  We'll also waste
17041          * these PCI cycles for both read and write on chips
17042          * other than the 5700 and 5701, which do not implement
17043          * the boundary bits.
17044          */
17045         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17046                 switch (cacheline_size) {
17047                 case 16:
17048                 case 32:
17049                 case 64:
17050                 case 128:
17051                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17052                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17053                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17054                         } else {
17055                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17056                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17057                         }
17058                         break;
17059
17060                 case 256:
17061                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17062                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17063                         break;
17064
17065                 default:
17066                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17067                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17068                         break;
17069                 }
17070         } else if (tg3_flag(tp, PCI_EXPRESS)) {
17071                 switch (cacheline_size) {
17072                 case 16:
17073                 case 32:
17074                 case 64:
17075                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17076                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17077                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17078                                 break;
17079                         }
17080                         fallthrough;
17081                 case 128:
17082                 default:
17083                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17084                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17085                         break;
17086                 }
17087         } else {
17088                 switch (cacheline_size) {
17089                 case 16:
17090                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17091                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
17092                                         DMA_RWCTRL_WRITE_BNDRY_16);
17093                                 break;
17094                         }
17095                         fallthrough;
17096                 case 32:
17097                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17098                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
17099                                         DMA_RWCTRL_WRITE_BNDRY_32);
17100                                 break;
17101                         }
17102                         fallthrough;
17103                 case 64:
17104                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17105                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
17106                                         DMA_RWCTRL_WRITE_BNDRY_64);
17107                                 break;
17108                         }
17109                         fallthrough;
17110                 case 128:
17111                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
17112                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
17113                                         DMA_RWCTRL_WRITE_BNDRY_128);
17114                                 break;
17115                         }
17116                         fallthrough;
17117                 case 256:
17118                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
17119                                 DMA_RWCTRL_WRITE_BNDRY_256);
17120                         break;
17121                 case 512:
17122                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
17123                                 DMA_RWCTRL_WRITE_BNDRY_512);
17124                         break;
17125                 case 1024:
17126                 default:
17127                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17128                                 DMA_RWCTRL_WRITE_BNDRY_1024);
17129                         break;
17130                 }
17131         }
17132
17133 out:
17134         return val;
17135 }
17136
17137 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17138                            int size, bool to_device)
17139 {
17140         struct tg3_internal_buffer_desc test_desc;
17141         u32 sram_dma_descs;
17142         int i, ret;
17143
17144         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17145
17146         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17147         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17148         tw32(RDMAC_STATUS, 0);
17149         tw32(WDMAC_STATUS, 0);
17150
17151         tw32(BUFMGR_MODE, 0);
17152         tw32(FTQ_RESET, 0);
17153
17154         test_desc.addr_hi = ((u64) buf_dma) >> 32;
17155         test_desc.addr_lo = buf_dma & 0xffffffff;
17156         test_desc.nic_mbuf = 0x00002100;
17157         test_desc.len = size;
17158
17159         /*
17160          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
17161          * the *second* time the tg3 driver was getting loaded after an
17162          * initial scan.
17163          *
17164          * Broadcom tells me:
17165          *   ...the DMA engine is connected to the GRC block and a DMA
17166          *   reset may affect the GRC block in some unpredictable way...
17167          *   The behavior of resets to individual blocks has not been tested.
17168          *
17169          * Broadcom noted the GRC reset will also reset all sub-components.
17170          */
17171         if (to_device) {
17172                 test_desc.cqid_sqid = (13 << 8) | 2;
17173
17174                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17175                 udelay(40);
17176         } else {
17177                 test_desc.cqid_sqid = (16 << 8) | 7;
17178
17179                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17180                 udelay(40);
17181         }
17182         test_desc.flags = 0x00000005;
17183
17184         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17185                 u32 val;
17186
17187                 val = *(((u32 *)&test_desc) + i);
17188                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17189                                        sram_dma_descs + (i * sizeof(u32)));
17190                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17191         }
17192         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17193
17194         if (to_device)
17195                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17196         else
17197                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17198
17199         ret = -ENODEV;
17200         for (i = 0; i < 40; i++) {
17201                 u32 val;
17202
17203                 if (to_device)
17204                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17205                 else
17206                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17207                 if ((val & 0xffff) == sram_dma_descs) {
17208                         ret = 0;
17209                         break;
17210                 }
17211
17212                 udelay(100);
17213         }
17214
17215         return ret;
17216 }
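
/* Notes on tg3_do_test_dma(): the descriptor is pushed into NIC SRAM one
 * 32-bit word at a time through the TG3PCI_MEM_WIN_BASE_ADDR /
 * TG3PCI_MEM_WIN_DATA config-space window, and the completion poll of
 * 40 iterations at 100 usec apiece gives the engine up to ~4 ms before
 * -ENODEV is returned.
 */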
17217
17218 #define TEST_BUFFER_SIZE        0x2000
17219
17220 static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17221         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17222         { },
17223 };
17224
17225 static int tg3_test_dma(struct tg3 *tp)
17226 {
17227         dma_addr_t buf_dma;
17228         u32 *buf, saved_dma_rwctrl;
17229         int ret = 0;
17230
17231         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17232                                  &buf_dma, GFP_KERNEL);
17233         if (!buf) {
17234                 ret = -ENOMEM;
17235                 goto out_nofree;
17236         }
17237
17238         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17239                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17240
17241         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17242
17243         if (tg3_flag(tp, 57765_PLUS))
17244                 goto out;
17245
17246         if (tg3_flag(tp, PCI_EXPRESS)) {
17247                 /* DMA read watermark not used on PCIE */
17248                 tp->dma_rwctrl |= 0x00180000;
17249         } else if (!tg3_flag(tp, PCIX_MODE)) {
17250                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17251                     tg3_asic_rev(tp) == ASIC_REV_5750)
17252                         tp->dma_rwctrl |= 0x003f0000;
17253                 else
17254                         tp->dma_rwctrl |= 0x003f000f;
17255         } else {
17256                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17257                     tg3_asic_rev(tp) == ASIC_REV_5704) {
17258                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17259                         u32 read_water = 0x7;
17260
17261                         /* If the 5704 is behind the EPB bridge, we can
17262                          * do the less restrictive ONE_DMA workaround for
17263                          * better performance.
17264                          */
17265                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17266                             tg3_asic_rev(tp) == ASIC_REV_5704)
17267                                 tp->dma_rwctrl |= 0x8000;
17268                         else if (ccval == 0x6 || ccval == 0x7)
17269                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17270
17271                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
17272                                 read_water = 4;
17273                         /* Set bit 23 to enable PCIX hw bug fix */
17274                         tp->dma_rwctrl |=
17275                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17276                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17277                                 (1 << 23);
17278                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17279                         /* 5780 always in PCIX mode */
17280                         tp->dma_rwctrl |= 0x00144000;
17281                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17282                         /* 5714 always in PCIX mode */
17283                         tp->dma_rwctrl |= 0x00148000;
17284                 } else {
17285                         tp->dma_rwctrl |= 0x001b000f;
17286                 }
17287         }
17288         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17289                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17290
17291         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17292             tg3_asic_rev(tp) == ASIC_REV_5704)
17293                 tp->dma_rwctrl &= 0xfffffff0;
17294
17295         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17296             tg3_asic_rev(tp) == ASIC_REV_5701) {
17297                 /* Remove this if it causes problems for some boards. */
17298                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17299
17300                 /* On 5700/5701 chips, we need to set this bit.
17301                  * Otherwise the chip will issue cacheline transactions
17302                  * to streamable DMA memory with not all the byte
17303                  * enables turned on.  This is an error on several
17304                  * RISC PCI controllers, in particular sparc64.
17305                  *
17306                  * On 5703/5704 chips, this bit has been reassigned
17307                  * a different meaning.  In particular, it is used
17308                  * on those chips to enable a PCI-X workaround.
17309                  */
17310                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17311         }
17312
17313         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17314
17315
17316         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17317             tg3_asic_rev(tp) != ASIC_REV_5701)
17318                 goto out;
17319
17320         /* It is best to perform the DMA test with the maximum write
17321          * burst size to expose the 5700/5701 write DMA bug.
17322          */
17323         saved_dma_rwctrl = tp->dma_rwctrl;
17324         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17325         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17326
17327         while (1) {
17328                 u32 *p = buf, i;
17329
17330                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17331                         p[i] = i;
17332
17333                 /* Send the buffer to the chip. */
17334                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17335                 if (ret) {
17336                         dev_err(&tp->pdev->dev,
17337                                 "%s: Buffer write failed. err = %d\n",
17338                                 __func__, ret);
17339                         break;
17340                 }
17341
17342                 /* Now read it back. */
17343                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17344                 if (ret) {
17345                         dev_err(&tp->pdev->dev,
17346                                 "%s: Buffer read failed. err = %d\n", __func__, ret);
17347                         break;
17348                 }
17349
17350                 /* Verify it. */
17351                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17352                         if (p[i] == i)
17353                                 continue;
17354
17355                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17356                             DMA_RWCTRL_WRITE_BNDRY_16) {
17357                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17358                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17359                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17360                                 break;
17361                         } else {
17362                                 dev_err(&tp->pdev->dev,
17363                                         "%s: Buffer corrupted on read back! (%d != %d)\n",
17364                                         __func__, p[i], i);
17365                                 ret = -ENODEV;
17366                                 goto out;
17367                         }
17368                 }
17369
17370                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17371                         /* Success. */
17372                         ret = 0;
17373                         break;
17374                 }
17375         }
17376         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17377             DMA_RWCTRL_WRITE_BNDRY_16) {
17378                 /* The DMA test passed without adjusting the DMA boundary;
17379                  * now look for chipsets that are known to expose the
17380                  * DMA bug without failing the test.
17381                  */
17382                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17383                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17384                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17385                 } else {
17386                         /* Safe to use the calculated DMA boundary. */
17387                         tp->dma_rwctrl = saved_dma_rwctrl;
17388                 }
17389
17390                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17391         }
17392
17393 out:
17394         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17395 out_nofree:
17396         return ret;
17397 }
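
/* tg3_test_dma() fills the 0x2000-byte buffer with 0x2000 / 4 = 2048
 * incrementing u32 words, bounces it through the chip in both directions,
 * and on a mismatch first retries with the write boundary forced down to
 * 16 bytes before declaring the engine bad.
 */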
17398
17399 static void tg3_init_bufmgr_config(struct tg3 *tp)
17400 {
17401         if (tg3_flag(tp, 57765_PLUS)) {
17402                 tp->bufmgr_config.mbuf_read_dma_low_water =
17403                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17404                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17405                         DEFAULT_MB_MACRX_LOW_WATER_57765;
17406                 tp->bufmgr_config.mbuf_high_water =
17407                         DEFAULT_MB_HIGH_WATER_57765;
17408
17409                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17410                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17411                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17412                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17413                 tp->bufmgr_config.mbuf_high_water_jumbo =
17414                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17415         } else if (tg3_flag(tp, 5705_PLUS)) {
17416                 tp->bufmgr_config.mbuf_read_dma_low_water =
17417                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17418                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17419                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17420                 tp->bufmgr_config.mbuf_high_water =
17421                         DEFAULT_MB_HIGH_WATER_5705;
17422                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17423                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17424                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17425                         tp->bufmgr_config.mbuf_high_water =
17426                                 DEFAULT_MB_HIGH_WATER_5906;
17427                 }
17428
17429                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17430                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17431                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17432                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17433                 tp->bufmgr_config.mbuf_high_water_jumbo =
17434                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17435         } else {
17436                 tp->bufmgr_config.mbuf_read_dma_low_water =
17437                         DEFAULT_MB_RDMA_LOW_WATER;
17438                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17439                         DEFAULT_MB_MACRX_LOW_WATER;
17440                 tp->bufmgr_config.mbuf_high_water =
17441                         DEFAULT_MB_HIGH_WATER;
17442
17443                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17444                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17445                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17446                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17447                 tp->bufmgr_config.mbuf_high_water_jumbo =
17448                         DEFAULT_MB_HIGH_WATER_JUMBO;
17449         }
17450
17451         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17452         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17453 }
17454
17455 static char *tg3_phy_string(struct tg3 *tp)
17456 {
17457         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17458         case TG3_PHY_ID_BCM5400:        return "5400";
17459         case TG3_PHY_ID_BCM5401:        return "5401";
17460         case TG3_PHY_ID_BCM5411:        return "5411";
17461         case TG3_PHY_ID_BCM5701:        return "5701";
17462         case TG3_PHY_ID_BCM5703:        return "5703";
17463         case TG3_PHY_ID_BCM5704:        return "5704";
17464         case TG3_PHY_ID_BCM5705:        return "5705";
17465         case TG3_PHY_ID_BCM5750:        return "5750";
17466         case TG3_PHY_ID_BCM5752:        return "5752";
17467         case TG3_PHY_ID_BCM5714:        return "5714";
17468         case TG3_PHY_ID_BCM5780:        return "5780";
17469         case TG3_PHY_ID_BCM5755:        return "5755";
17470         case TG3_PHY_ID_BCM5787:        return "5787";
17471         case TG3_PHY_ID_BCM5784:        return "5784";
17472         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17473         case TG3_PHY_ID_BCM5906:        return "5906";
17474         case TG3_PHY_ID_BCM5761:        return "5761";
17475         case TG3_PHY_ID_BCM5718C:       return "5718C";
17476         case TG3_PHY_ID_BCM5718S:       return "5718S";
17477         case TG3_PHY_ID_BCM57765:       return "57765";
17478         case TG3_PHY_ID_BCM5719C:       return "5719C";
17479         case TG3_PHY_ID_BCM5720C:       return "5720C";
17480         case TG3_PHY_ID_BCM5762:        return "5762C";
17481         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17482         case 0:                 return "serdes";
17483         default:                return "unknown";
17484         }
17485 }
17486
17487 static char *tg3_bus_string(struct tg3 *tp, char *str)
17488 {
17489         if (tg3_flag(tp, PCI_EXPRESS)) {
17490                 strcpy(str, "PCI Express");
17491                 return str;
17492         } else if (tg3_flag(tp, PCIX_MODE)) {
17493                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17494
17495                 strcpy(str, "PCIX:");
17496
17497                 if ((clock_ctrl == 7) ||
17498                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17499                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17500                         strcat(str, "133MHz");
17501                 else if (clock_ctrl == 0)
17502                         strcat(str, "33MHz");
17503                 else if (clock_ctrl == 2)
17504                         strcat(str, "50MHz");
17505                 else if (clock_ctrl == 4)
17506                         strcat(str, "66MHz");
17507                 else if (clock_ctrl == 6)
17508                         strcat(str, "100MHz");
17509         } else {
17510                 strcpy(str, "PCI:");
17511                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17512                         strcat(str, "66MHz");
17513                 else
17514                         strcat(str, "33MHz");
17515         }
17516         if (tg3_flag(tp, PCI_32BIT))
17517                 strcat(str, ":32-bit");
17518         else
17519                 strcat(str, ":64-bit");
17520         return str;
17521 }
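
/* Sample tg3_bus_string() outputs: "PCI Express" (the speed and width
 * suffixes are skipped on PCIe), "PCIX:133MHz:64-bit", or
 * "PCI:66MHz:32-bit".  The caller supplies the buffer; tg3_init_one()
 * passes a 40-byte stack array, comfortably above the longest string
 * built here.
 */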
17522
17523 static void tg3_init_coal(struct tg3 *tp)
17524 {
17525         struct ethtool_coalesce *ec = &tp->coal;
17526
17527         memset(ec, 0, sizeof(*ec));
17528         ec->cmd = ETHTOOL_GCOALESCE;
17529         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17530         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17531         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17532         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17533         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17534         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17535         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17536         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17537         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17538
17539         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17540                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17541                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17542                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17543                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17544                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17545         }
17546
17547         if (tg3_flag(tp, 5705_PLUS)) {
17548                 ec->rx_coalesce_usecs_irq = 0;
17549                 ec->tx_coalesce_usecs_irq = 0;
17550                 ec->stats_block_coalesce_usecs = 0;
17551         }
17552 }
17553
17554 static int tg3_init_one(struct pci_dev *pdev,
17555                         const struct pci_device_id *ent)
17556 {
17557         struct net_device *dev;
17558         struct tg3 *tp;
17559         int i, err;
17560         u32 sndmbx, rcvmbx, intmbx;
17561         char str[40];
17562         u64 dma_mask, persist_dma_mask;
17563         netdev_features_t features = 0;
17564
17565         err = pci_enable_device(pdev);
17566         if (err) {
17567                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17568                 return err;
17569         }
17570
17571         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17572         if (err) {
17573                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17574                 goto err_out_disable_pdev;
17575         }
17576
17577         pci_set_master(pdev);
17578
17579         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17580         if (!dev) {
17581                 err = -ENOMEM;
17582                 goto err_out_free_res;
17583         }
17584
17585         SET_NETDEV_DEV(dev, &pdev->dev);
17586
17587         tp = netdev_priv(dev);
17588         tp->pdev = pdev;
17589         tp->dev = dev;
17590         tp->rx_mode = TG3_DEF_RX_MODE;
17591         tp->tx_mode = TG3_DEF_TX_MODE;
17592         tp->irq_sync = 1;
17593         tp->pcierr_recovery = false;
17594
17595         if (tg3_debug > 0)
17596                 tp->msg_enable = tg3_debug;
17597         else
17598                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17599
17600         if (pdev_is_ssb_gige_core(pdev)) {
17601                 tg3_flag_set(tp, IS_SSB_CORE);
17602                 if (ssb_gige_must_flush_posted_writes(pdev))
17603                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17604                 if (ssb_gige_one_dma_at_once(pdev))
17605                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17606                 if (ssb_gige_have_roboswitch(pdev)) {
17607                         tg3_flag_set(tp, USE_PHYLIB);
17608                         tg3_flag_set(tp, ROBOSWITCH);
17609                 }
17610                 if (ssb_gige_is_rgmii(pdev))
17611                         tg3_flag_set(tp, RGMII_MODE);
17612         }
17613
17614         /* The word/byte swap controls here affect register access byte
17615          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
17616          * setting below.
17617          */
17618         tp->misc_host_ctrl =
17619                 MISC_HOST_CTRL_MASK_PCI_INT |
17620                 MISC_HOST_CTRL_WORD_SWAP |
17621                 MISC_HOST_CTRL_INDIR_ACCESS |
17622                 MISC_HOST_CTRL_PCISTATE_RW;
17623
17624         /* The NONFRM (non-frame) byte/word swap controls take effect
17625          * on descriptor entries, i.e. anything which isn't packet data.
17626          *
17627          * The StrongARM chips on the board (one for tx, one for rx)
17628          * are running in big-endian mode.
17629          */
17630         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17631                         GRC_MODE_WSWAP_NONFRM_DATA);
17632 #ifdef __BIG_ENDIAN
17633         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17634 #endif
17635         spin_lock_init(&tp->lock);
17636         spin_lock_init(&tp->indirect_lock);
17637         INIT_WORK(&tp->reset_task, tg3_reset_task);
17638
17639         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17640         if (!tp->regs) {
17641                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17642                 err = -ENOMEM;
17643                 goto err_out_free_dev;
17644         }
17645
17646         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17647             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17648             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17649             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17650             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17651             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17652             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17653             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17654             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17655             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17656             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17657             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17658             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17659             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17660             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17661                 tg3_flag_set(tp, ENABLE_APE);
17662                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17663                 if (!tp->aperegs) {
17664                         dev_err(&pdev->dev,
17665                                 "Cannot map APE registers, aborting\n");
17666                         err = -ENOMEM;
17667                         goto err_out_iounmap;
17668                 }
17669         }
17670
17671         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17672         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17673
17674         dev->ethtool_ops = &tg3_ethtool_ops;
17675         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17676         dev->netdev_ops = &tg3_netdev_ops;
17677         dev->irq = pdev->irq;
17678
17679         err = tg3_get_invariants(tp, ent);
17680         if (err) {
17681                 dev_err(&pdev->dev,
17682                         "Problem fetching invariants of chip, aborting\n");
17683                 goto err_out_apeunmap;
17684         }
17685
17686         /* The EPB bridge inside 5714, 5715, and 5780 and any
17687          * device behind the EPB cannot support DMA addresses > 40-bit.
17688          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17689          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17690          * do DMA address check in tg3_start_xmit().
17691          */
17692         if (tg3_flag(tp, IS_5788))
17693                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17694         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17695                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17696 #ifdef CONFIG_HIGHMEM
17697                 dma_mask = DMA_BIT_MASK(64);
17698 #endif
17699         } else
17700                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17701
17702         /* Configure DMA attributes. */
17703         if (dma_mask > DMA_BIT_MASK(32)) {
17704                 err = dma_set_mask(&pdev->dev, dma_mask);
17705                 if (!err) {
17706                         features |= NETIF_F_HIGHDMA;
17707                         err = dma_set_coherent_mask(&pdev->dev,
17708                                                     persist_dma_mask);
17709                         if (err < 0) {
17710                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17711                                         "DMA for consistent allocations\n");
17712                                 goto err_out_apeunmap;
17713                         }
17714                 }
17715         }
17716         if (err || dma_mask == DMA_BIT_MASK(32)) {
17717                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
17718                 if (err) {
17719                         dev_err(&pdev->dev,
17720                                 "No usable DMA configuration, aborting\n");
17721                         goto err_out_apeunmap;
17722                 }
17723         }
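        /* The negotiation above is the usual DMA API fallback pattern; a
         * minimal generic sketch (assuming only a struct device *d):
         *
         *      if (dma_set_mask_and_coherent(d, DMA_BIT_MASK(64)) &&
         *          dma_set_mask_and_coherent(d, DMA_BIT_MASK(32)))
         *              return -EIO;
         *
         * tg3 keeps the streaming and coherent masks separate because
         * 40BIT_DMA_BUG parts must keep coherent (persistent)
         * allocations below 40 bits even when streaming addresses can
         * be checked in tg3_start_xmit().
         */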
17724
17725         tg3_init_bufmgr_config(tp);
17726
17727         /* 5700 B0 chips do not support checksumming correctly due
17728          * to hardware bugs.
17729          */
17730         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17731                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17732
17733                 if (tg3_flag(tp, 5755_PLUS))
17734                         features |= NETIF_F_IPV6_CSUM;
17735         }
17736
17737         /* TSO is on by default on chips that support hardware TSO.
17738          * Firmware TSO on older chips gives lower performance, so it
17739          * is off by default, but can be enabled using ethtool.
17740          */
17741         if ((tg3_flag(tp, HW_TSO_1) ||
17742              tg3_flag(tp, HW_TSO_2) ||
17743              tg3_flag(tp, HW_TSO_3)) &&
17744             (features & NETIF_F_IP_CSUM))
17745                 features |= NETIF_F_TSO;
17746         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17747                 if (features & NETIF_F_IPV6_CSUM)
17748                         features |= NETIF_F_TSO6;
17749                 if (tg3_flag(tp, HW_TSO_3) ||
17750                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17751                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17752                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17753                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17754                     tg3_asic_rev(tp) == ASIC_REV_57780)
17755                         features |= NETIF_F_TSO_ECN;
17756         }
17757
17758         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17759                          NETIF_F_HW_VLAN_CTAG_RX;
17760         dev->vlan_features |= features;
17761
17762         /*
17763          * Add loopback capability only for a subset of devices that support
17764          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17765          * loopback for the remaining devices.
17766          */
17767         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17768             !tg3_flag(tp, CPMU_PRESENT))
17769                 /* Add the loopback capability */
17770                 features |= NETIF_F_LOOPBACK;
17771
17772         dev->hw_features |= features;
17773         dev->priv_flags |= IFF_UNICAST_FLT;
17774
17775         /* MTU range: 60 - 9000 or 1500, depending on hardware */
17776         dev->min_mtu = TG3_MIN_MTU;
17777         dev->max_mtu = TG3_MAX_MTU(tp);
17778
17779         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17780             !tg3_flag(tp, TSO_CAPABLE) &&
17781             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17782                 tg3_flag_set(tp, MAX_RXPEND_64);
17783                 tp->rx_pending = 63;
17784         }
17785
17786         err = tg3_get_device_address(tp);
17787         if (err) {
17788                 dev_err(&pdev->dev,
17789                         "Could not obtain valid ethernet address, aborting\n");
17790                 goto err_out_apeunmap;
17791         }
17792
17793         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17794         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17795         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17796         for (i = 0; i < tp->irq_max; i++) {
17797                 struct tg3_napi *tnapi = &tp->napi[i];
17798
17799                 tnapi->tp = tp;
17800                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17801
17802                 tnapi->int_mbox = intmbx;
17803                 if (i <= 4)
17804                         intmbx += 0x8;
17805                 else
17806                         intmbx += 0x4;
17807
17808                 tnapi->consmbox = rcvmbx;
17809                 tnapi->prodmbox = sndmbx;
17810
17811                 if (i)
17812                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17813                 else
17814                         tnapi->coal_now = HOSTCC_MODE_NOW;
17815
17816                 if (!tg3_flag(tp, SUPPORT_MSIX))
17817                         break;
17818
17819                 /*
17820                  * If we support MSIX, we'll be using RSS.  If we're using
17821                  * RSS, the first vector only handles link interrupts and the
17822                  * remaining vectors handle rx and tx interrupts.  Reuse the
17823                  * mailbox values for the next iteration.  The values we set up
17824                  * above are still useful for the single-vector mode.
17825                  */
17826                 if (!i)
17827                         continue;
17828
17829                 rcvmbx += 0x8;
17830
17831                 if (sndmbx & 0x4)
17832                         sndmbx -= 0x4;
17833                 else
17834                         sndmbx += 0xc;
17835         }
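
        /* Mailbox bookkeeping in the loop above: the interrupt mailbox
         * advances for every vector (0x8 steps while i <= 4, then 0x4),
         * while vector 1 reuses vector 0's consumer and producer
         * mailboxes since the first RSS vector handles only link
         * interrupts.  From there the consumer mailboxes stride 0x8 per
         * vector, and the "+0xc / -0x4" alternation walks the producer
         * mailboxes through the two 4-byte halves of each 8-byte slot
         * in swapped order.
         */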
17836
17837         /*
17838          * Reset the chip in case a UNDI or EFI driver did not shut down
17839          * DMA.  The DMA self test will enable the WDMAC, and we'll see
17840          * (spurious) pending DMA on the PCI bus at that point.
17841          */
17842         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17843             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17844                 tg3_full_lock(tp, 0);
17845                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17846                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17847                 tg3_full_unlock(tp);
17848         }
17849
17850         err = tg3_test_dma(tp);
17851         if (err) {
17852                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17853                 goto err_out_apeunmap;
17854         }
17855
17856         tg3_init_coal(tp);
17857
17858         pci_set_drvdata(pdev, dev);
17859
17860         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17861             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17862             tg3_asic_rev(tp) == ASIC_REV_5762)
17863                 tg3_flag_set(tp, PTP_CAPABLE);
17864
17865         tg3_timer_init(tp);
17866
17867         tg3_carrier_off(tp);
17868
17869         err = register_netdev(dev);
17870         if (err) {
17871                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17872                 goto err_out_apeunmap;
17873         }
17874
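        /*
         * PTP clock registration is treated as optional: if
         * ptp_clock_register() fails, tp->ptp_clock is simply cleared and
         * the probe still succeeds, leaving the device without a PHC.
         */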
17875         if (tg3_flag(tp, PTP_CAPABLE)) {
17876                 tg3_ptp_init(tp);
17877                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17878                                                    &tp->pdev->dev);
17879                 if (IS_ERR(tp->ptp_clock))
17880                         tp->ptp_clock = NULL;
17881         }
17882
17883         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17884                     tp->board_part_number,
17885                     tg3_chip_rev_id(tp),
17886                     tg3_bus_string(tp, str),
17887                     dev->dev_addr);
17888
17889         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)) {
17890                 char *ethtype;
17891
17892                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17893                         ethtype = "10/100Base-TX";
17894                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17895                         ethtype = "1000Base-SX";
17896                 else
17897                         ethtype = "10/100/1000Base-T";
17898
17899                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17900                             "(WireSpeed[%d], EEE[%d])\n",
17901                             tg3_phy_string(tp), ethtype,
17902                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17903                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17904         }
17905
17906         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17907                     (dev->features & NETIF_F_RXCSUM) != 0,
17908                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17909                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17910                     tg3_flag(tp, ENABLE_ASF) != 0,
17911                     tg3_flag(tp, TSO_CAPABLE) != 0);
17912         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17913                     tp->dma_rwctrl,
17914                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17915                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17916
17917         pci_save_state(pdev);
17918
17919         return 0;
17920
17921 err_out_apeunmap:
17922         if (tp->aperegs) {
17923                 iounmap(tp->aperegs);
17924                 tp->aperegs = NULL;
17925         }
17926
17927 err_out_iounmap:
17928         if (tp->regs) {
17929                 iounmap(tp->regs);
17930                 tp->regs = NULL;
17931         }
17932
17933 err_out_free_dev:
17934         free_netdev(dev);
17935
17936 err_out_free_res:
17937         pci_release_regions(pdev);
17938
17939 err_out_disable_pdev:
17940         if (pci_is_enabled(pdev))
17941                 pci_disable_device(pdev);
17942         return err;
17943 }
17944
17945 static void tg3_remove_one(struct pci_dev *pdev)
17946 {
17947         struct net_device *dev = pci_get_drvdata(pdev);
17948
17949         if (dev) {
17950                 struct tg3 *tp = netdev_priv(dev);
17951
17952                 tg3_ptp_fini(tp);
17953
17954                 release_firmware(tp->fw);
17955
17956                 tg3_reset_task_cancel(tp);
17957
17958                 if (tg3_flag(tp, USE_PHYLIB)) {
17959                         tg3_phy_fini(tp);
17960                         tg3_mdio_fini(tp);
17961                 }
17962
17963                 unregister_netdev(dev);
17964                 if (tp->aperegs) {
17965                         iounmap(tp->aperegs);
17966                         tp->aperegs = NULL;
17967                 }
17968                 if (tp->regs) {
17969                         iounmap(tp->regs);
17970                         tp->regs = NULL;
17971                 }
17972                 free_netdev(dev);
17973                 pci_release_regions(pdev);
17974                 pci_disable_device(pdev);
17975         }
17976 }
17977
17978 #ifdef CONFIG_PM_SLEEP
17979 static int tg3_suspend(struct device *device)
17980 {
17981         struct net_device *dev = dev_get_drvdata(device);
17982         struct tg3 *tp = netdev_priv(dev);
17983         int err = 0;
17984
17985         rtnl_lock();
17986
17987         if (!netif_running(dev))
17988                 goto unlock;
17989
17990         tg3_reset_task_cancel(tp);
17991         tg3_phy_stop(tp);
17992         tg3_netif_stop(tp);
17993
17994         tg3_timer_stop(tp);
17995
17996         tg3_full_lock(tp, 1);
17997         tg3_disable_ints(tp);
17998         tg3_full_unlock(tp);
17999
18000         netif_device_detach(dev);
18001
18002         tg3_full_lock(tp, 0);
18003         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18004         tg3_flag_clear(tp, INIT_COMPLETE);
18005         tg3_full_unlock(tp);
18006
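        /*
         * If the chip cannot be prepared for power-down, undo the halt
         * above and bring the interface back up so the device stays
         * usable rather than suspending in a half-configured state.
         */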
18007         err = tg3_power_down_prepare(tp);
18008         if (err) {
18009                 int err2;
18010
18011                 tg3_full_lock(tp, 0);
18012
18013                 tg3_flag_set(tp, INIT_COMPLETE);
18014                 err2 = tg3_restart_hw(tp, true);
18015                 if (err2)
18016                         goto out;
18017
18018                 tg3_timer_start(tp);
18019
18020                 netif_device_attach(dev);
18021                 tg3_netif_start(tp);
18022
18023 out:
18024                 tg3_full_unlock(tp);
18025
18026                 if (!err2)
18027                         tg3_phy_start(tp);
18028         }
18029
18030 unlock:
18031         rtnl_unlock();
18032         return err;
18033 }
18034
18035 static int tg3_resume(struct device *device)
18036 {
18037         struct net_device *dev = dev_get_drvdata(device);
18038         struct tg3 *tp = netdev_priv(dev);
18039         int err = 0;
18040
18041         rtnl_lock();
18042
18043         if (!netif_running(dev))
18044                 goto unlock;
18045
18046         netif_device_attach(dev);
18047
18048         tg3_full_lock(tp, 0);
18049
18050         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18051
18052         tg3_flag_set(tp, INIT_COMPLETE);
18053         err = tg3_restart_hw(tp,
18054                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18055         if (err)
18056                 goto out;
18057
18058         tg3_timer_start(tp);
18059
18060         tg3_netif_start(tp);
18061
18062 out:
18063         tg3_full_unlock(tp);
18064
18065         if (!err)
18066                 tg3_phy_start(tp);
18067
18068 unlock:
18069         rtnl_unlock();
18070         return err;
18071 }
18072 #endif /* CONFIG_PM_SLEEP */
18073
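/*
 * SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that uses the two
 * callbacks for all system-sleep transitions (suspend/resume,
 * freeze/thaw, poweroff/restore).  When CONFIG_PM_SLEEP is not set it
 * wires up no callbacks at all, which is why tg3_suspend() and
 * tg3_resume() are only compiled under the #ifdef above.
 */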
18074 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18075
18076 static void tg3_shutdown(struct pci_dev *pdev)
18077 {
18078         struct net_device *dev = pci_get_drvdata(pdev);
18079         struct tg3 *tp = netdev_priv(dev);
18080
18081         rtnl_lock();
18082         netif_device_detach(dev);
18083
18084         if (netif_running(dev))
18085                 dev_close(dev);
18086
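        /*
         * Power the chip down only for a real power-off; tg3_power_down()
         * also configures PCI wake-up if Wake-on-LAN was enabled.  On a
         * reboot the device is left alone for the next driver to
         * reinitialize.
         */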
18087         if (system_state == SYSTEM_POWER_OFF)
18088                 tg3_power_down(tp);
18089
18090         rtnl_unlock();
18091 }
18092
18093 /**
18094  * tg3_io_error_detected - called when PCI error is detected
18095  * @pdev: Pointer to PCI device
18096  * @state: The current PCI connection state
18097  *
18098  * This function is called after a PCI bus error affecting
18099  * this device has been detected.
18100  */
18101 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18102                                               pci_channel_state_t state)
18103 {
18104         struct net_device *netdev = pci_get_drvdata(pdev);
18105         struct tg3 *tp = netdev_priv(netdev);
18106         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18107
18108         netdev_info(netdev, "PCI I/O error detected\n");
18109
18110         rtnl_lock();
18111
18112         /* This could be a second call, or we may not have a netdev yet */
18113         if (!netdev || tp->pcierr_recovery || !netif_running(netdev))
18114                 goto done;
18115
18116         /* Start recovery only for a recoverable (frozen) error; permanent failures are handled at "done" */
18117         if (state == pci_channel_io_frozen)
18118                 tp->pcierr_recovery = true;
18119
18120         tg3_phy_stop(tp);
18121
18122         tg3_netif_stop(tp);
18123
18124         tg3_timer_stop(tp);
18125
18126         /* Want to make sure that the reset task doesn't run */
18127         tg3_reset_task_cancel(tp);
18128
18129         netif_device_detach(netdev);
18130
18131         /* Clean up software state, even if MMIO is blocked */
18132         tg3_full_lock(tp, 0);
18133         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18134         tg3_full_unlock(tp);
18135
18136 done:
18137         if (state == pci_channel_io_perm_failure) {
18138                 if (netdev) {
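                        /*
                         * If the device was running, NAPI was disabled by
                         * tg3_netif_stop() above; re-enable it so that the
                         * close path can tear everything down cleanly.
                         */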
18139                         tg3_napi_enable(tp);
18140                         dev_close(netdev);
18141                 }
18142                 err = PCI_ERS_RESULT_DISCONNECT;
18143         } else {
18144                 pci_disable_device(pdev);
18145         }
18146
18147         rtnl_unlock();
18148
18149         return err;
18150 }
18151
18152 /**
18153  * tg3_io_slot_reset - called after the pci bus has been reset.
18154  * @pdev: Pointer to PCI device
18155  *
18156  * Restart the card from scratch, as if from a cold boot.
18157  * At this point, the card has experienced a hard reset,
18158  * followed by fixups by the BIOS, and has its config space
18159  * set up identically to what it was at cold boot.
18160  */
18161 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18162 {
18163         struct net_device *netdev = pci_get_drvdata(pdev);
18164         struct tg3 *tp = netdev_priv(netdev);
18165         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18166         int err;
18167
18168         rtnl_lock();
18169
18170         if (pci_enable_device(pdev)) {
18171                 dev_err(&pdev->dev,
18172                         "Cannot re-enable PCI device after reset.\n");
18173                 goto done;
18174         }
18175
18176         pci_set_master(pdev);
18177         pci_restore_state(pdev);
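        /*
         * pci_restore_state() consumes the state saved earlier, so save
         * it again right away for any subsequent slot reset.
         */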
18178         pci_save_state(pdev);
18179
18180         if (!netdev || !netif_running(netdev)) {
18181                 rc = PCI_ERS_RESULT_RECOVERED;
18182                 goto done;
18183         }
18184
18185         err = tg3_power_up(tp);
18186         if (err)
18187                 goto done;
18188
18189         rc = PCI_ERS_RESULT_RECOVERED;
18190
18191 done:
18192         if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18193                 tg3_napi_enable(tp);
18194                 dev_close(netdev);
18195         }
18196         rtnl_unlock();
18197
18198         return rc;
18199 }
18200
18201 /**
18202  * tg3_io_resume - called when traffic can start flowing again.
18203  * @pdev: Pointer to PCI device
18204  *
18205  * This callback is called when the error recovery driver tells
18206  * us that it's OK to resume normal operation.
18207  */
18208 static void tg3_io_resume(struct pci_dev *pdev)
18209 {
18210         struct net_device *netdev = pci_get_drvdata(pdev);
18211         struct tg3 *tp = netdev_priv(netdev);
18212         int err;
18213
18214         rtnl_lock();
18215
18216         if (!netdev || !netif_running(netdev))
18217                 goto done;
18218
18219         tg3_full_lock(tp, 0);
18220         tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18221         tg3_flag_set(tp, INIT_COMPLETE);
18222         err = tg3_restart_hw(tp, true);
18223         if (err) {
18224                 tg3_full_unlock(tp);
18225                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
18226                 goto done;
18227         }
18228
18229         netif_device_attach(netdev);
18230
18231         tg3_timer_start(tp);
18232
18233         tg3_netif_start(tp);
18234
18235         tg3_full_unlock(tp);
18236
18237         tg3_phy_start(tp);
18238
18239 done:
18240         tp->pcierr_recovery = false;
18241         rtnl_unlock();
18242 }
18243
18244 static const struct pci_error_handlers tg3_err_handler = {
18245         .error_detected = tg3_io_error_detected,
18246         .slot_reset     = tg3_io_slot_reset,
18247         .resume         = tg3_io_resume
18248 };
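/*
 * On an uncorrectable PCI error the AER core walks these callbacks in
 * order: .error_detected (MMIO may already be blocked), then
 * .slot_reset after the core has reset the link, and finally .resume
 * once traffic may flow again.  See
 * Documentation/PCI/pci-error-recovery.rst for the full state machine.
 */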
18249
18250 static struct pci_driver tg3_driver = {
18251         .name           = DRV_MODULE_NAME,
18252         .id_table       = tg3_pci_tbl,
18253         .probe          = tg3_init_one,
18254         .remove         = tg3_remove_one,
18255         .err_handler    = &tg3_err_handler,
18256         .driver.pm      = &tg3_pm_ops,
18257         .shutdown       = tg3_shutdown,
18258 };
18259
18260 module_pci_driver(tg3_driver);